author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-14 03:39:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-14 03:39:08 -0400
commit		2d65a9f48fcdf7866aab6457bc707ca233e0c791 (patch)
tree		f93e5838d6ac2e59434367f4ff905f7d9c45fc2b
parent		da92da3638a04894afdca8b99e973ddd20268471 (diff)
parent		dfda0df3426483cf5fc7441f23f318edbabecb03 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main git pull for the drm, I pretty much froze major
  pulls at -rc5/6 time, and haven't had much fallout, so will probably
  continue doing that.

  Lots of changes all over, big internal header cleanup to make it
  clear drm features are legacy things and what are things that modern
  KMS drivers should be using. Also big move to use the new generic
  fences in all the TTM drivers.

  core:
      atomic prep work,
      vblank rework changes, allows immediate vblank disables
      major header reworking and cleanups to better delineate legacy
      interfaces from what KMS drivers should be using.
      cursor planes locking fixes

  ttm:
      move to generic fences (affects all TTM drivers)
      ppc64 caching fixes

  radeon:
      userptr support,
      uvd for old asics,
      reset rework for fence changes
      better buffer placement changes,
      dpm feature enablement
      hdmi audio support fixes

  intel:
      Cherryview work,
      180 degree rotation,
      skylake prep work,
      execlist command submission
      full ppgtt prep work
      cursor improvements
      edid caching, vdd handling improvements

  nouveau:
      fence reworking
      kepler memory clock work
      gt21x clock work
      fan control improvements
      hdmi infoframe fixes
      DP audio

  ast:
      ppc64 fixes
      caching fix

  rcar:
      rcar-du DT support

  ipuv3:
      prep work for capture support

  msm:
      LVDS support for mdp4, new panel, gpu refactoring

  exynos:
      exynos3250 SoC support, drop bad mmap interface, mipi dsi changes,
      and component match support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (640 commits)
  drm/mst: rework payload table allocation to conform better.
  drm/ast: Fix HW cursor image
  drm/radeon/kv: add uvd/vce info to dpm debugfs output
  drm/radeon/ci: add uvd/vce info to dpm debugfs output
  drm/radeon: export reservation_object from dmabuf to ttm
  drm/radeon: cope with foreign fences inside the reservation object
  drm/radeon: cope with foreign fences inside display
  drm/core: use helper to check driver features
  drm/radeon/cik: write gfx ucode version to ucode addr reg
  drm/radeon/si: print full CS when we hit a packet 0
  drm/radeon: remove unecessary includes
  drm/radeon/combios: declare legacy_connector_convert as static
  drm/radeon/atombios: declare connector convert tables as static
  drm/radeon: drop btc_get_max_clock_from_voltage_dependency_table
  drm/radeon/dpm: drop clk/voltage dependency filters for BTC
  drm/radeon/dpm: drop clk/voltage dependency filters for CI
  drm/radeon/dpm: drop clk/voltage dependency filters for SI
  drm/radeon/dpm: drop clk/voltage dependency filters for NI
  drm/radeon: disable audio when we disable hdmi (v2)
  drm/radeon: split audio enable between eg and r600 (v2)
  ...
-rw-r--r--  Documentation/DocBook/drm.tmpl | 16
-rw-r--r--  Documentation/devicetree/bindings/drm/tilcdc/panel.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/panel/auo,b101xtn01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/video/adi,adv7123.txt | 50
-rw-r--r--  Documentation/devicetree/bindings/video/exynos_dsim.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/video/renesas,du.txt | 84
-rw-r--r--  Documentation/devicetree/bindings/video/samsung-fimd.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/video/thine,thc63lvdm83d | 50
-rw-r--r--  Documentation/devicetree/bindings/video/vga-connector.txt | 36
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/arm/boot/dts/exynos3250.dtsi | 33
-rw-r--r--  arch/arm/mach-shmobile/board-koelsch-reference.c | 19
-rw-r--r--  arch/arm/mach-shmobile/board-koelsch.c | 19
-rw-r--r--  arch/arm/mach-shmobile/board-lager-reference.c | 19
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 19
-rw-r--r--  arch/arm/mach-shmobile/board-marzen.c | 19
-rw-r--r--  drivers/gpu/drm/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c | 38
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 12
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 79
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 46
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 23
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h | 38
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 24
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 24
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 21
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 1
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 7
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 94
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 394
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 11
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 80
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 40
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 123
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 85
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 30
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 12
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_info.c | 89
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 132
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 254
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 463
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 62
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 36
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 24
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 1
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 213
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 46
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 38
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 5
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 17
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 9
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 88
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 104
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 90
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 55
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 106
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 453
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 16
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c | 560
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 370
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 194
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 222
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 370
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 216
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 227
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 291
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 196
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 241
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 283
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 344
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1312
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1255
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 168
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1766
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 114
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 818
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 256
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 6
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 77
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 24
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 214
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 285
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 151
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 17
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 107
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 89
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 63
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 506
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 151
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c | 172
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 38
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/gpuobj.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 97
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/notify.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/gm100.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/conn.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/gm107.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 129
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 80
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/client.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/event.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/mm.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/notify.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bar.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h | 160
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fuse.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/gpio.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/pwr.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/base.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c | 137
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/fan.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c | 74
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/timing.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | 303
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 490
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fuse/base.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c (renamed from drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c) | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc | 94
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc | 122
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc | 133
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h | 903
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h | 1270
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h | 1300
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h | 869
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 120
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 551
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 180
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/class.h | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 27
-rw-r--r--  drivers/gpu/drm/qxl/Makefile | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c | 14
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 49
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 33
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 36
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c | 91
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 21
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_prime.c | 72
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 174
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 103
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 24
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 3
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 51
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/drm_buffer.c (renamed from drivers/gpu/drm/drm_buffer.c) | 6
-rw-r--r--  drivers/gpu/drm/radeon/drm_buffer.h (renamed from include/drm/drm_buffer.h) | 0
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 172
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 125
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 78
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 63
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 479
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 121
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 274
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 237
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 173
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 233
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 45
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h | 5
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.h | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 39
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 3
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_backlight.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_kms.c | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_kms.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_plane.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_regs.h | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 6
-rw-r--r--  drivers/gpu/drm/sti/sti_vtac.c | 12
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 61
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 74
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 304
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 48
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 160
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 3
-rw-r--r--  drivers/gpu/drm/udl/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 102
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 8
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 3
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 2
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 6
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 6
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 183
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 27
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 346
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 50
-rw-r--r--  drivers/gpu/ipu-v3/Kconfig | 3
-rw-r--r--  drivers/gpu/ipu-v3/Makefile | 4
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 479
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 217
-rw-r--r--  drivers/gpu/ipu-v3/ipu-csi.c | 741
-rw-r--r--  drivers/gpu/ipu-v3/ipu-ic.c | 778
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h | 30
-rw-r--r--  drivers/gpu/ipu-v3/ipu-smfc.c | 157
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 1
-rw-r--r--  drivers/video/fbdev/Kconfig | 4
-rw-r--r--  drivers/video/fbdev/core/Makefile | 1
-rw-r--r--  drivers/video/fbdev/core/fb_cmdline.c | 110
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 92
-rw-r--r--  drivers/video/fbdev/core/modedb.c | 3
-rw-r--r--  include/drm/ati_pcigart.h | 30
-rw-r--r--  include/drm/drmP.h | 721
-rw-r--r--  include/drm/drm_agpsupport.h | 26
-rw-r--r--  include/drm/drm_crtc.h | 30
-rw-r--r--  include/drm/drm_dp_helper.h | 16
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 2
-rw-r--r--  include/drm/drm_fb_helper.h | 1
-rw-r--r--  include/drm/drm_gem.h | 183
-rw-r--r--  include/drm/drm_gem_cma_helper.h | 4
-rw-r--r--  include/drm/drm_legacy.h | 203
-rw-r--r--  include/drm/drm_memory.h | 59
-rw-r--r--  include/drm/drm_mipi_dsi.h | 2
-rw-r--r--  include/drm/drm_modeset_lock.h | 20
-rw-r--r--  include/drm/drm_usb.h | 15
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 49
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 32
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 24
-rw-r--r--  include/linux/platform_data/rcar-du.h | 4
-rw-r--r--  include/uapi/drm/exynos_drm.h | 40
-rw-r--r--  include/uapi/drm/radeon_drm.h | 23
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h | 2
-rw-r--r--  include/video/imx-ipu-v3.h | 144
524 files changed, 24583 insertions, 11104 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index bacefc5b222e..be35bc328b77 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -291,10 +291,9 @@ char *date;</synopsis>
     <title>Device Registration</title>
     <para>
       A number of functions are provided to help with device registration.
-      The functions deal with PCI, USB and platform devices, respectively.
+      The functions deal with PCI and platform devices, respectively.
     </para>
 !Edrivers/gpu/drm/drm_pci.c
-!Edrivers/gpu/drm/drm_usb.c
 !Edrivers/gpu/drm/drm_platform.c
     <para>
       New drivers that no longer rely on the services provided by the
@@ -3386,6 +3385,13 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
     by scheduling a timer. The delay is accessible through the vblankoffdelay
     module parameter or the <varname>drm_vblank_offdelay</varname> global
     variable and expressed in milliseconds. Its default value is 5000 ms.
+    Zero means never disable, and a negative value means disable immediately.
+    Drivers may override the behaviour by setting the
+    <structname>drm_device</structname>
+    <structfield>vblank_disable_immediate</structfield> flag, which when set
+    causes vblank interrupts to be disabled immediately regardless of the
+    drm_vblank_offdelay value. The flag should only be set if there's a
+    properly working hardware vblank counter present.
   </para>
   <para>
     When a vertical blanking interrupt occurs drivers only need to call the
@@ -3400,6 +3406,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
   <sect2>
     <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
 !Edrivers/gpu/drm/drm_irq.c
+!Finclude/drm/drmP.h drm_crtc_vblank_waitqueue
   </sect2>
 </sect1>
 
@@ -3918,6 +3925,11 @@ int num_ioctls;</synopsis>
 !Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
 !Idrivers/gpu/drm/i915/i915_cmd_parser.c
       </sect2>
+      <sect2>
+        <title>Logical Rings, Logical Ring Contexts and Execlists</title>
+!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
+!Idrivers/gpu/drm/i915/intel_lrc.c
+      </sect2>
     </sect1>
   </chapter>
 </part>
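
[Editor's note] The vblank documentation added in the drm.tmpl hunk above describes the new vblank_disable_immediate flag. As a minimal, hypothetical sketch of how a KMS driver with a reliable hardware vblank counter might opt in — the function name and CRTC count are illustrative; only drm_vblank_init() and the drm_device field are the core interfaces documented above:

```c
#include <drm/drmP.h>

/*
 * Hypothetical driver load hook. "example_load" and the CRTC count
 * are made up for illustration; dev->vblank_disable_immediate is the
 * flag documented in the hunk above.
 */
static int example_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	ret = drm_vblank_init(dev, 2);	/* device-specific CRTC count */
	if (ret)
		return ret;

	/*
	 * Safe only with a properly working hardware vblank counter:
	 * disable the vblank interrupt as soon as the refcount drops
	 * to zero, instead of waiting out drm_vblank_offdelay.
	 */
	dev->vblank_disable_immediate = true;

	return 0;
}
```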
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/panel.txt b/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
index 9301c330d1a6..4ab9e2300907 100644
--- a/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
+++ b/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
@@ -18,6 +18,10 @@ Required properties:
   Documentation/devicetree/bindings/video/display-timing.txt for display
   timing binding details.
 
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+- enable-gpios: GPIO pin to enable or disable the panel
+
 Recommended properties:
  - pinctrl-names, pinctrl-0: the pincontrol settings to configure
    muxing properly for pins that connect to TFP410 device
@@ -29,6 +33,9 @@ Example:
 		compatible = "ti,tilcdc,panel";
 		pinctrl-names = "default";
 		pinctrl-0 = <&bone_lcd3_cape_lcd_pins>;
+		backlight = <&backlight>;
+		enable-gpios = <&gpio3 19 0>;
+
 		panel-info {
 			ac-bias = <255>;
 			ac-bias-intrpt = <0>;
diff --git a/Documentation/devicetree/bindings/panel/auo,b101xtn01.txt b/Documentation/devicetree/bindings/panel/auo,b101xtn01.txt
new file mode 100644
index 000000000000..889d511d66c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b101xtn01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101xtn01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index f67e3f84e8bc..c7bb11be15a3 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -92,6 +92,7 @@ maxim Maxim Integrated Products
 mediatek	MediaTek Inc.
 micrel	Micrel Inc.
 microchip	Microchip Technology Inc.
+mitsubishi	Mitsubishi Electric Corporation
 mosaixtech	Mosaix Technologies, Inc.
 moxa	Moxa
 mpl	MPL AG
@@ -144,6 +145,7 @@ st STMicroelectronics
 ste	ST-Ericsson
 stericsson	ST-Ericsson
 synology	Synology, Inc.
+thine	THine Electronics, Inc.
 ti	Texas Instruments
 tlm	Trusted Logic Mobility
 toradex	Toradex AG
diff --git a/Documentation/devicetree/bindings/video/adi,adv7123.txt b/Documentation/devicetree/bindings/video/adi,adv7123.txt
new file mode 100644
index 000000000000..a6b2b2b8f3d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/adi,adv7123.txt
@@ -0,0 +1,50 @@
+Analog Device ADV7123 Video DAC
+-------------------------------
+
+The ADV7123 is a digital-to-analog converter that outputs VGA signals from a
+parallel video input.
+
+Required properties:
+
+- compatible: Should be "adi,adv7123"
+
+Optional properties:
+
+- psave-gpios: Power save control GPIO
+
+Required nodes:
+
+The ADV7123 has two video ports. Their connections are modeled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for DPI input
+- Video port 1 for VGA output
+
+
+Example
+-------
+
+	adv7123: encoder@0 {
+		compatible = "adi,adv7123";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				adv7123_in: endpoint@0 {
+					remote-endpoint = <&dpi_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				adv7123_out: endpoint@0 {
+					remote-endpoint = <&vga_connector_in>;
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index 31036c667d54..e74243b4b317 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -2,6 +2,7 @@ Exynos MIPI DSI Master
 
 Required properties:
   - compatible: value should be one of the following
+		"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
 		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
   - reg: physical base address and length of the registers set for the device
diff --git a/Documentation/devicetree/bindings/video/renesas,du.txt b/Documentation/devicetree/bindings/video/renesas,du.txt
new file mode 100644
index 000000000000..5102830f2760
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/renesas,du.txt
@@ -0,0 +1,84 @@
+* Renesas R-Car Display Unit (DU)
+
+Required Properties:
+
+  - compatible: must be one of the following.
+    - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
+    - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
+    - "renesas,du-r8a7791" for R8A7791 (R-Car M2) compatible DU
+
+  - reg: A list of base address and length of each memory resource, one for
+    each entry in the reg-names property.
+  - reg-names: Name of the memory resources. The DU requires one memory
+    resource for the DU core (named "du") and one memory resource for each
+    LVDS encoder (named "lvds.x" with "x" being the LVDS controller numerical
+    index).
+
+  - interrupt-parent: phandle of the parent interrupt controller.
+  - interrupts: Interrupt specifiers for the DU interrupts.
+
+  - clocks: A list of phandles + clock-specifier pairs, one for each entry in
+    the clock-names property.
+  - clock-names: Name of the clocks. This property is model-dependent.
+    - R8A7779 uses a single functional clock. The clock doesn't need to be
+      named.
+    - R8A7790 and R8A7791 use one functional clock per channel and one clock
+      per LVDS encoder. The functional clocks must be named "du.x" with "x"
+      being the channel numerical index. The LVDS clocks must be named
+      "lvds.x" with "x" being the LVDS encoder numerical index.
+
+Required nodes:
+
+The connections to the DU output video ports are modeled using the OF graph
+bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+The following table lists for each supported model the port number
+corresponding to each DU output.
+
+			Port 0		Port 1		Port 2
+-----------------------------------------------------------------------------
+ R8A7779 (H1)		DPAD 0		DPAD 1		-
+ R8A7790 (H2)		DPAD		LVDS 0		LVDS 1
+ R8A7791 (M2)		DPAD		LVDS 0		-
+
+
+Example: R8A7790 (R-Car H2) DU
+
+	du: du@feb00000 {
+		compatible = "renesas,du-r8a7790";
+		reg = <0 0xfeb00000 0 0x70000>,
+		      <0 0xfeb90000 0 0x1c>,
+		      <0 0xfeb94000 0 0x1c>;
+		reg-names = "du", "lvds.0", "lvds.1";
+		interrupt-parent = <&gic>;
+		interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 268 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 269 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7790_CLK_DU0>,
+			 <&mstp7_clks R8A7790_CLK_DU1>,
+			 <&mstp7_clks R8A7790_CLK_DU2>,
+			 <&mstp7_clks R8A7790_CLK_LVDS0>,
+			 <&mstp7_clks R8A7790_CLK_LVDS1>;
+		clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				du_out_rgb: endpoint {
+				};
+			};
+			port@1 {
+				reg = <1>;
+				du_out_lvds0: endpoint {
+				};
+			};
+			port@2 {
+				reg = <2>;
+				du_out_lvds1: endpoint {
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index ecc899b9817b..4e6c77c85546 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -9,6 +9,7 @@ Required properties:
9 "samsung,s3c2443-fimd"; /* for S3C24XX SoCs */ 9 "samsung,s3c2443-fimd"; /* for S3C24XX SoCs */
10 "samsung,s3c6400-fimd"; /* for S3C64XX SoCs */ 10 "samsung,s3c6400-fimd"; /* for S3C64XX SoCs */
11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ 11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
12 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ 13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
13 "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */ 14 "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
14 15
diff --git a/Documentation/devicetree/bindings/video/thine,thc63lvdm83d b/Documentation/devicetree/bindings/video/thine,thc63lvdm83d
new file mode 100644
index 000000000000..527e236e9a2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/thine,thc63lvdm83d
@@ -0,0 +1,50 @@
+THine Electronics THC63LVDM83D LVDS serializer
+----------------------------------------------
+
+The THC63LVDM83D is an LVDS serializer designed to support pixel data
+transmission between a host and a flat panel.
+
+Required properties:
+
+- compatible: Should be "thine,thc63lvdm83d"
+
+Optional properties:
+
+- pwdn-gpios: Power down control GPIO
+
+Required nodes:
+
+The THC63LVDM83D has two video ports. Their connections are modeled using the
+OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for CMOS/TTL input
+- Video port 1 for LVDS output
+
+
+Example
+-------
+
+	lvds_enc: encoder@0 {
+		compatible = "thine,thc63lvdm83d";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				lvds_enc_in: endpoint@0 {
+					remote-endpoint = <&rgb_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				lvds_enc_out: endpoint@0 {
+					remote-endpoint = <&panel_in>;
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/video/vga-connector.txt b/Documentation/devicetree/bindings/video/vga-connector.txt
new file mode 100644
index 000000000000..c727f298e7ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/vga-connector.txt
@@ -0,0 +1,36 @@
+VGA Connector
+=============
+
+Required properties:
+
+- compatible: "vga-connector"
+
+Optional properties:
+
+- label: a symbolic name for the connector corresponding to a hardware label
+- ddc-i2c-bus: phandle to the I2C bus that is connected to VGA DDC
+
+Required nodes:
+
+The VGA connector internal connections are modeled using the OF graph bindings
+specified in Documentation/devicetree/bindings/graph.txt.
+
+The VGA connector has a single port that must be connected to a video source
+port.
+
+
+Example
+-------
+
+vga0: connector@0 {
+	compatible = "vga-connector";
+	label = "vga";
+
+	ddc-i2c-bus = <&i2c3>;
+
+	port {
+		vga_connector_in: endpoint {
+			remote-endpoint = <&adv7123_out>;
+		};
+	};
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index c52367997fb5..ee1bc5bc20ad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3164,7 +3164,7 @@ F: include/drm/drm_panel.h
 F:	Documentation/devicetree/bindings/panel/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M:	Daniel Vetter <daniel.vetter@ffwll.ch>
+M:	Daniel Vetter <daniel.vetter@intel.com>
 M:	Jani Nikula <jani.nikula@linux.intel.com>
 L:	intel-gfx@lists.freedesktop.org
 L:	dri-devel@lists.freedesktop.org
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 429a6c6cfcf9..8831c48c2bc9 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -132,6 +132,12 @@
 			reg = <0x10020000 0x4000>;
 		};
 
+		mipi_phy: video-phy@10020710 {
+			compatible = "samsung,s5pv210-mipi-video-phy";
+			reg = <0x10020710 8>;
+			#phy-cells = <1>;
+		};
+
 		pd_cam: cam-power-domain@10023C00 {
 			compatible = "samsung,exynos4210-pd";
 			reg = <0x10023C00 0x20>;
@@ -216,6 +222,33 @@
 			interrupts = <0 240 0>;
 		};
 
+		fimd: fimd@11c00000 {
+			compatible = "samsung,exynos3250-fimd";
+			reg = <0x11c00000 0x30000>;
+			interrupt-names = "fifo", "vsync", "lcd_sys";
+			interrupts = <0 84 0>, <0 85 0>, <0 86 0>;
+			clocks = <&cmu CLK_SCLK_FIMD0>, <&cmu CLK_FIMD0>;
+			clock-names = "sclk_fimd", "fimd";
+			samsung,power-domain = <&pd_lcd0>;
+			samsung,sysreg = <&sys_reg>;
+			status = "disabled";
+		};
+
+		dsi_0: dsi@11C80000 {
+			compatible = "samsung,exynos3250-mipi-dsi";
+			reg = <0x11C80000 0x10000>;
+			interrupts = <0 83 0>;
+			samsung,phy-type = <0>;
+			samsung,power-domain = <&pd_lcd0>;
+			phys = <&mipi_phy 1>;
+			phy-names = "dsim";
+			clocks = <&cmu CLK_DSIM0>, <&cmu CLK_SCLK_MIPI0>;
+			clock-names = "bus_clk", "pll_clk";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
 		mshc_0: mshc@12510000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12510000 0x1000>;
diff --git a/arch/arm/mach-shmobile/board-koelsch-reference.c b/arch/arm/mach-shmobile/board-koelsch-reference.c
index 9db5e6774fb7..46aa540133d6 100644
--- a/arch/arm/mach-shmobile/board-koelsch-reference.c
+++ b/arch/arm/mach-shmobile/board-koelsch-reference.c
@@ -41,16 +41,15 @@ static struct rcar_du_encoder_data koelsch_du_encoders[] = {
 			.width_mm = 210,
 			.height_mm = 158,
 			.mode = {
-				.clock = 65000,
-				.hdisplay = 1024,
-				.hsync_start = 1048,
-				.hsync_end = 1184,
-				.htotal = 1344,
-				.vdisplay = 768,
-				.vsync_start = 771,
-				.vsync_end = 777,
-				.vtotal = 806,
-				.flags = 0,
+				.pixelclock = 65000000,
+				.hactive = 1024,
+				.hfront_porch = 20,
+				.hback_porch = 160,
+				.hsync_len = 136,
+				.vactive = 768,
+				.vfront_porch = 3,
+				.vback_porch = 29,
+				.vsync_len = 6,
 			},
 		},
 	},
diff --git a/arch/arm/mach-shmobile/board-koelsch.c b/arch/arm/mach-shmobile/board-koelsch.c
index 126a8b4ec491..7111b5c1d67b 100644
--- a/arch/arm/mach-shmobile/board-koelsch.c
+++ b/arch/arm/mach-shmobile/board-koelsch.c
@@ -63,16 +63,15 @@ static struct rcar_du_encoder_data koelsch_du_encoders[] = {
 			.width_mm = 210,
 			.height_mm = 158,
 			.mode = {
-				.clock = 65000,
-				.hdisplay = 1024,
-				.hsync_start = 1048,
-				.hsync_end = 1184,
-				.htotal = 1344,
-				.vdisplay = 768,
-				.vsync_start = 771,
-				.vsync_end = 777,
-				.vtotal = 806,
-				.flags = 0,
+				.pixelclock = 65000000,
+				.hactive = 1024,
+				.hfront_porch = 20,
+				.hback_porch = 160,
+				.hsync_len = 136,
+				.vactive = 768,
+				.vfront_porch = 3,
+				.vback_porch = 29,
+				.vsync_len = 6,
 			},
 		},
 	},
diff --git a/arch/arm/mach-shmobile/board-lager-reference.c b/arch/arm/mach-shmobile/board-lager-reference.c
index 2a05c02bec39..bc4b48357dde 100644
--- a/arch/arm/mach-shmobile/board-lager-reference.c
+++ b/arch/arm/mach-shmobile/board-lager-reference.c
@@ -43,16 +43,15 @@ static struct rcar_du_encoder_data lager_du_encoders[] = {
 			.width_mm = 210,
 			.height_mm = 158,
 			.mode = {
-				.clock = 65000,
-				.hdisplay = 1024,
-				.hsync_start = 1048,
-				.hsync_end = 1184,
-				.htotal = 1344,
-				.vdisplay = 768,
-				.vsync_start = 771,
-				.vsync_end = 777,
-				.vtotal = 806,
-				.flags = 0,
+				.pixelclock = 65000000,
+				.hactive = 1024,
+				.hfront_porch = 20,
+				.hback_porch = 160,
+				.hsync_len = 136,
+				.vactive = 768,
+				.vfront_porch = 3,
+				.vback_porch = 29,
+				.vsync_len = 6,
 			},
 		},
 	},
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index f5a98e2942b3..571327b1c942 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -99,16 +99,15 @@ static struct rcar_du_encoder_data lager_du_encoders[] = {
 			.width_mm = 210,
 			.height_mm = 158,
 			.mode = {
-				.clock = 65000,
-				.hdisplay = 1024,
-				.hsync_start = 1048,
-				.hsync_end = 1184,
-				.htotal = 1344,
-				.vdisplay = 768,
-				.vsync_start = 771,
-				.vsync_end = 777,
-				.vtotal = 806,
-				.flags = 0,
+				.pixelclock = 65000000,
+				.hactive = 1024,
+				.hfront_porch = 20,
+				.hback_porch = 160,
+				.hsync_len = 136,
+				.vactive = 768,
+				.vfront_porch = 3,
+				.vback_porch = 29,
+				.vsync_len = 6,
 			},
 		},
 	},
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index e5cf4201e769..ce33d7825c49 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -192,16 +192,15 @@ static struct rcar_du_encoder_data du_encoders[] = {
 			.width_mm = 210,
 			.height_mm = 158,
 			.mode = {
-				.clock = 65000,
-				.hdisplay = 1024,
-				.hsync_start = 1048,
-				.hsync_end = 1184,
-				.htotal = 1344,
-				.vdisplay = 768,
-				.vsync_start = 771,
-				.vsync_end = 777,
-				.vtotal = 806,
-				.flags = 0,
+				.pixelclock = 65000000,
+				.hactive = 1024,
+				.hfront_porch = 20,
+				.hback_porch = 160,
+				.hsync_len = 136,
+				.vactive = 768,
+				.vfront_porch = 3,
+				.vback_porch = 29,
+				.vsync_len = 6,
 			},
 		},
 	},
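
[Editor's note] The five board-file hunks above all apply the same mechanical conversion from drm_display_mode-style timings to struct videomode fields. The general field mapping is sketched below for reference; it is not part of this patch (compare drm_display_mode_to_videomode() in drm_modes.c), and note that the patch picks a slightly different hfront_porch (20) than the raw arithmetic on the old mode would give (1048 - 1024 = 24):

```c
#include <linux/types.h>

/*
 * Reference sketch of the timing conversion used in the hunks above.
 * Horizontal fields shown; the vertical fields follow the same pattern
 * (e.g. vfront_porch = vsync_start - vdisplay = 771 - 768 = 3).
 */
static inline void mode_to_videomode(u32 clock_khz, u32 hdisplay,
				     u32 hsync_start, u32 hsync_end,
				     u32 htotal,
				     u64 *pixelclock, u32 *hactive,
				     u32 *hfront_porch, u32 *hsync_len,
				     u32 *hback_porch)
{
	*pixelclock   = (u64)clock_khz * 1000;	/* kHz -> Hz: 65000 -> 65000000 */
	*hactive      = hdisplay;
	*hfront_porch = hsync_start - hdisplay;
	*hsync_len    = hsync_end - hsync_start;	/* 1184 - 1048 = 136 */
	*hback_porch  = htotal - hsync_end;		/* 1344 - 1184 = 160 */
}
```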
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b066bb3ca01a..e3b4b0f02b3d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -8,6 +8,7 @@ menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
 	select HDMI
+	select FB_CMDLINE
 	select I2C
 	select I2C_ALGOBIT
 	select DMA_SHARED_BUFFER
@@ -24,12 +25,6 @@ config DRM_MIPI_DSI
 	bool
 	depends on DRM
 
-config DRM_USB
-	tristate
-	depends on DRM
-	depends on USB_SUPPORT && USB_ARCH_HAS_HCD
-	select USB
-
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
@@ -115,6 +110,7 @@ config DRM_RADEON
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
 	select INTERVAL_TREE
+	select MMU_NOTIFIER
 	help
 	  Choose this option if you have an ATI Radeon graphics card. There
 	  are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4a55d59ccd22..9292a761ea6d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
 
 ccflags-y := -Iinclude/drm
 
-drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_context.o drm_dma.o \
 		drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
@@ -22,8 +22,6 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 
-drm-usb-y := drm_usb.o
-
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
@@ -36,7 +34,6 @@ CFLAGS_drm_trace_points.o := -I$(src)
 
 obj-$(CONFIG_DRM) += drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
-obj-$(CONFIG_DRM_USB) += drm_usb.o
 obj-$(CONFIG_DRM_TTM) += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e2d5792b140f..f672e6ad8afa 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -308,6 +308,7 @@ static struct drm_driver armada_drm_driver = {
 	.postclose = NULL,
 	.lastclose = armada_drm_lastclose,
 	.unload = armada_drm_unload,
+	.set_busid = drm_platform_set_busid,
 	.get_vblank_counter = drm_vblank_count,
 	.enable_vblank = armada_drm_enable_vblank,
 	.disable_vblank = armada_drm_disable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index 00b6cd461a03..b000ea3a829a 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -8,6 +8,8 @@
 #ifndef ARMADA_GEM_H
 #define ARMADA_GEM_H
 
+#include <drm/drm_gem.h>
+
 /* GEM */
 struct armada_gem_object {
 	struct drm_gem_object obj;
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 5da4b62285fa..76f07f38b941 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -379,11 +379,39 @@ static bool ast_init_dvo(struct drm_device *dev)
 	return true;
 }
 
+
+static void ast_init_analog(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u32 data;
+
+	/*
+	 * Set DAC source to VGA mode in SCU2C via the P2A
+	 * bridge. First configure the P2U to target the SCU
+	 * in case it isn't at this stage.
+	 */
+	ast_write32(ast, 0xf004, 0x1e6e0000);
+	ast_write32(ast, 0xf000, 0x1);
+
+	/* Then unlock the SCU with the magic password */
+	ast_write32(ast, 0x12000, 0x1688a8a8);
+	ast_write32(ast, 0x12000, 0x1688a8a8);
+	ast_write32(ast, 0x12000, 0x1688a8a8);
+
+	/* Finally, clear bits [17:16] of SCU2c */
+	data = ast_read32(ast, 0x1202c);
+	data &= 0xfffcffff;
+	ast_write32(ast, 0, data);
+
+	/* Disable DVO */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00);
+}
+
 void ast_init_3rdtx(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 	u8 jreg;
-	u32 data;
+
 	if (ast->chip == AST2300 || ast->chip == AST2400) {
 		jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
 		switch (jreg & 0x0e) {
@@ -399,12 +427,8 @@ void ast_init_3rdtx(struct drm_device *dev)
399 default: 427 default:
400 if (ast->tx_chip_type == AST_TX_SIL164) 428 if (ast->tx_chip_type == AST_TX_SIL164)
401 ast_init_dvo(dev); 429 ast_init_dvo(dev);
402 else { 430 else
403 ast_write32(ast, 0x12000, 0x1688a8a8); 431 ast_init_analog(dev);
404 data = ast_read32(ast, 0x1202c);
405 data &= 0xfffcffff;
406 ast_write32(ast, 0, data);
407 }
408 } 432 }
409 } 433 }
410} 434}
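This refactor pulls the open-coded SCU poke out of ast_init_3rdtx() into ast_init_analog() and documents it. The access pattern recurs in the ast_main.c hunks below, so it is worth spelling out: point the P2A bridge at the SCU block, unlock the SCU with the magic value, then read-modify-write a register. A sketch of the generalized sequence, with a hypothetical helper name (ast_scu_rmw is not in the patch):

static void ast_scu_rmw(struct ast_private *ast, u32 reg, u32 clr, u32 set)
{
	u32 data;

	/* Point the P2A bridge at the SCU block (0x1e6e0000) and enable it */
	ast_write32(ast, 0xf004, 0x1e6e0000);
	ast_write32(ast, 0xf000, 0x1);

	/* Unlock the SCU with the magic password */
	ast_write32(ast, 0x12000, 0x1688a8a8);

	data = ast_read32(ast, reg);
	ast_write32(ast, reg, (data & ~clr) | set);
}

Note that the hunk writes the merged value back to offset 0 rather than 0x1202c; that behaviour is carried over verbatim from the block it replaces.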
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index f19682a93c24..9a32d9dfdd26 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -199,6 +199,7 @@ static struct drm_driver driver = {
199 199
200 .load = ast_driver_load, 200 .load = ast_driver_load,
201 .unload = ast_driver_unload, 201 .unload = ast_driver_unload,
202 .set_busid = drm_pci_set_busid,
202 203
203 .fops = &ast_fops, 204 .fops = &ast_fops,
204 .name = DRIVER_NAME, 205 .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 957d4fabf1e1..86205a28e56b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -36,6 +36,8 @@
36#include <drm/ttm/ttm_memory.h> 36#include <drm/ttm/ttm_memory.h>
37#include <drm/ttm/ttm_module.h> 37#include <drm/ttm/ttm_module.h>
38 38
39#include <drm/drm_gem.h>
40
39#include <linux/i2c.h> 41#include <linux/i2c.h>
40#include <linux/i2c-algo-bit.h> 42#include <linux/i2c-algo-bit.h>
41 43
@@ -125,8 +127,9 @@ struct ast_gem_object;
125 127
126#define AST_IO_AR_PORT_WRITE (0x40) 128#define AST_IO_AR_PORT_WRITE (0x40)
127#define AST_IO_MISC_PORT_WRITE (0x42) 129#define AST_IO_MISC_PORT_WRITE (0x42)
130#define AST_IO_VGA_ENABLE_PORT (0x43)
128#define AST_IO_SEQ_PORT (0x44) 131#define AST_IO_SEQ_PORT (0x44)
129#define AST_DAC_INDEX_READ (0x3c7) 132#define AST_IO_DAC_INDEX_READ (0x47)
130#define AST_IO_DAC_INDEX_WRITE (0x48) 133#define AST_IO_DAC_INDEX_WRITE (0x48)
131#define AST_IO_DAC_DATA (0x49) 134#define AST_IO_DAC_DATA (0x49)
132#define AST_IO_GR_PORT (0x4E) 135#define AST_IO_GR_PORT (0x4E)
@@ -134,6 +137,8 @@ struct ast_gem_object;
134#define AST_IO_INPUT_STATUS1_READ (0x5A) 137#define AST_IO_INPUT_STATUS1_READ (0x5A)
135#define AST_IO_MISC_PORT_READ (0x4C) 138#define AST_IO_MISC_PORT_READ (0x4C)
136 139
140#define AST_IO_MM_OFFSET (0x380)
141
137#define __ast_read(x) \ 142#define __ast_read(x) \
138static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ 143static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
139u##x val = 0;\ 144u##x val = 0;\
@@ -316,7 +321,7 @@ struct ast_bo {
316 struct ttm_placement placement; 321 struct ttm_placement placement;
317 struct ttm_bo_kmap_obj kmap; 322 struct ttm_bo_kmap_obj kmap;
318 struct drm_gem_object gem; 323 struct drm_gem_object gem;
319 u32 placements[3]; 324 struct ttm_place placements[3];
320 int pin_count; 325 int pin_count;
321}; 326};
322#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem) 327#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
@@ -381,6 +386,9 @@ int ast_bo_push_sysram(struct ast_bo *bo);
381int ast_mmap(struct file *filp, struct vm_area_struct *vma); 386int ast_mmap(struct file *filp, struct vm_area_struct *vma);
382 387
383/* ast post */ 388/* ast post */
389void ast_enable_vga(struct drm_device *dev);
390void ast_enable_mmio(struct drm_device *dev);
391bool ast_is_vga_enabled(struct drm_device *dev);
384void ast_post_gpu(struct drm_device *dev); 392void ast_post_gpu(struct drm_device *dev);
385u32 ast_mindwm(struct ast_private *ast, u32 r); 393u32 ast_mindwm(struct ast_private *ast, u32 r);
386void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); 394void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
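ast_drv.h generates its register accessors from a template macro, and the new AST_IO_MM_OFFSET lets those same accessors reach the "IO" registers through the memory BAR (see the ast_main.c hunk below). Only the first lines of the macro are visible in the hunk; assuming the usual MMIO body, __ast_read(32) expands to roughly:

static inline u32 ast_read32(struct ast_private *ast, u32 reg)
{
	u32 val = 0;
	val = ioread32(ast->regs + reg);	/* assumed MMIO load */
	return val;
}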
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index cba45c774552..5c60ae524c45 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -186,7 +186,8 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
186static int astfb_create(struct drm_fb_helper *helper, 186static int astfb_create(struct drm_fb_helper *helper,
187 struct drm_fb_helper_surface_size *sizes) 187 struct drm_fb_helper_surface_size *sizes)
188{ 188{
189 struct ast_fbdev *afbdev = (struct ast_fbdev *)helper; 189 struct ast_fbdev *afbdev =
190 container_of(helper, struct ast_fbdev, helper);
190 struct drm_device *dev = afbdev->helper.dev; 191 struct drm_device *dev = afbdev->helper.dev;
191 struct drm_mode_fb_cmd2 mode_cmd; 192 struct drm_mode_fb_cmd2 mode_cmd;
192 struct drm_framebuffer *fb; 193 struct drm_framebuffer *fb;
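The fbdev fix above replaces a raw cast with container_of(). The cast only works while helper happens to be the first member of struct ast_fbdev; container_of() computes the enclosing object from the member's offset, so it stays correct whatever the struct layout. A minimal illustration (struct shortened for the sketch):

struct ast_fbdev {
	struct drm_fb_helper helper;	/* need not be the first member */
	void *sysram;
};

static struct ast_fbdev *to_ast_fbdev(struct drm_fb_helper *helper)
{
	/* offsetof-based: valid wherever 'helper' sits in the struct */
	return container_of(helper, struct ast_fbdev, helper);
}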
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b792194e0d9c..035dacc93382 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -63,7 +63,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
63} 63}
64 64
65 65
66static int ast_detect_chip(struct drm_device *dev) 66static int ast_detect_chip(struct drm_device *dev, bool *need_post)
67{ 67{
68 struct ast_private *ast = dev->dev_private; 68 struct ast_private *ast = dev->dev_private;
69 uint32_t data, jreg; 69 uint32_t data, jreg;
@@ -110,6 +110,21 @@ static int ast_detect_chip(struct drm_device *dev)
110 } 110 }
111 } 111 }
112 112
113 /*
 114	 * If VGA isn't enabled, we need to enable it now or subsequent
115 * access to the scratch registers will fail. We also inform
116 * our caller that it needs to POST the chip
117 * (Assumption: VGA not enabled -> need to POST)
118 */
119 if (!ast_is_vga_enabled(dev)) {
120 ast_enable_vga(dev);
121 ast_enable_mmio(dev);
122 DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
123 *need_post = true;
124 } else
125 *need_post = false;
126
127 /* Check if we support wide screen */
113 switch (ast->chip) { 128 switch (ast->chip) {
114 case AST1180: 129 case AST1180:
115 ast->support_wide_screen = true; 130 ast->support_wide_screen = true;
@@ -125,6 +140,7 @@ static int ast_detect_chip(struct drm_device *dev)
125 ast->support_wide_screen = true; 140 ast->support_wide_screen = true;
126 else { 141 else {
127 ast->support_wide_screen = false; 142 ast->support_wide_screen = false;
143 /* Read SCU7c (silicon revision register) */
128 ast_write32(ast, 0xf004, 0x1e6e0000); 144 ast_write32(ast, 0xf004, 0x1e6e0000);
129 ast_write32(ast, 0xf000, 0x1); 145 ast_write32(ast, 0xf000, 0x1);
130 data = ast_read32(ast, 0x1207c); 146 data = ast_read32(ast, 0x1207c);
@@ -137,11 +153,29 @@ static int ast_detect_chip(struct drm_device *dev)
137 break; 153 break;
138 } 154 }
139 155
156 /* Check 3rd Tx option (digital output afaik) */
140 ast->tx_chip_type = AST_TX_NONE; 157 ast->tx_chip_type = AST_TX_NONE;
141 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); 158
142 if (jreg & 0x80) 159 /*
 143 	 ast->tx_chip_type = AST_TX_SIL164; 160	 * VGACRA3 Enhanced Color Mode Register: check whether DVO is already
 161	 * enabled; if so, assume we have a SIL164 TMDS transmitter.
 162	 *
 163	 * Don't make that assumption if the chip wasn't enabled and
164 * is at power-on reset, otherwise we'll incorrectly "detect" a
165 * SIL164 when there is none.
166 */
167 if (!*need_post) {
168 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
169 if (jreg & 0x80)
170 ast->tx_chip_type = AST_TX_SIL164;
171 }
172
144 if ((ast->chip == AST2300) || (ast->chip == AST2400)) { 173 if ((ast->chip == AST2300) || (ast->chip == AST2400)) {
174 /*
 175	 * On AST2300 and 2400, look at the configuration set by the SoC in
 176	 * the SoC scratch register #1, bits 11:8 (interestingly marked
177 * as "reserved" in the spec)
178 */
145 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); 179 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
146 switch (jreg) { 180 switch (jreg) {
147 case 0x04: 181 case 0x04:
@@ -162,6 +196,17 @@ static int ast_detect_chip(struct drm_device *dev)
162 } 196 }
163 } 197 }
164 198
 199	 /* Report the detected transmitter for diagnostic purposes */
200 switch(ast->tx_chip_type) {
201 case AST_TX_SIL164:
202 DRM_INFO("Using Sil164 TMDS transmitter\n");
203 break;
204 case AST_TX_DP501:
205 DRM_INFO("Using DP501 DisplayPort transmitter\n");
206 break;
207 default:
208 DRM_INFO("Analog VGA only\n");
209 }
165 return 0; 210 return 0;
166} 211}
167 212
@@ -346,6 +391,7 @@ static u32 ast_get_vram_info(struct drm_device *dev)
346int ast_driver_load(struct drm_device *dev, unsigned long flags) 391int ast_driver_load(struct drm_device *dev, unsigned long flags)
347{ 392{
348 struct ast_private *ast; 393 struct ast_private *ast;
394 bool need_post;
349 int ret = 0; 395 int ret = 0;
350 396
351 ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL); 397 ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
@@ -360,13 +406,27 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
360 ret = -EIO; 406 ret = -EIO;
361 goto out_free; 407 goto out_free;
362 } 408 }
363 ast->ioregs = pci_iomap(dev->pdev, 2, 0); 409
410 /*
411 * If we don't have IO space at all, use MMIO now and
412 * assume the chip has MMIO enabled by default (rev 0x20
413 * and higher).
414 */
415 if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) {
416 DRM_INFO("platform has no IO space, trying MMIO\n");
417 ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
418 }
419
420 /* "map" IO regs if the above hasn't done so already */
364 if (!ast->ioregs) { 421 if (!ast->ioregs) {
365 ret = -EIO; 422 ast->ioregs = pci_iomap(dev->pdev, 2, 0);
366 goto out_free; 423 if (!ast->ioregs) {
424 ret = -EIO;
425 goto out_free;
426 }
367 } 427 }
368 428
369 ast_detect_chip(dev); 429 ast_detect_chip(dev, &need_post);
370 430
371 if (ast->chip != AST1180) { 431 if (ast->chip != AST1180) {
372 ast_get_dram_info(dev); 432 ast_get_dram_info(dev);
@@ -374,6 +434,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
374 DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); 434 DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
375 } 435 }
376 436
437 if (need_post)
438 ast_post_gpu(dev);
439
377 ret = ast_mm_init(ast); 440 ret = ast_mm_init(ast);
378 if (ret) 441 if (ret)
379 goto out_free; 442 goto out_free;
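Taken together, the ast_main.c hunks change driver load in two ways: registers can be reached through a fixed offset into the memory BAR when the platform (ppc64 in particular) has no legacy IO space, and a chip that comes up with VGA disabled is now POSTed. The resulting flow, condensed from the hunks above with error handling elided:

/* condensed from ast_driver_load() */
if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO))
	ast->ioregs = ast->regs + AST_IO_MM_OFFSET;	/* MMIO alias */
if (!ast->ioregs)
	ast->ioregs = pci_iomap(dev->pdev, 2, 0);	/* classic IO BAR */

ast_detect_chip(dev, &need_post);	/* enables VGA + MMIO if needed */

if (need_post)
	ast_post_gpu(dev);		/* full chip POST before modeset */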
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5389350244f2..9dc0fd5c1ea4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -80,6 +80,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
80 struct ast_private *ast = crtc->dev->dev_private; 80 struct ast_private *ast = crtc->dev->dev_private;
81 u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate; 81 u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
82 u32 hborder, vborder; 82 u32 hborder, vborder;
83 bool check_sync;
84 struct ast_vbios_enhtable *best = NULL;
83 85
84 switch (crtc->primary->fb->bits_per_pixel) { 86 switch (crtc->primary->fb->bits_per_pixel) {
85 case 8: 87 case 8:
@@ -141,14 +143,34 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
141 } 143 }
142 144
143 refresh_rate = drm_mode_vrefresh(mode); 145 refresh_rate = drm_mode_vrefresh(mode);
144 while (vbios_mode->enh_table->refresh_rate < refresh_rate) { 146 check_sync = vbios_mode->enh_table->flags & WideScreenMode;
145 vbios_mode->enh_table++; 147 do {
146 if ((vbios_mode->enh_table->refresh_rate > refresh_rate) || 148 struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
147 (vbios_mode->enh_table->refresh_rate == 0xff)) { 149
148 vbios_mode->enh_table--; 150 while (loop->refresh_rate != 0xff) {
149 break; 151 if ((check_sync) &&
152 (((mode->flags & DRM_MODE_FLAG_NVSYNC) &&
153 (loop->flags & PVSync)) ||
154 ((mode->flags & DRM_MODE_FLAG_PVSYNC) &&
155 (loop->flags & NVSync)) ||
156 ((mode->flags & DRM_MODE_FLAG_NHSYNC) &&
157 (loop->flags & PHSync)) ||
158 ((mode->flags & DRM_MODE_FLAG_PHSYNC) &&
159 (loop->flags & NHSync)))) {
160 loop++;
161 continue;
162 }
163 if (loop->refresh_rate <= refresh_rate
164 && (!best || loop->refresh_rate > best->refresh_rate))
165 best = loop;
166 loop++;
150 } 167 }
151 } 168 if (best || !check_sync)
169 break;
170 check_sync = 0;
171 } while (1);
172 if (best)
173 vbios_mode->enh_table = best;
152 174
153 hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; 175 hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
154 vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0; 176 vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
@@ -419,8 +441,10 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
419 struct ast_private *ast = dev->dev_private; 441 struct ast_private *ast = dev->dev_private;
420 u8 jreg; 442 u8 jreg;
421 443
422 jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ); 444 jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
423 jreg |= (vbios_mode->enh_table->flags & SyncNN); 445 jreg &= ~0xC0;
446 if (vbios_mode->enh_table->flags & NVSync) jreg |= 0x80;
447 if (vbios_mode->enh_table->flags & NHSync) jreg |= 0x40;
424 ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); 448 ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
425} 449}
426 450
@@ -1080,8 +1104,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
1080 srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; 1104 srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
1081 data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); 1105 data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
1082 data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); 1106 data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
1083 data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4); 1107 data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
1084 data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4); 1108 data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
1085 1109
1086 writel(data32.ul, dstxor); 1110 writel(data32.ul, dstxor);
1087 csum += data32.ul; 1111 csum += data32.ul;
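The old loop in ast_get_vbios_mode_info() advanced through the table until it passed the requested refresh rate, which could both overrun the table and pick a mode with the wrong sync polarity. The replacement is a two-pass best-match search; in simplified form (same semantics as the hunk, sync_polarity_conflicts is a hypothetical helper standing in for the four-way flag comparison):

best = NULL;
check_sync = widescreen;	/* honor sync polarity on the first pass */
do {
	for (loop = table; loop->refresh_rate != 0xff; loop++) {
		if (check_sync && sync_polarity_conflicts(mode, loop))
			continue;	/* requested +/- sync doesn't match */
		if (loop->refresh_rate <= refresh_rate &&
		    (!best || loop->refresh_rate > best->refresh_rate))
			best = loop;	/* highest rate not above request */
	}
	if (best || !check_sync)
		break;
	check_sync = false;		/* relax polarity and retry */
} while (1);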
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 38d437f3a267..810c51d92b99 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -33,18 +33,23 @@
33 33
34static void ast_init_dram_2300(struct drm_device *dev); 34static void ast_init_dram_2300(struct drm_device *dev);
35 35
36static void 36void ast_enable_vga(struct drm_device *dev)
37ast_enable_vga(struct drm_device *dev) 37{
38 struct ast_private *ast = dev->dev_private;
39
40 ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
41 ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
42}
43
44void ast_enable_mmio(struct drm_device *dev)
38{ 45{
39 struct ast_private *ast = dev->dev_private; 46 struct ast_private *ast = dev->dev_private;
40 47
41 ast_io_write8(ast, 0x43, 0x01); 48 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
42 ast_io_write8(ast, 0x42, 0x01);
43} 49}
44 50
45#if 0 /* will use later */ 51
46static bool 52bool ast_is_vga_enabled(struct drm_device *dev)
47ast_is_vga_enabled(struct drm_device *dev)
48{ 53{
49 struct ast_private *ast = dev->dev_private; 54 struct ast_private *ast = dev->dev_private;
50 u8 ch; 55 u8 ch;
@@ -52,7 +57,7 @@ ast_is_vga_enabled(struct drm_device *dev)
52 if (ast->chip == AST1180) { 57 if (ast->chip == AST1180) {
53 /* TODO 1180 */ 58 /* TODO 1180 */
54 } else { 59 } else {
55 ch = ast_io_read8(ast, 0x43); 60 ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
56 if (ch) { 61 if (ch) {
57 ast_open_key(ast); 62 ast_open_key(ast);
58 ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); 63 ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
@@ -61,7 +66,6 @@ ast_is_vga_enabled(struct drm_device *dev)
61 } 66 }
62 return 0; 67 return 0;
63} 68}
64#endif
65 69
66static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; 70static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
67static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff }; 71static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
@@ -371,6 +375,7 @@ void ast_post_gpu(struct drm_device *dev)
371 pci_write_config_dword(ast->dev->pdev, 0x04, reg); 375 pci_write_config_dword(ast->dev->pdev, 0x04, reg);
372 376
373 ast_enable_vga(dev); 377 ast_enable_vga(dev);
378 ast_enable_mmio(dev);
374 ast_open_key(ast); 379 ast_open_key(ast);
375 ast_set_def_ext_reg(dev); 380 ast_set_def_ext_reg(dev);
376 381
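ast_enable_mmio() is one line because the heavy lifting is in ast_set_index_reg_mask(), used throughout these hunks. Its presumed semantics (a sketch of the pattern, not the driver's implementation): select a register through the index port, then merge the new bits into the current value.

static void index_reg_rmw(struct ast_private *ast, u32 base,
			  u8 index, u8 preserve_mask, u8 set)
{
	u8 tmp;

	ast_io_write8(ast, base, index);		/* select register */
	tmp = ast_io_read8(ast, base + 1) & preserve_mask; /* keep masked bits */
	ast_io_write8(ast, base + 1, tmp | set);	/* merge and write back */
}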
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 05c01ea85294..3608d5aa7451 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -35,14 +35,18 @@
35#define HalfDCLK 0x00000002 35#define HalfDCLK 0x00000002
36#define DoubleScanMode 0x00000004 36#define DoubleScanMode 0x00000004
37#define LineCompareOff 0x00000008 37#define LineCompareOff 0x00000008
38#define SyncPP 0x00000000
39#define SyncPN 0x00000040
40#define SyncNP 0x00000080
41#define SyncNN 0x000000C0
42#define HBorder 0x00000020 38#define HBorder 0x00000020
43#define VBorder 0x00000010 39#define VBorder 0x00000010
44#define WideScreenMode 0x00000100 40#define WideScreenMode 0x00000100
45#define NewModeInfo 0x00000200 41#define NewModeInfo 0x00000200
42#define NHSync 0x00000400
43#define PHSync 0x00000800
44#define NVSync 0x00001000
45#define PVSync 0x00002000
46#define SyncPP (PVSync | PHSync)
47#define SyncPN (PVSync | NHSync)
48#define SyncNP (NVSync | PHSync)
49#define SyncNN (NVSync | NHSync)
46 50
47/* DCLK Index */ 51/* DCLK Index */
48#define VCLK25_175 0x00 52#define VCLK25_175 0x00
@@ -72,6 +76,7 @@
72#define VCLK119 0x17 76#define VCLK119 0x17
73#define VCLK85_5 0x18 77#define VCLK85_5 0x18
74#define VCLK97_75 0x19 78#define VCLK97_75 0x19
79#define VCLK118_25 0x1A
75 80
76static struct ast_vbios_dclk_info dclk_table[] = { 81static struct ast_vbios_dclk_info dclk_table[] = {
77 {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ 82 {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
@@ -100,6 +105,7 @@ static struct ast_vbios_dclk_info dclk_table[] = {
100 {0x77, 0x58, 0x80}, /* 17: VCLK119 */ 105 {0x77, 0x58, 0x80}, /* 17: VCLK119 */
101 {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ 106 {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
102 {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ 107 {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
108 {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */
103}; 109};
104 110
105static struct ast_vbios_stdtable vbios_stdtable[] = { 111static struct ast_vbios_stdtable vbios_stdtable[] = {
@@ -246,8 +252,10 @@ static struct ast_vbios_enhtable res_1360x768[] = {
246static struct ast_vbios_enhtable res_1600x900[] = { 252static struct ast_vbios_enhtable res_1600x900[] = {
247 {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ 253 {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
248 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A }, 254 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A },
249 {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* end */ 255 {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
250 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x3A } 256 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A },
257 {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
258 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A },
251}; 259};
252 260
253static struct ast_vbios_enhtable res_1920x1080[] = { 261static struct ast_vbios_enhtable res_1920x1080[] = {
@@ -261,11 +269,11 @@ static struct ast_vbios_enhtable res_1920x1080[] = {
261/* 16:10 */ 269/* 16:10 */
262static struct ast_vbios_enhtable res_1280x800[] = { 270static struct ast_vbios_enhtable res_1280x800[] = {
263 {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ 271 {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
264 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 35 }, 272 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
265 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ 273 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
266 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 }, 274 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
267 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ 275 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
268 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x35 }, 276 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x35 },
269 277
270}; 278};
271 279
@@ -273,24 +281,24 @@ static struct ast_vbios_enhtable res_1440x900[] = {
273 {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */ 281 {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
274 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 }, 282 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
275 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ 283 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
276 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 }, 284 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
277 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ 285 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
278 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x36 }, 286 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 },
279}; 287};
280 288
281static struct ast_vbios_enhtable res_1680x1050[] = { 289static struct ast_vbios_enhtable res_1680x1050[] = {
282 {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ 290 {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
283 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 }, 291 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
284 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ 292 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
285 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 }, 293 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
286 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ 294 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
287 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x37 }, 295 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 },
288}; 296};
289 297
290static struct ast_vbios_enhtable res_1920x1200[] = { 298static struct ast_vbios_enhtable res_1920x1200[] = {
291 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */ 299 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/
292 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 }, 300 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 },
293 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */ 301 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */
294 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 }, 302 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 },
295}; 303};
296 304
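The table rework replaces the packed SyncPP..SyncNN values with one flag per polarity, so ast_set_sync_reg() (earlier hunk) can translate each axis individually into MISC register bits 6 (hsync) and 7 (vsync). Restated from the hunks above:

/* per-polarity flags, new in this patch */
#define NHSync 0x00000400	/* negative hsync */
#define PHSync 0x00000800	/* positive hsync */
#define NVSync 0x00001000	/* negative vsync */
#define PVSync 0x00002000	/* positive vsync */

/* the old combined names survive as compositions, e.g. */
#define SyncNN (NVSync | NHSync)

/* ast_set_sync_reg() then does, in essence: */
jreg &= ~0xC0;				/* clear MISC bits 7:6 */
if (flags & NVSync) jreg |= 0x80;	/* bit 7: vsync negative */
if (flags & NHSync) jreg |= 0x40;	/* bit 6: hsync negative */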
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index b8246227bab0..08f82eae6939 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -293,18 +293,22 @@ void ast_mm_fini(struct ast_private *ast)
293void ast_ttm_placement(struct ast_bo *bo, int domain) 293void ast_ttm_placement(struct ast_bo *bo, int domain)
294{ 294{
295 u32 c = 0; 295 u32 c = 0;
296 bo->placement.fpfn = 0; 296 unsigned i;
297 bo->placement.lpfn = 0; 297
298 bo->placement.placement = bo->placements; 298 bo->placement.placement = bo->placements;
299 bo->placement.busy_placement = bo->placements; 299 bo->placement.busy_placement = bo->placements;
300 if (domain & TTM_PL_FLAG_VRAM) 300 if (domain & TTM_PL_FLAG_VRAM)
301 bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; 301 bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
302 if (domain & TTM_PL_FLAG_SYSTEM) 302 if (domain & TTM_PL_FLAG_SYSTEM)
303 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 303 bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
304 if (!c) 304 if (!c)
305 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 305 bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
306 bo->placement.num_placement = c; 306 bo->placement.num_placement = c;
307 bo->placement.num_busy_placement = c; 307 bo->placement.num_busy_placement = c;
308 for (i = 0; i < c; ++i) {
309 bo->placements[i].fpfn = 0;
310 bo->placements[i].lpfn = 0;
311 }
308} 312}
309 313
310int ast_bo_create(struct drm_device *dev, int size, int align, 314int ast_bo_create(struct drm_device *dev, int size, int align,
@@ -335,7 +339,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
335 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, 339 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
336 ttm_bo_type_device, &astbo->placement, 340 ttm_bo_type_device, &astbo->placement,
337 align >> PAGE_SHIFT, false, NULL, acc_size, 341 align >> PAGE_SHIFT, false, NULL, acc_size,
338 NULL, ast_bo_ttm_destroy); 342 NULL, NULL, ast_bo_ttm_destroy);
339 if (ret) 343 if (ret)
340 return ret; 344 return ret;
341 345
@@ -360,7 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
360 364
361 ast_ttm_placement(bo, pl_flag); 365 ast_ttm_placement(bo, pl_flag);
362 for (i = 0; i < bo->placement.num_placement; i++) 366 for (i = 0; i < bo->placement.num_placement; i++)
363 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 367 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
364 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 368 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
365 if (ret) 369 if (ret)
366 return ret; 370 return ret;
@@ -383,7 +387,7 @@ int ast_bo_unpin(struct ast_bo *bo)
383 return 0; 387 return 0;
384 388
385 for (i = 0; i < bo->placement.num_placement ; i++) 389 for (i = 0; i < bo->placement.num_placement ; i++)
386 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 390 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
387 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
388 if (ret) 392 if (ret)
389 return ret; 393 return ret;
@@ -407,7 +411,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
407 411
408 ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); 412 ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
409 for (i = 0; i < bo->placement.num_placement ; i++) 413 for (i = 0; i < bo->placement.num_placement ; i++)
410 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 414 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
411 415
412 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 416 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
413 if (ret) { 417 if (ret) {
@@ -423,7 +427,7 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma)
423 struct ast_private *ast; 427 struct ast_private *ast;
424 428
425 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 429 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
426 return drm_mmap(filp, vma); 430 return -EINVAL;
427 431
428 file_priv = filp->private_data; 432 file_priv = filp->private_data;
429 ast = file_priv->minor->dev->dev_private; 433 ast = file_priv->minor->dev->dev_private;
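The TTM changes across ast, bochs and cirrus follow the core TTM rework in this merge: placements are now struct ttm_place (flags plus a per-placement fpfn/lpfn page range) instead of bare u32 flags, and the range moved from ttm_placement into each place. Filling them now looks like this (condensed from the ast hunk above):

struct ttm_place places[3];
unsigned c = 0, i;

if (domain & TTM_PL_FLAG_VRAM)
	places[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			    TTM_PL_FLAG_VRAM;
if (domain & TTM_PL_FLAG_SYSTEM)
	places[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;

for (i = 0; i < c; ++i) {
	places[i].fpfn = 0;	/* no page-range restriction */
	places[i].lpfn = 0;
}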
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index c399dea27a3b..6c4d4b6eba80 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -34,6 +34,8 @@
34#include <linux/export.h> 34#include <linux/export.h>
35#include <drm/drmP.h> 35#include <drm/drmP.h>
36 36
37#include <drm/ati_pcigart.h>
38
37# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ 39# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
38 40
39static int drm_ati_alloc_pcigart_table(struct drm_device *dev, 41static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 7eb52dd44b01..71f2687fc3cc 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -7,6 +7,8 @@
7#include <drm/drm_crtc_helper.h> 7#include <drm/drm_crtc_helper.h>
8#include <drm/drm_fb_helper.h> 8#include <drm/drm_fb_helper.h>
9 9
10#include <drm/drm_gem.h>
11
10#include <ttm/ttm_bo_driver.h> 12#include <ttm/ttm_bo_driver.h>
11#include <ttm/ttm_page_alloc.h> 13#include <ttm/ttm_page_alloc.h>
12 14
@@ -99,7 +101,7 @@ struct bochs_bo {
99 struct ttm_placement placement; 101 struct ttm_placement placement;
100 struct ttm_bo_kmap_obj kmap; 102 struct ttm_bo_kmap_obj kmap;
101 struct drm_gem_object gem; 103 struct drm_gem_object gem;
102 u32 placements[3]; 104 struct ttm_place placements[3];
103 int pin_count; 105 int pin_count;
104}; 106};
105 107
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9738e9b14708..98837bde2d25 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -82,6 +82,7 @@ static struct drm_driver bochs_driver = {
82 .driver_features = DRIVER_GEM | DRIVER_MODESET, 82 .driver_features = DRIVER_GEM | DRIVER_MODESET,
83 .load = bochs_load, 83 .load = bochs_load,
84 .unload = bochs_unload, 84 .unload = bochs_unload,
85 .set_busid = drm_pci_set_busid,
85 .fops = &bochs_fops, 86 .fops = &bochs_fops,
86 .name = "bochs-drm", 87 .name = "bochs-drm",
87 .desc = "bochs dispi vga interface (qemu stdvga)", 88 .desc = "bochs dispi vga interface (qemu stdvga)",
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 1728a1b0b813..66286ff518d4 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -257,20 +257,26 @@ void bochs_mm_fini(struct bochs_device *bochs)
257 257
258static void bochs_ttm_placement(struct bochs_bo *bo, int domain) 258static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
259{ 259{
260 unsigned i;
260 u32 c = 0; 261 u32 c = 0;
261 bo->placement.fpfn = 0;
262 bo->placement.lpfn = 0;
263 bo->placement.placement = bo->placements; 262 bo->placement.placement = bo->placements;
264 bo->placement.busy_placement = bo->placements; 263 bo->placement.busy_placement = bo->placements;
265 if (domain & TTM_PL_FLAG_VRAM) { 264 if (domain & TTM_PL_FLAG_VRAM) {
266 bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED 265 bo->placements[c++].flags = TTM_PL_FLAG_WC
266 | TTM_PL_FLAG_UNCACHED
267 | TTM_PL_FLAG_VRAM; 267 | TTM_PL_FLAG_VRAM;
268 } 268 }
269 if (domain & TTM_PL_FLAG_SYSTEM) { 269 if (domain & TTM_PL_FLAG_SYSTEM) {
270 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 270 bo->placements[c++].flags = TTM_PL_MASK_CACHING
271 | TTM_PL_FLAG_SYSTEM;
271 } 272 }
272 if (!c) { 273 if (!c) {
273 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 274 bo->placements[c++].flags = TTM_PL_MASK_CACHING
275 | TTM_PL_FLAG_SYSTEM;
276 }
277 for (i = 0; i < c; ++i) {
278 bo->placements[i].fpfn = 0;
279 bo->placements[i].lpfn = 0;
274 } 280 }
275 bo->placement.num_placement = c; 281 bo->placement.num_placement = c;
276 bo->placement.num_busy_placement = c; 282 bo->placement.num_busy_placement = c;
@@ -294,7 +300,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
294 300
295 bochs_ttm_placement(bo, pl_flag); 301 bochs_ttm_placement(bo, pl_flag);
296 for (i = 0; i < bo->placement.num_placement; i++) 302 for (i = 0; i < bo->placement.num_placement; i++)
297 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 303 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
298 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 304 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
299 if (ret) 305 if (ret)
300 return ret; 306 return ret;
@@ -319,7 +325,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
319 return 0; 325 return 0;
320 326
321 for (i = 0; i < bo->placement.num_placement; i++) 327 for (i = 0; i < bo->placement.num_placement; i++)
322 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 328 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
323 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 329 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
324 if (ret) 330 if (ret)
325 return ret; 331 return ret;
@@ -333,7 +339,7 @@ int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
333 struct bochs_device *bochs; 339 struct bochs_device *bochs;
334 340
335 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 341 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
336 return drm_mmap(filp, vma); 342 return -EINVAL;
337 343
338 file_priv = filp->private_data; 344 file_priv = filp->private_data;
339 bochs = file_priv->minor->dev->dev_private; 345 bochs = file_priv->minor->dev->dev_private;
@@ -371,7 +377,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
371 ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size, 377 ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
372 ttm_bo_type_device, &bochsbo->placement, 378 ttm_bo_type_device, &bochsbo->placement,
373 align >> PAGE_SHIFT, false, NULL, acc_size, 379 align >> PAGE_SHIFT, false, NULL, acc_size,
374 NULL, bochs_bo_ttm_destroy); 380 NULL, NULL, bochs_bo_ttm_destroy);
375 if (ret) 381 if (ret)
376 return ret; 382 return ret;
377 383
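With legacy map support moving behind drm_legacy_*, the TTM drivers no longer fall back to drm_mmap() for offsets below DRM_FILE_PAGE_OFFSET: such offsets can only be legacy maps, which these modesetting drivers never create, so they now fail cleanly. A sketch of the resulting handler (the ttm_bo_mmap tail is assumed from the usual TTM pattern):

int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct bochs_device *bochs;

	/* offsets below DRM_FILE_PAGE_OFFSET were legacy maps;
	 * KMS-only drivers reject them outright now */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	bochs = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
}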
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 919c73b94447..e705335101a5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -128,6 +128,7 @@ static struct drm_driver driver = {
128 .driver_features = DRIVER_MODESET | DRIVER_GEM, 128 .driver_features = DRIVER_MODESET | DRIVER_GEM,
129 .load = cirrus_driver_load, 129 .load = cirrus_driver_load,
130 .unload = cirrus_driver_unload, 130 .unload = cirrus_driver_unload,
131 .set_busid = drm_pci_set_busid,
131 .fops = &cirrus_driver_fops, 132 .fops = &cirrus_driver_fops,
132 .name = DRIVER_NAME, 133 .name = DRIVER_NAME,
133 .desc = DRIVER_DESC, 134 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 401c890b6c6a..d44e69daa239 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -21,6 +21,8 @@
21#include <drm/ttm/ttm_memory.h> 21#include <drm/ttm/ttm_memory.h>
22#include <drm/ttm/ttm_module.h> 22#include <drm/ttm/ttm_module.h>
23 23
24#include <drm/drm_gem.h>
25
24#define DRIVER_AUTHOR "Matthew Garrett" 26#define DRIVER_AUTHOR "Matthew Garrett"
25 27
26#define DRIVER_NAME "cirrus" 28#define DRIVER_NAME "cirrus"
@@ -163,7 +165,7 @@ struct cirrus_bo {
163 struct ttm_placement placement; 165 struct ttm_placement placement;
164 struct ttm_bo_kmap_obj kmap; 166 struct ttm_bo_kmap_obj kmap;
165 struct drm_gem_object gem; 167 struct drm_gem_object gem;
166 u32 placements[3]; 168 struct ttm_place placements[3];
167 int pin_count; 169 int pin_count;
168}; 170};
169#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem) 171#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2a135f253e29..d231b1c317af 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -160,7 +160,8 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
160static int cirrusfb_create(struct drm_fb_helper *helper, 160static int cirrusfb_create(struct drm_fb_helper *helper,
161 struct drm_fb_helper_surface_size *sizes) 161 struct drm_fb_helper_surface_size *sizes)
162{ 162{
163 struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper; 163 struct cirrus_fbdev *gfbdev =
164 container_of(helper, struct cirrus_fbdev, helper);
164 struct drm_device *dev = gfbdev->helper.dev; 165 struct drm_device *dev = gfbdev->helper.dev;
165 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private; 166 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
166 struct fb_info *info; 167 struct fb_info *info;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 92e6b7786097..dfffd528517a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -298,18 +298,21 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
298void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) 298void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
299{ 299{
300 u32 c = 0; 300 u32 c = 0;
301 bo->placement.fpfn = 0; 301 unsigned i;
302 bo->placement.lpfn = 0;
303 bo->placement.placement = bo->placements; 302 bo->placement.placement = bo->placements;
304 bo->placement.busy_placement = bo->placements; 303 bo->placement.busy_placement = bo->placements;
305 if (domain & TTM_PL_FLAG_VRAM) 304 if (domain & TTM_PL_FLAG_VRAM)
306 bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; 305 bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
307 if (domain & TTM_PL_FLAG_SYSTEM) 306 if (domain & TTM_PL_FLAG_SYSTEM)
308 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 307 bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
309 if (!c) 308 if (!c)
310 bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 309 bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
311 bo->placement.num_placement = c; 310 bo->placement.num_placement = c;
312 bo->placement.num_busy_placement = c; 311 bo->placement.num_busy_placement = c;
312 for (i = 0; i < c; ++i) {
313 bo->placements[i].fpfn = 0;
314 bo->placements[i].lpfn = 0;
315 }
313} 316}
314 317
315int cirrus_bo_create(struct drm_device *dev, int size, int align, 318int cirrus_bo_create(struct drm_device *dev, int size, int align,
@@ -340,7 +343,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
340 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, 343 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
341 ttm_bo_type_device, &cirrusbo->placement, 344 ttm_bo_type_device, &cirrusbo->placement,
342 align >> PAGE_SHIFT, false, NULL, acc_size, 345 align >> PAGE_SHIFT, false, NULL, acc_size,
343 NULL, cirrus_bo_ttm_destroy); 346 NULL, NULL, cirrus_bo_ttm_destroy);
344 if (ret) 347 if (ret)
345 return ret; 348 return ret;
346 349
@@ -365,7 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
365 368
366 cirrus_ttm_placement(bo, pl_flag); 369 cirrus_ttm_placement(bo, pl_flag);
367 for (i = 0; i < bo->placement.num_placement; i++) 370 for (i = 0; i < bo->placement.num_placement; i++)
368 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 371 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
369 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 372 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
370 if (ret) 373 if (ret)
371 return ret; 374 return ret;
@@ -392,7 +395,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
392 395
393 cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); 396 cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
394 for (i = 0; i < bo->placement.num_placement ; i++) 397 for (i = 0; i < bo->placement.num_placement ; i++)
395 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 398 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
396 399
397 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 400 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
398 if (ret) { 401 if (ret) {
@@ -408,7 +411,7 @@ int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
408 struct cirrus_device *cirrus; 411 struct cirrus_device *cirrus;
409 412
410 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 413 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
411 return drm_mmap(filp, vma); 414 return -EINVAL;
412 415
413 file_priv = filp->private_data; 416 file_priv = filp->private_data;
414 cirrus = file_priv->minor->dev->dev_private; 417 cirrus = file_priv->minor->dev->dev_private;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index dde205cef384..4b2b4aa5033b 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -34,6 +34,7 @@
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include "drm_legacy.h"
37 38
38#if __OS_HAS_AGP 39#if __OS_HAS_AGP
39 40
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 3cedae12b3c1..fc8e8aaa34fb 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -34,6 +34,13 @@
34 */ 34 */
35 35
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include "drm_internal.h"
38
39struct drm_magic_entry {
40 struct list_head head;
41 struct drm_hash_item hash_item;
42 struct drm_file *priv;
43};
37 44
38/** 45/**
39 * Find the file with the given magic number. 46 * Find the file with the given magic number.
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 61acb8f6756d..569064a00693 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1,18 +1,13 @@
1/**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/* 1/*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com 2 * Legacy: Generic DRM Buffer Management
11 * 3 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved. 6 * All Rights Reserved.
15 * 7 *
8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9 * Author: Gareth Hughes <gareth@valinux.com>
10 *
16 * Permission is hereby granted, free of charge, to any person obtaining a 11 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"), 12 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation 13 * to deal in the Software without restriction, including without limitation
@@ -39,6 +34,7 @@
39#include <linux/export.h> 34#include <linux/export.h>
40#include <asm/shmparam.h> 35#include <asm/shmparam.h>
41#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include "drm_legacy.h"
42 38
43static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, 39static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
44 struct drm_local_map *map) 40 struct drm_local_map *map)
@@ -365,9 +361,9 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
365 return 0; 361 return 0;
366} 362}
367 363
368int drm_addmap(struct drm_device * dev, resource_size_t offset, 364int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
369 unsigned int size, enum drm_map_type type, 365 unsigned int size, enum drm_map_type type,
370 enum drm_map_flags flags, struct drm_local_map ** map_ptr) 366 enum drm_map_flags flags, struct drm_local_map **map_ptr)
371{ 367{
372 struct drm_map_list *list; 368 struct drm_map_list *list;
373 int rc; 369 int rc;
@@ -377,8 +373,7 @@ int drm_addmap(struct drm_device * dev, resource_size_t offset,
377 *map_ptr = list->map; 373 *map_ptr = list->map;
378 return rc; 374 return rc;
379} 375}
380 376EXPORT_SYMBOL(drm_legacy_addmap);
381EXPORT_SYMBOL(drm_addmap);
382 377
383/** 378/**
384 * Ioctl to specify a range of memory that is available for mapping by a 379 * Ioctl to specify a range of memory that is available for mapping by a
@@ -391,8 +386,8 @@ EXPORT_SYMBOL(drm_addmap);
391 * \return zero on success or a negative value on error. 386 * \return zero on success or a negative value on error.
392 * 387 *
393 */ 388 */
394int drm_addmap_ioctl(struct drm_device *dev, void *data, 389int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
395 struct drm_file *file_priv) 390 struct drm_file *file_priv)
396{ 391{
397 struct drm_map *map = data; 392 struct drm_map *map = data;
398 struct drm_map_list *maplist; 393 struct drm_map_list *maplist;
@@ -429,9 +424,9 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
 429	 * it's being used, and free any associated resources (such as MTRRs) if it's not 424	 * it's being used, and free any associated resources (such as MTRRs) if it's not
 430	 * in use. 425	 * in use.
431 * 426 *
432 * \sa drm_addmap 427 * \sa drm_legacy_addmap
433 */ 428 */
434int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) 429int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
435{ 430{
436 struct drm_map_list *r_list = NULL, *list_t; 431 struct drm_map_list *r_list = NULL, *list_t;
437 drm_dma_handle_t dmah; 432 drm_dma_handle_t dmah;
@@ -478,26 +473,26 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
478 dmah.vaddr = map->handle; 473 dmah.vaddr = map->handle;
479 dmah.busaddr = map->offset; 474 dmah.busaddr = map->offset;
480 dmah.size = map->size; 475 dmah.size = map->size;
481 __drm_pci_free(dev, &dmah); 476 __drm_legacy_pci_free(dev, &dmah);
482 break; 477 break;
483 } 478 }
484 kfree(map); 479 kfree(map);
485 480
486 return 0; 481 return 0;
487} 482}
488EXPORT_SYMBOL(drm_rmmap_locked); 483EXPORT_SYMBOL(drm_legacy_rmmap_locked);
489 484
490int drm_rmmap(struct drm_device *dev, struct drm_local_map *map) 485int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
491{ 486{
492 int ret; 487 int ret;
493 488
494 mutex_lock(&dev->struct_mutex); 489 mutex_lock(&dev->struct_mutex);
495 ret = drm_rmmap_locked(dev, map); 490 ret = drm_legacy_rmmap_locked(dev, map);
496 mutex_unlock(&dev->struct_mutex); 491 mutex_unlock(&dev->struct_mutex);
497 492
498 return ret; 493 return ret;
499} 494}
500EXPORT_SYMBOL(drm_rmmap); 495EXPORT_SYMBOL(drm_legacy_rmmap);
501 496
502/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on 497/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
503 * the last close of the device, and this is necessary for cleanup when things 498 * the last close of the device, and this is necessary for cleanup when things
@@ -514,8 +509,8 @@ EXPORT_SYMBOL(drm_rmmap);
514 * \param arg pointer to a struct drm_map structure. 509 * \param arg pointer to a struct drm_map structure.
515 * \return zero on success or a negative value on error. 510 * \return zero on success or a negative value on error.
516 */ 511 */
517int drm_rmmap_ioctl(struct drm_device *dev, void *data, 512int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
518 struct drm_file *file_priv) 513 struct drm_file *file_priv)
519{ 514{
520 struct drm_map *request = data; 515 struct drm_map *request = data;
521 struct drm_local_map *map = NULL; 516 struct drm_local_map *map = NULL;
@@ -546,7 +541,7 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
546 return 0; 541 return 0;
547 } 542 }
548 543
549 ret = drm_rmmap_locked(dev, map); 544 ret = drm_legacy_rmmap_locked(dev, map);
550 545
551 mutex_unlock(&dev->struct_mutex); 546 mutex_unlock(&dev->struct_mutex);
552 547
@@ -599,7 +594,8 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
599 * reallocates the buffer list of the same size order to accommodate the new 594 * reallocates the buffer list of the same size order to accommodate the new
600 * buffers. 595 * buffers.
601 */ 596 */
602int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) 597int drm_legacy_addbufs_agp(struct drm_device *dev,
598 struct drm_buf_desc *request)
603{ 599{
604 struct drm_device_dma *dma = dev->dma; 600 struct drm_device_dma *dma = dev->dma;
605 struct drm_buf_entry *entry; 601 struct drm_buf_entry *entry;
@@ -759,10 +755,11 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
759 atomic_dec(&dev->buf_alloc); 755 atomic_dec(&dev->buf_alloc);
760 return 0; 756 return 0;
761} 757}
762EXPORT_SYMBOL(drm_addbufs_agp); 758EXPORT_SYMBOL(drm_legacy_addbufs_agp);
763#endif /* __OS_HAS_AGP */ 759#endif /* __OS_HAS_AGP */
764 760
765int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) 761int drm_legacy_addbufs_pci(struct drm_device *dev,
762 struct drm_buf_desc *request)
766{ 763{
767 struct drm_device_dma *dma = dev->dma; 764 struct drm_device_dma *dma = dev->dma;
768 int count; 765 int count;
@@ -964,9 +961,10 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
964 return 0; 961 return 0;
965 962
966} 963}
967EXPORT_SYMBOL(drm_addbufs_pci); 964EXPORT_SYMBOL(drm_legacy_addbufs_pci);
968 965
969static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request) 966static int drm_legacy_addbufs_sg(struct drm_device *dev,
967 struct drm_buf_desc *request)
970{ 968{
971 struct drm_device_dma *dma = dev->dma; 969 struct drm_device_dma *dma = dev->dma;
972 struct drm_buf_entry *entry; 970 struct drm_buf_entry *entry;
@@ -1135,8 +1133,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1135 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent 1133 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1136 * PCI memory respectively. 1134 * PCI memory respectively.
1137 */ 1135 */
1138int drm_addbufs(struct drm_device *dev, void *data, 1136int drm_legacy_addbufs(struct drm_device *dev, void *data,
1139 struct drm_file *file_priv) 1137 struct drm_file *file_priv)
1140{ 1138{
1141 struct drm_buf_desc *request = data; 1139 struct drm_buf_desc *request = data;
1142 int ret; 1140 int ret;
@@ -1149,15 +1147,15 @@ int drm_addbufs(struct drm_device *dev, void *data,
1149 1147
1150#if __OS_HAS_AGP 1148#if __OS_HAS_AGP
1151 if (request->flags & _DRM_AGP_BUFFER) 1149 if (request->flags & _DRM_AGP_BUFFER)
1152 ret = drm_addbufs_agp(dev, request); 1150 ret = drm_legacy_addbufs_agp(dev, request);
1153 else 1151 else
1154#endif 1152#endif
1155 if (request->flags & _DRM_SG_BUFFER) 1153 if (request->flags & _DRM_SG_BUFFER)
1156 ret = drm_addbufs_sg(dev, request); 1154 ret = drm_legacy_addbufs_sg(dev, request);
1157 else if (request->flags & _DRM_FB_BUFFER) 1155 else if (request->flags & _DRM_FB_BUFFER)
1158 ret = -EINVAL; 1156 ret = -EINVAL;
1159 else 1157 else
1160 ret = drm_addbufs_pci(dev, request); 1158 ret = drm_legacy_addbufs_pci(dev, request);
1161 1159
1162 return ret; 1160 return ret;
1163} 1161}
@@ -1179,8 +1177,8 @@ int drm_addbufs(struct drm_device *dev, void *data,
 1179	 * lock, preventing allocation of more buffers after this call. Information 1177	 * lock, preventing allocation of more buffers after this call. Information
1180 * about each requested buffer is then copied into user space. 1178 * about each requested buffer is then copied into user space.
1181 */ 1179 */
1182int drm_infobufs(struct drm_device *dev, void *data, 1180int drm_legacy_infobufs(struct drm_device *dev, void *data,
1183 struct drm_file *file_priv) 1181 struct drm_file *file_priv)
1184{ 1182{
1185 struct drm_device_dma *dma = dev->dma; 1183 struct drm_device_dma *dma = dev->dma;
1186 struct drm_buf_info *request = data; 1184 struct drm_buf_info *request = data;
@@ -1260,8 +1258,8 @@ int drm_infobufs(struct drm_device *dev, void *data,
1260 * 1258 *
 1261	 * \note This ioctl is deprecated and almost never used. 1259	 * \note This ioctl is deprecated and almost never used.
1262 */ 1260 */
1263int drm_markbufs(struct drm_device *dev, void *data, 1261int drm_legacy_markbufs(struct drm_device *dev, void *data,
1264 struct drm_file *file_priv) 1262 struct drm_file *file_priv)
1265{ 1263{
1266 struct drm_device_dma *dma = dev->dma; 1264 struct drm_device_dma *dma = dev->dma;
1267 struct drm_buf_desc *request = data; 1265 struct drm_buf_desc *request = data;
@@ -1307,8 +1305,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
1307 * Calls free_buffer() for each used buffer. 1305 * Calls free_buffer() for each used buffer.
1308 * This function is primarily used for debugging. 1306 * This function is primarily used for debugging.
1309 */ 1307 */
1310int drm_freebufs(struct drm_device *dev, void *data, 1308int drm_legacy_freebufs(struct drm_device *dev, void *data,
1311 struct drm_file *file_priv) 1309 struct drm_file *file_priv)
1312{ 1310{
1313 struct drm_device_dma *dma = dev->dma; 1311 struct drm_device_dma *dma = dev->dma;
1314 struct drm_buf_free *request = data; 1312 struct drm_buf_free *request = data;
@@ -1340,7 +1338,7 @@ int drm_freebufs(struct drm_device *dev, void *data,
1340 task_pid_nr(current)); 1338 task_pid_nr(current));
1341 return -EINVAL; 1339 return -EINVAL;
1342 } 1340 }
1343 drm_free_buffer(dev, buf); 1341 drm_legacy_free_buffer(dev, buf);
1344 } 1342 }
1345 1343
1346 return 0; 1344 return 0;
@@ -1360,8 +1358,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
 1360	 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls 1358	 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1361 * drm_mmap_dma(). 1359 * drm_mmap_dma().
1362 */ 1360 */
1363int drm_mapbufs(struct drm_device *dev, void *data, 1361int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1364 struct drm_file *file_priv) 1362 struct drm_file *file_priv)
1365{ 1363{
1366 struct drm_device_dma *dma = dev->dma; 1364 struct drm_device_dma *dma = dev->dma;
1367 int retcode = 0; 1365 int retcode = 0;
@@ -1448,7 +1446,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1448 return retcode; 1446 return retcode;
1449} 1447}
1450 1448
1451int drm_dma_ioctl(struct drm_device *dev, void *data, 1449int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1452 struct drm_file *file_priv) 1450 struct drm_file *file_priv)
1453{ 1451{
1454 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1452 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1460,7 +1458,7 @@ int drm_dma_ioctl(struct drm_device *dev, void *data,
1460 return -EINVAL; 1458 return -EINVAL;
1461} 1459}
1462 1460
1463struct drm_local_map *drm_getsarea(struct drm_device *dev) 1461struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1464{ 1462{
1465 struct drm_map_list *entry; 1463 struct drm_map_list *entry;
1466 1464
@@ -1472,4 +1470,4 @@ struct drm_local_map *drm_getsarea(struct drm_device *dev)
1472 } 1470 }
1473 return NULL; 1471 return NULL;
1474} 1472}
1475EXPORT_SYMBOL(drm_getsarea); 1473EXPORT_SYMBOL(drm_legacy_getsarea);
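drm_bufs.c is part of the "big internal header cleanup" named in the merge message: the map and buffer entry points gain a drm_legacy_ prefix and move to drm_legacy.h, making it obvious at call sites that a driver is using pre-KMS infrastructure. A UMS-era driver simply follows the rename; an illustrative call (the argument values are placeholders):

/* before: drm_addmap(dev, offset, size, _DRM_REGISTERS, flags, &map) */
ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 0),
			pci_resource_len(dev->pdev, 0),
			_DRM_REGISTERS, _DRM_READ_ONLY, &map);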
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 90e773019eac..e79c8d3700d8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -40,106 +40,12 @@
40#include <drm/drm_modeset_lock.h> 40#include <drm/drm_modeset_lock.h>
41 41
42#include "drm_crtc_internal.h" 42#include "drm_crtc_internal.h"
43#include "drm_internal.h"
43 44
44static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 45static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
45 struct drm_mode_fb_cmd2 *r, 46 struct drm_mode_fb_cmd2 *r,
46 struct drm_file *file_priv); 47 struct drm_file *file_priv);
47 48
48/**
49 * drm_modeset_lock_all - take all modeset locks
50 * @dev: drm device
51 *
52 * This function takes all modeset locks, suitable where a more fine-grained
53 * scheme isn't (yet) implemented. Locks must be dropped with
54 * drm_modeset_unlock_all.
55 */
56void drm_modeset_lock_all(struct drm_device *dev)
57{
58 struct drm_mode_config *config = &dev->mode_config;
59 struct drm_modeset_acquire_ctx *ctx;
60 int ret;
61
62 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
63 if (WARN_ON(!ctx))
64 return;
65
66 mutex_lock(&config->mutex);
67
68 drm_modeset_acquire_init(ctx, 0);
69
70retry:
71 ret = drm_modeset_lock(&config->connection_mutex, ctx);
72 if (ret)
73 goto fail;
74 ret = drm_modeset_lock_all_crtcs(dev, ctx);
75 if (ret)
76 goto fail;
77
78 WARN_ON(config->acquire_ctx);
79
80 /* now we hold the locks, so now that it is safe, stash the
81 * ctx for drm_modeset_unlock_all():
82 */
83 config->acquire_ctx = ctx;
84
85 drm_warn_on_modeset_not_all_locked(dev);
86
87 return;
88
89fail:
90 if (ret == -EDEADLK) {
91 drm_modeset_backoff(ctx);
92 goto retry;
93 }
94}
95EXPORT_SYMBOL(drm_modeset_lock_all);
96
97/**
98 * drm_modeset_unlock_all - drop all modeset locks
99 * @dev: device
100 *
 101 * This function drops all modeset locks taken by drm_modeset_lock_all.
102 */
103void drm_modeset_unlock_all(struct drm_device *dev)
104{
105 struct drm_mode_config *config = &dev->mode_config;
106 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
107
108 if (WARN_ON(!ctx))
109 return;
110
111 config->acquire_ctx = NULL;
112 drm_modeset_drop_locks(ctx);
113 drm_modeset_acquire_fini(ctx);
114
115 kfree(ctx);
116
117 mutex_unlock(&dev->mode_config.mutex);
118}
119EXPORT_SYMBOL(drm_modeset_unlock_all);
120
121/**
122 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
123 * @dev: device
124 *
125 * Useful as a debug assert.
126 */
127void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
128{
129 struct drm_crtc *crtc;
130
131 /* Locking is currently fubar in the panic handler. */
132 if (oops_in_progress)
133 return;
134
135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
136 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
137
138 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
139 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
140}
141EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
142
143/* Avoid boilerplate. I'm tired of typing. */ 49/* Avoid boilerplate. I'm tired of typing. */
144#define DRM_ENUM_NAME_FN(fnname, list) \ 50#define DRM_ENUM_NAME_FN(fnname, list) \
145 const char *fnname(int val) \ 51 const char *fnname(int val) \
@@ -515,9 +421,6 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
515 if (ret) 421 if (ret)
516 goto out; 422 goto out;
517 423
518 /* Grab the idr reference. */
519 drm_framebuffer_reference(fb);
520
521 dev->mode_config.num_fb++; 424 dev->mode_config.num_fb++;
522 list_add(&fb->head, &dev->mode_config.fb_list); 425 list_add(&fb->head, &dev->mode_config.fb_list);
523out: 426out:
@@ -527,10 +430,34 @@ out:
527} 430}
528EXPORT_SYMBOL(drm_framebuffer_init); 431EXPORT_SYMBOL(drm_framebuffer_init);
529 432
433/* dev->mode_config.fb_lock must be held! */
434static void __drm_framebuffer_unregister(struct drm_device *dev,
435 struct drm_framebuffer *fb)
436{
437 mutex_lock(&dev->mode_config.idr_mutex);
438 idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
439 mutex_unlock(&dev->mode_config.idr_mutex);
440
441 fb->base.id = 0;
442}
443
530static void drm_framebuffer_free(struct kref *kref) 444static void drm_framebuffer_free(struct kref *kref)
531{ 445{
532 struct drm_framebuffer *fb = 446 struct drm_framebuffer *fb =
533 container_of(kref, struct drm_framebuffer, refcount); 447 container_of(kref, struct drm_framebuffer, refcount);
448 struct drm_device *dev = fb->dev;
449
450 /*
451 * The lookup idr holds a weak reference, which has not necessarily been
452 * removed at this point. Check for that.
453 */
454 mutex_lock(&dev->mode_config.fb_lock);
455 if (fb->base.id) {
456 /* Mark fb as reaped and drop idr ref. */
457 __drm_framebuffer_unregister(dev, fb);
458 }
459 mutex_unlock(&dev->mode_config.fb_lock);
460
534 fb->funcs->destroy(fb); 461 fb->funcs->destroy(fb);
535} 462}
536 463
@@ -567,8 +494,10 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
567 494
568 mutex_lock(&dev->mode_config.fb_lock); 495 mutex_lock(&dev->mode_config.fb_lock);
569 fb = __drm_framebuffer_lookup(dev, id); 496 fb = __drm_framebuffer_lookup(dev, id);
570 if (fb) 497 if (fb) {
571 drm_framebuffer_reference(fb); 498 if (!kref_get_unless_zero(&fb->refcount))
499 fb = NULL;
500 }
572 mutex_unlock(&dev->mode_config.fb_lock); 501 mutex_unlock(&dev->mode_config.fb_lock);
573 502
574 return fb; 503 return fb;
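
The kref_get_unless_zero() conversion above makes the fb idr a genuinely weak reference: a lookup racing against the final unref now returns NULL instead of resurrecting a dying framebuffer. A minimal, self-contained sketch of the pattern, with illustrative names:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/mutex.h>

struct obj {
        struct kref refcount;
};

/* Resolve a weak idr reference into a strong one, or fail. */
static struct obj *obj_lookup(struct idr *idr, struct mutex *lock, int id)
{
        struct obj *o;

        mutex_lock(lock);
        o = idr_find(idr, id);
        /* The idr holds only a weak reference: if the last strong
         * reference is already gone, refuse to resurrect the object. */
        if (o && !kref_get_unless_zero(&o->refcount))
                o = NULL;
        mutex_unlock(lock);
        return o;
}
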
@@ -612,19 +541,6 @@ static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
612 kref_put(&fb->refcount, drm_framebuffer_free_bug); 541 kref_put(&fb->refcount, drm_framebuffer_free_bug);
613} 542}
614 543
615/* dev->mode_config.fb_lock must be held! */
616static void __drm_framebuffer_unregister(struct drm_device *dev,
617 struct drm_framebuffer *fb)
618{
619 mutex_lock(&dev->mode_config.idr_mutex);
620 idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
621 mutex_unlock(&dev->mode_config.idr_mutex);
622
623 fb->base.id = 0;
624
625 __drm_framebuffer_unreference(fb);
626}
627
628/** 544/**
629 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr 545 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
630 * @fb: fb to unregister 546 * @fb: fb to unregister
@@ -764,11 +680,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
764 crtc->funcs = funcs; 680 crtc->funcs = funcs;
765 crtc->invert_dimensions = false; 681 crtc->invert_dimensions = false;
766 682
767 drm_modeset_lock_all(dev);
768 drm_modeset_lock_init(&crtc->mutex); 683 drm_modeset_lock_init(&crtc->mutex);
769 /* dropped by _unlock_all(): */
770 drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
771
772 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 684 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
773 if (ret) 685 if (ret)
774 goto out; 686 goto out;
@@ -786,7 +698,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
786 cursor->possible_crtcs = 1 << drm_crtc_index(crtc); 698 cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
787 699
788 out: 700 out:
789 drm_modeset_unlock_all(dev);
790 701
791 return ret; 702 return ret;
792} 703}
@@ -853,6 +764,59 @@ static void drm_mode_remove(struct drm_connector *connector,
853} 764}
854 765
855/** 766/**
767 * drm_connector_get_cmdline_mode - reads the user's cmdline mode
 768 * @connector: connector to query; the parsed result is stored in
 769 * the connector's cmdline_mode
770 *
 771 * The kernel supports per-connector configuration of its consoles through
772 * use of the video= parameter. This function parses that option and
 773 * extracts the user-specified mode (or enable/disable status) for a
774 * particular connector. This is typically only used during the early fbdev
775 * setup.
776 */
777static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
778{
779 struct drm_cmdline_mode *mode = &connector->cmdline_mode;
780 char *option = NULL;
781
782 if (fb_get_options(connector->name, &option))
783 return;
784
785 if (!drm_mode_parse_command_line_for_connector(option,
786 connector,
787 mode))
788 return;
789
790 if (mode->force) {
791 const char *s;
792
793 switch (mode->force) {
794 case DRM_FORCE_OFF:
795 s = "OFF";
796 break;
797 case DRM_FORCE_ON_DIGITAL:
798 s = "ON - dig";
799 break;
800 default:
801 case DRM_FORCE_ON:
802 s = "ON";
803 break;
804 }
805
806 DRM_INFO("forcing %s connector %s\n", connector->name, s);
807 connector->force = mode->force;
808 }
809
810 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
811 connector->name,
812 mode->xres, mode->yres,
813 mode->refresh_specified ? mode->refresh : 60,
814 mode->rb ? " reduced blanking" : "",
815 mode->margins ? " with margins" : "",
816 mode->interlace ? " interlaced" : "");
817}
818
819/**
856 * drm_connector_init - Init a preallocated connector 820 * drm_connector_init - Init a preallocated connector
857 * @dev: DRM device 821 * @dev: DRM device
858 * @connector: the connector to init 822 * @connector: the connector to init
@@ -904,6 +868,8 @@ int drm_connector_init(struct drm_device *dev,
904 connector->edid_blob_ptr = NULL; 868 connector->edid_blob_ptr = NULL;
905 connector->status = connector_status_unknown; 869 connector->status = connector_status_unknown;
906 870
871 drm_connector_get_cmdline_mode(connector);
872
907 list_add_tail(&connector->head, &dev->mode_config.connector_list); 873 list_add_tail(&connector->head, &dev->mode_config.connector_list);
908 dev->mode_config.num_connector++; 874 dev->mode_config.num_connector++;
909 875
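
The new cmdline hook consumes the familiar fbdev video= syntax, e.g. video=HDMI-A-1:1920x1080@60e (force on at 1080p60) or video=VGA-1:d (force off). A hedged sketch of driving the parser directly; the option string and connector are illustrative:

#include <drm/drmP.h>

static bool try_cmdline_option(struct drm_connector *connector)
{
        struct drm_cmdline_mode cmode = { };

        /* Returns true when the option string was understood; the parsed
         * geometry and force flags land in cmode. */
        return drm_mode_parse_command_line_for_connector("1920x1080@60e",
                                                         connector, &cmode);
}
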
@@ -957,6 +923,29 @@ void drm_connector_cleanup(struct drm_connector *connector)
957EXPORT_SYMBOL(drm_connector_cleanup); 923EXPORT_SYMBOL(drm_connector_cleanup);
958 924
959/** 925/**
926 * drm_connector_index - find the index of a registered connector
927 * @connector: connector to find index for
928 *
929 * Given a registered connector, return the index of that connector within a DRM
930 * device's list of connectors.
931 */
932unsigned int drm_connector_index(struct drm_connector *connector)
933{
934 unsigned int index = 0;
935 struct drm_connector *tmp;
936
937 list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
938 if (tmp == connector)
939 return index;
940
941 index++;
942 }
943
944 BUG();
945}
946EXPORT_SYMBOL(drm_connector_index);
947
948/**
960 * drm_connector_register - register a connector 949 * drm_connector_register - register a connector
961 * @connector: the connector to register 950 * @connector: the connector to register
962 * 951 *
@@ -1261,6 +1250,29 @@ void drm_plane_cleanup(struct drm_plane *plane)
1261EXPORT_SYMBOL(drm_plane_cleanup); 1250EXPORT_SYMBOL(drm_plane_cleanup);
1262 1251
1263/** 1252/**
1253 * drm_plane_index - find the index of a registered plane
1254 * @plane: plane to find index for
1255 *
 1256 * Given a registered plane, return the index of that plane within a DRM
1257 * device's list of planes.
1258 */
1259unsigned int drm_plane_index(struct drm_plane *plane)
1260{
1261 unsigned int index = 0;
1262 struct drm_plane *tmp;
1263
1264 list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
1265 if (tmp == plane)
1266 return index;
1267
1268 index++;
1269 }
1270
1271 BUG();
1272}
1273EXPORT_SYMBOL(drm_plane_index);
1274
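
Both index helpers walk the device's object list, so the returned index is positional: it is stable only while no connectors or planes are added or removed, and each call costs an O(n) list walk. A hedged usage sketch (the dirty mask is illustrative):

#include <linux/bitops.h>
#include <drm/drmP.h>

/* Map a plane to a bit in a per-device mask, e.g. for deferred updates. */
static void mark_plane_dirty(struct drm_plane *plane, unsigned long *dirty_mask)
{
        set_bit(drm_plane_index(plane), dirty_mask);
}
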
1275/**
1264 * drm_plane_force_disable - Forcibly disable a plane 1276 * drm_plane_force_disable - Forcibly disable a plane
1265 * @plane: plane to disable 1277 * @plane: plane to disable
1266 * 1278 *
@@ -1271,19 +1283,21 @@ EXPORT_SYMBOL(drm_plane_cleanup);
1271 */ 1283 */
1272void drm_plane_force_disable(struct drm_plane *plane) 1284void drm_plane_force_disable(struct drm_plane *plane)
1273{ 1285{
1274 struct drm_framebuffer *old_fb = plane->fb;
1275 int ret; 1286 int ret;
1276 1287
1277 if (!old_fb) 1288 if (!plane->fb)
1278 return; 1289 return;
1279 1290
1291 plane->old_fb = plane->fb;
1280 ret = plane->funcs->disable_plane(plane); 1292 ret = plane->funcs->disable_plane(plane);
1281 if (ret) { 1293 if (ret) {
1282 DRM_ERROR("failed to disable plane with busy fb\n"); 1294 DRM_ERROR("failed to disable plane with busy fb\n");
1295 plane->old_fb = NULL;
1283 return; 1296 return;
1284 } 1297 }
1285 /* disconnect the plane from the fb and crtc: */ 1298 /* disconnect the plane from the fb and crtc: */
1286 __drm_framebuffer_unreference(old_fb); 1299 __drm_framebuffer_unreference(plane->old_fb);
1300 plane->old_fb = NULL;
1287 plane->fb = NULL; 1301 plane->fb = NULL;
1288 plane->crtc = NULL; 1302 plane->crtc = NULL;
1289} 1303}
@@ -2249,33 +2263,29 @@ out:
2249 * 2263 *
2250 * src_{x,y,w,h} are provided in 16.16 fixed point format 2264 * src_{x,y,w,h} are provided in 16.16 fixed point format
2251 */ 2265 */
2252static int setplane_internal(struct drm_plane *plane, 2266static int __setplane_internal(struct drm_plane *plane,
2253 struct drm_crtc *crtc, 2267 struct drm_crtc *crtc,
2254 struct drm_framebuffer *fb, 2268 struct drm_framebuffer *fb,
2255 int32_t crtc_x, int32_t crtc_y, 2269 int32_t crtc_x, int32_t crtc_y,
2256 uint32_t crtc_w, uint32_t crtc_h, 2270 uint32_t crtc_w, uint32_t crtc_h,
2257 /* src_{x,y,w,h} values are 16.16 fixed point */ 2271 /* src_{x,y,w,h} values are 16.16 fixed point */
2258 uint32_t src_x, uint32_t src_y, 2272 uint32_t src_x, uint32_t src_y,
2259 uint32_t src_w, uint32_t src_h) 2273 uint32_t src_w, uint32_t src_h)
2260{ 2274{
2261 struct drm_device *dev = plane->dev;
2262 struct drm_framebuffer *old_fb = NULL;
2263 int ret = 0; 2275 int ret = 0;
2264 unsigned int fb_width, fb_height; 2276 unsigned int fb_width, fb_height;
2265 int i; 2277 int i;
2266 2278
2267 /* No fb means shut it down */ 2279 /* No fb means shut it down */
2268 if (!fb) { 2280 if (!fb) {
2269 drm_modeset_lock_all(dev); 2281 plane->old_fb = plane->fb;
2270 old_fb = plane->fb;
2271 ret = plane->funcs->disable_plane(plane); 2282 ret = plane->funcs->disable_plane(plane);
2272 if (!ret) { 2283 if (!ret) {
2273 plane->crtc = NULL; 2284 plane->crtc = NULL;
2274 plane->fb = NULL; 2285 plane->fb = NULL;
2275 } else { 2286 } else {
2276 old_fb = NULL; 2287 plane->old_fb = NULL;
2277 } 2288 }
2278 drm_modeset_unlock_all(dev);
2279 goto out; 2289 goto out;
2280 } 2290 }
2281 2291
@@ -2315,8 +2325,7 @@ static int setplane_internal(struct drm_plane *plane,
2315 goto out; 2325 goto out;
2316 } 2326 }
2317 2327
2318 drm_modeset_lock_all(dev); 2328 plane->old_fb = plane->fb;
2319 old_fb = plane->fb;
2320 ret = plane->funcs->update_plane(plane, crtc, fb, 2329 ret = plane->funcs->update_plane(plane, crtc, fb,
2321 crtc_x, crtc_y, crtc_w, crtc_h, 2330 crtc_x, crtc_y, crtc_w, crtc_h,
2322 src_x, src_y, src_w, src_h); 2331 src_x, src_y, src_w, src_h);
@@ -2325,18 +2334,37 @@ static int setplane_internal(struct drm_plane *plane,
2325 plane->fb = fb; 2334 plane->fb = fb;
2326 fb = NULL; 2335 fb = NULL;
2327 } else { 2336 } else {
2328 old_fb = NULL; 2337 plane->old_fb = NULL;
2329 } 2338 }
2330 drm_modeset_unlock_all(dev);
2331 2339
2332out: 2340out:
2333 if (fb) 2341 if (fb)
2334 drm_framebuffer_unreference(fb); 2342 drm_framebuffer_unreference(fb);
2335 if (old_fb) 2343 if (plane->old_fb)
2336 drm_framebuffer_unreference(old_fb); 2344 drm_framebuffer_unreference(plane->old_fb);
2345 plane->old_fb = NULL;
2337 2346
2338 return ret; 2347 return ret;
2348}
2349
2350static int setplane_internal(struct drm_plane *plane,
2351 struct drm_crtc *crtc,
2352 struct drm_framebuffer *fb,
2353 int32_t crtc_x, int32_t crtc_y,
2354 uint32_t crtc_w, uint32_t crtc_h,
2355 /* src_{x,y,w,h} values are 16.16 fixed point */
2356 uint32_t src_x, uint32_t src_y,
2357 uint32_t src_w, uint32_t src_h)
2358{
2359 int ret;
2360
2361 drm_modeset_lock_all(plane->dev);
2362 ret = __setplane_internal(plane, crtc, fb,
2363 crtc_x, crtc_y, crtc_w, crtc_h,
2364 src_x, src_y, src_w, src_h);
2365 drm_modeset_unlock_all(plane->dev);
2339 2366
2367 return ret;
2340} 2368}
2341 2369
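
The __setplane_internal()/setplane_internal() split follows the usual kernel locked/unlocked convention: the double-underscore variant assumes the caller already holds the needed locks (here the cursor path, which owns the per-CRTC lock), while the plain wrapper takes and drops them itself. A generic sketch of the idiom, with placeholder names:

#include <linux/mutex.h>

struct ctx {
        struct mutex lock;
        int state;
};

/* Caller must hold ctx->lock. */
static int __do_update(struct ctx *c, int v)
{
        c->state = v;
        return 0;
}

static int do_update(struct ctx *c, int v)
{
        int ret;

        mutex_lock(&c->lock);
        ret = __do_update(c, v);
        mutex_unlock(&c->lock);
        return ret;
}
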
2342/** 2370/**
@@ -2440,7 +2468,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
2440 * crtcs. Atomic modeset will have saner semantics ... 2468 * crtcs. Atomic modeset will have saner semantics ...
2441 */ 2469 */
2442 list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) 2470 list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
2443 tmp->old_fb = tmp->primary->fb; 2471 tmp->primary->old_fb = tmp->primary->fb;
2444 2472
2445 fb = set->fb; 2473 fb = set->fb;
2446 2474
@@ -2453,8 +2481,9 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
2453 list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { 2481 list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
2454 if (tmp->primary->fb) 2482 if (tmp->primary->fb)
2455 drm_framebuffer_reference(tmp->primary->fb); 2483 drm_framebuffer_reference(tmp->primary->fb);
2456 if (tmp->old_fb) 2484 if (tmp->primary->old_fb)
2457 drm_framebuffer_unreference(tmp->old_fb); 2485 drm_framebuffer_unreference(tmp->primary->old_fb);
2486 tmp->primary->old_fb = NULL;
2458 } 2487 }
2459 2488
2460 return ret; 2489 return ret;
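
Moving old_fb from a local variable into struct drm_plane keeps the usual framebuffer swap discipline: remember the outgoing fb before calling into the driver, and drop its reference only once the update can no longer fail. A hedged sketch; swap_hw_state() is a stand-in for the driver callback:

#include <drm/drmP.h>

static int swap_hw_state(struct drm_plane *plane, struct drm_framebuffer *fb)
{
        return 0;       /* placeholder for the driver's update hook */
}

static int swap_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
        int ret;

        plane->old_fb = plane->fb;
        ret = swap_hw_state(plane, fb);
        if (ret) {
                plane->old_fb = NULL;   /* failed: keep the old fb and its ref */
                return ret;
        }
        plane->fb = fb;
        if (plane->old_fb)
                drm_framebuffer_unreference(plane->old_fb);
        plane->old_fb = NULL;
        return 0;
}
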
@@ -2701,6 +2730,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2701 int ret = 0; 2730 int ret = 0;
2702 2731
2703 BUG_ON(!crtc->cursor); 2732 BUG_ON(!crtc->cursor);
2733 WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
2704 2734
2705 /* 2735 /*
2706 * Obtain fb we'll be using (either new or existing) and take an extra 2736 * Obtain fb we'll be using (either new or existing) and take an extra
@@ -2720,11 +2750,9 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2720 fb = NULL; 2750 fb = NULL;
2721 } 2751 }
2722 } else { 2752 } else {
2723 mutex_lock(&dev->mode_config.mutex);
2724 fb = crtc->cursor->fb; 2753 fb = crtc->cursor->fb;
2725 if (fb) 2754 if (fb)
2726 drm_framebuffer_reference(fb); 2755 drm_framebuffer_reference(fb);
2727 mutex_unlock(&dev->mode_config.mutex);
2728 } 2756 }
2729 2757
2730 if (req->flags & DRM_MODE_CURSOR_MOVE) { 2758 if (req->flags & DRM_MODE_CURSOR_MOVE) {
@@ -2746,7 +2774,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2746 * setplane_internal will take care of deref'ing either the old or new 2774 * setplane_internal will take care of deref'ing either the old or new
2747 * framebuffer depending on success. 2775 * framebuffer depending on success.
2748 */ 2776 */
2749 ret = setplane_internal(crtc->cursor, crtc, fb, 2777 ret = __setplane_internal(crtc->cursor, crtc, fb,
2750 crtc_x, crtc_y, crtc_w, crtc_h, 2778 crtc_x, crtc_y, crtc_w, crtc_h,
2751 0, 0, src_w, src_h); 2779 0, 0, src_w, src_h);
2752 2780
@@ -2782,10 +2810,12 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2782 * If this crtc has a universal cursor plane, call that plane's update 2810 * If this crtc has a universal cursor plane, call that plane's update
2783 * handler rather than using legacy cursor handlers. 2811 * handler rather than using legacy cursor handlers.
2784 */ 2812 */
2785 if (crtc->cursor) 2813 drm_modeset_lock_crtc(crtc);
2786 return drm_mode_cursor_universal(crtc, req, file_priv); 2814 if (crtc->cursor) {
2815 ret = drm_mode_cursor_universal(crtc, req, file_priv);
2816 goto out;
2817 }
2787 2818
2788 drm_modeset_lock(&crtc->mutex, NULL);
2789 if (req->flags & DRM_MODE_CURSOR_BO) { 2819 if (req->flags & DRM_MODE_CURSOR_BO) {
2790 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { 2820 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
2791 ret = -ENXIO; 2821 ret = -ENXIO;
@@ -2809,7 +2839,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2809 } 2839 }
2810 } 2840 }
2811out: 2841out:
2812 drm_modeset_unlock(&crtc->mutex); 2842 drm_modeset_unlock_crtc(crtc);
2813 2843
2814 return ret; 2844 return ret;
2815 2845
@@ -3370,7 +3400,16 @@ void drm_fb_release(struct drm_file *priv)
3370 struct drm_device *dev = priv->minor->dev; 3400 struct drm_device *dev = priv->minor->dev;
3371 struct drm_framebuffer *fb, *tfb; 3401 struct drm_framebuffer *fb, *tfb;
3372 3402
3373 mutex_lock(&priv->fbs_lock); 3403 /*
 3404 * When the file gets released, no one else can access the fb list any
 3405 * more, so there is no need to grab fpriv->fbs_lock; in fact we must not,
 3406 * to avoid upsetting lockdep, since the universal cursor code adds a
3407 * framebuffer while holding mutex locks.
3408 *
3409 * Note that a real deadlock between fpriv->fbs_lock and the modeset
3410 * locks is impossible here since no one else but this function can get
3411 * at it any more.
3412 */
3374 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { 3413 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
3375 3414
3376 mutex_lock(&dev->mode_config.fb_lock); 3415 mutex_lock(&dev->mode_config.fb_lock);
@@ -3383,7 +3422,6 @@ void drm_fb_release(struct drm_file *priv)
3383 /* This will also drop the fpriv->fbs reference. */ 3422 /* This will also drop the fpriv->fbs reference. */
3384 drm_framebuffer_remove(fb); 3423 drm_framebuffer_remove(fb);
3385 } 3424 }
3386 mutex_unlock(&priv->fbs_lock);
3387} 3425}
3388 3426
3389/** 3427/**
@@ -3495,9 +3533,10 @@ EXPORT_SYMBOL(drm_property_create_enum);
3495 * @flags: flags specifying the property type 3533 * @flags: flags specifying the property type
3496 * @name: name of the property 3534 * @name: name of the property
3497 * @props: enumeration lists with property bitflags 3535 * @props: enumeration lists with property bitflags
3498 * @num_values: number of pre-defined values 3536 * @num_props: size of the @props array
3537 * @supported_bits: bitmask of all supported enumeration values
3499 * 3538 *
3500 * This creates a new generic drm property which can then be attached to a drm 3539 * This creates a new bitmask drm property which can then be attached to a drm
3501 * object with drm_object_attach_property. The returned property object must be 3540 * object with drm_object_attach_property. The returned property object must be
3502 * freed with drm_property_destroy. 3541 * freed with drm_property_destroy.
3503 * 3542 *
@@ -4157,12 +4196,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
4157 return ret; 4196 return ret;
4158} 4197}
4159 4198
4160static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj, 4199/**
4161 struct drm_property *property, 4200 * drm_mode_plane_set_obj_prop - set the value of a property
4162 uint64_t value) 4201 * @plane: drm plane object to set property value for
4202 * @property: property to set
4203 * @value: value the property should be set to
4204 *
 4205 * This function sets a given property on a given plane object. It
4206 * calls the driver's ->set_property callback and changes the software state of
4207 * the property if the callback succeeds.
4208 *
4209 * Returns:
4210 * Zero on success, error code on failure.
4211 */
4212int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
4213 struct drm_property *property,
4214 uint64_t value)
4163{ 4215{
4164 int ret = -EINVAL; 4216 int ret = -EINVAL;
4165 struct drm_plane *plane = obj_to_plane(obj); 4217 struct drm_mode_object *obj = &plane->base;
4166 4218
4167 if (plane->funcs->set_property) 4219 if (plane->funcs->set_property)
4168 ret = plane->funcs->set_property(plane, property, value); 4220 ret = plane->funcs->set_property(plane, property, value);
@@ -4171,6 +4223,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
4171 4223
4172 return ret; 4224 return ret;
4173} 4225}
4226EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
4174 4227
4175/** 4228/**
4176 * drm_mode_getproperty_ioctl - get the current value of a object's property 4229 * drm_mode_getproperty_ioctl - get the current value of a object's property
@@ -4309,7 +4362,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
4309 ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value); 4362 ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
4310 break; 4363 break;
4311 case DRM_MODE_OBJECT_PLANE: 4364 case DRM_MODE_OBJECT_PLANE:
4312 ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value); 4365 ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
4366 property, arg->value);
4313 break; 4367 break;
4314 } 4368 }
4315 4369
@@ -4529,7 +4583,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4529{ 4583{
4530 struct drm_mode_crtc_page_flip *page_flip = data; 4584 struct drm_mode_crtc_page_flip *page_flip = data;
4531 struct drm_crtc *crtc; 4585 struct drm_crtc *crtc;
4532 struct drm_framebuffer *fb = NULL, *old_fb = NULL; 4586 struct drm_framebuffer *fb = NULL;
4533 struct drm_pending_vblank_event *e = NULL; 4587 struct drm_pending_vblank_event *e = NULL;
4534 unsigned long flags; 4588 unsigned long flags;
4535 int ret = -EINVAL; 4589 int ret = -EINVAL;
@@ -4545,7 +4599,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4545 if (!crtc) 4599 if (!crtc)
4546 return -ENOENT; 4600 return -ENOENT;
4547 4601
4548 drm_modeset_lock(&crtc->mutex, NULL); 4602 drm_modeset_lock_crtc(crtc);
4549 if (crtc->primary->fb == NULL) { 4603 if (crtc->primary->fb == NULL) {
4550 /* The framebuffer is currently unbound, presumably 4604 /* The framebuffer is currently unbound, presumably
4551 * due to a hotplug event, that userspace has not 4605 * due to a hotplug event, that userspace has not
@@ -4601,7 +4655,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4601 (void (*) (struct drm_pending_event *)) kfree; 4655 (void (*) (struct drm_pending_event *)) kfree;
4602 } 4656 }
4603 4657
4604 old_fb = crtc->primary->fb; 4658 crtc->primary->old_fb = crtc->primary->fb;
4605 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); 4659 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
4606 if (ret) { 4660 if (ret) {
4607 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 4661 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -4611,7 +4665,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4611 kfree(e); 4665 kfree(e);
4612 } 4666 }
4613 /* Keep the old fb, don't unref it. */ 4667 /* Keep the old fb, don't unref it. */
4614 old_fb = NULL; 4668 crtc->primary->old_fb = NULL;
4615 } else { 4669 } else {
4616 /* 4670 /*
4617 * Warn if the driver hasn't properly updated the crtc->fb 4671 * Warn if the driver hasn't properly updated the crtc->fb
@@ -4627,9 +4681,10 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4627out: 4681out:
4628 if (fb) 4682 if (fb)
4629 drm_framebuffer_unreference(fb); 4683 drm_framebuffer_unreference(fb);
4630 if (old_fb) 4684 if (crtc->primary->old_fb)
4631 drm_framebuffer_unreference(old_fb); 4685 drm_framebuffer_unreference(crtc->primary->old_fb);
4632 drm_modeset_unlock(&crtc->mutex); 4686 crtc->primary->old_fb = NULL;
4687 drm_modeset_unlock_crtc(crtc);
4633 4688
4634 return ret; 4689 return ret;
4635} 4690}
@@ -4645,9 +4700,14 @@ out:
4645void drm_mode_config_reset(struct drm_device *dev) 4700void drm_mode_config_reset(struct drm_device *dev)
4646{ 4701{
4647 struct drm_crtc *crtc; 4702 struct drm_crtc *crtc;
4703 struct drm_plane *plane;
4648 struct drm_encoder *encoder; 4704 struct drm_encoder *encoder;
4649 struct drm_connector *connector; 4705 struct drm_connector *connector;
4650 4706
4707 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
4708 if (plane->funcs->reset)
4709 plane->funcs->reset(plane);
4710
4651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 4711 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
4652 if (crtc->funcs->reset) 4712 if (crtc->funcs->reset)
4653 crtc->funcs->reset(crtc); 4713 crtc->funcs->reset(crtc);
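
Since drm_mode_config_reset() now also visits planes, drivers can supply a .reset hook for them; a minimal sketch of such a callback (the example_ names are illustrative, and real drivers would reset their own bookkeeping):

#include <drm/drmP.h>

static void example_plane_reset(struct drm_plane *plane)
{
        /* Drop stale software state so it matches the hardware state
         * after driver load or resume. */
        plane->fb = NULL;
        plane->crtc = NULL;
}

static const struct drm_plane_funcs example_plane_funcs = {
        .reset = example_plane_reset,
        /* .update_plane, .disable_plane, .destroy, ... */
};
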
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 13bd42923dd4..3bcf8e6a85b3 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -36,6 +36,7 @@
36#include <linux/export.h> 36#include <linux/export.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include <drm/drm_edid.h> 38#include <drm/drm_edid.h>
39#include "drm_internal.h"
39 40
40#if defined(CONFIG_DEBUG_FS) 41#if defined(CONFIG_DEBUG_FS)
41 42
@@ -49,9 +50,7 @@ static const struct drm_info_list drm_debugfs_list[] = {
49 {"clients", drm_clients_info, 0}, 50 {"clients", drm_clients_info, 0},
50 {"bufs", drm_bufs_info, 0}, 51 {"bufs", drm_bufs_info, 0},
51 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 52 {"gem_names", drm_gem_name_info, DRIVER_GEM},
52#if DRM_DEBUG_CODE
53 {"vma", drm_vma_info, 0}, 53 {"vma", drm_vma_info, 0},
54#endif
55}; 54};
56#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) 55#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
57 56
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 8a140a953754..ea481800ef56 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/export.h> 36#include <linux/export.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include "drm_legacy.h"
38 39
39/** 40/**
40 * Initialize the DMA data. 41 * Initialize the DMA data.
@@ -124,7 +125,7 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
124 * 125 *
125 * Resets the fields of \p buf. 126 * Resets the fields of \p buf.
126 */ 127 */
127void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf) 128void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
128{ 129{
129 if (!buf) 130 if (!buf)
130 return; 131 return;
@@ -142,8 +143,8 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
142 * 143 *
143 * Frees each buffer associated with \p file_priv not already on the hardware. 144 * Frees each buffer associated with \p file_priv not already on the hardware.
144 */ 145 */
145void drm_core_reclaim_buffers(struct drm_device *dev, 146void drm_legacy_reclaim_buffers(struct drm_device *dev,
146 struct drm_file *file_priv) 147 struct drm_file *file_priv)
147{ 148{
148 struct drm_device_dma *dma = dev->dma; 149 struct drm_device_dma *dma = dev->dma;
149 int i; 150 int i;
@@ -154,7 +155,7 @@ void drm_core_reclaim_buffers(struct drm_device *dev,
154 if (dma->buflist[i]->file_priv == file_priv) { 155 if (dma->buflist[i]->file_priv == file_priv) {
155 switch (dma->buflist[i]->list) { 156 switch (dma->buflist[i]->list) {
156 case DRM_LIST_NONE: 157 case DRM_LIST_NONE:
157 drm_free_buffer(dev, dma->buflist[i]); 158 drm_legacy_free_buffer(dev, dma->buflist[i]);
158 break; 159 break;
159 case DRM_LIST_WAIT: 160 case DRM_LIST_WAIT:
160 dma->buflist[i]->list = DRM_LIST_RECLAIM; 161 dma->buflist[i]->list = DRM_LIST_RECLAIM;
@@ -166,5 +167,3 @@ void drm_core_reclaim_buffers(struct drm_device *dev,
166 } 167 }
167 } 168 }
168} 169}
169
170EXPORT_SYMBOL(drm_core_reclaim_buffers);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ac3c2738db94..070f913d2dba 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -682,7 +682,7 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
682static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, 682static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
683 struct drm_dp_vcpi *vcpi) 683 struct drm_dp_vcpi *vcpi)
684{ 684{
685 int ret; 685 int ret, vcpi_ret;
686 686
687 mutex_lock(&mgr->payload_lock); 687 mutex_lock(&mgr->payload_lock);
688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); 688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
@@ -692,8 +692,16 @@ static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
692 goto out_unlock; 692 goto out_unlock;
693 } 693 }
694 694
695 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
696 if (vcpi_ret > mgr->max_payloads) {
697 ret = -EINVAL;
698 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
699 goto out_unlock;
700 }
701
695 set_bit(ret, &mgr->payload_mask); 702 set_bit(ret, &mgr->payload_mask);
696 vcpi->vcpi = ret; 703 set_bit(vcpi_ret, &mgr->vcpi_mask);
704 vcpi->vcpi = vcpi_ret + 1;
697 mgr->proposed_vcpis[ret - 1] = vcpi; 705 mgr->proposed_vcpis[ret - 1] = vcpi;
698out_unlock: 706out_unlock:
699 mutex_unlock(&mgr->payload_lock); 707 mutex_unlock(&mgr->payload_lock);
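
The payload/VCPI split above uses the stock bitmap-allocator idiom: find the lowest clear bit, set it, and hand the position out as the id, keeping bit 0 reserved so that 0 can mean "no id". A self-contained sketch with an illustrative bound:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MAX_IDS 16

static DECLARE_BITMAP(id_mask, MAX_IDS + 1) = { 1 };   /* bit 0 reserved */

static int alloc_id(void)
{
        int id = find_first_zero_bit(id_mask, MAX_IDS + 1);

        if (id > MAX_IDS)
                return -ENOSPC;
        set_bit(id, id_mask);
        return id;
}

static void free_id(int id)
{
        if (id)
                clear_bit(id, id_mask);
}
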
@@ -701,15 +709,23 @@ out_unlock:
701} 709}
702 710
703static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, 711static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
704 int id) 712 int vcpi)
705{ 713{
706 if (id == 0) 714 int i;
715 if (vcpi == 0)
707 return; 716 return;
708 717
709 mutex_lock(&mgr->payload_lock); 718 mutex_lock(&mgr->payload_lock);
710 DRM_DEBUG_KMS("putting payload %d\n", id); 719 DRM_DEBUG_KMS("putting payload %d\n", vcpi);
711 clear_bit(id, &mgr->payload_mask); 720 clear_bit(vcpi - 1, &mgr->vcpi_mask);
712 mgr->proposed_vcpis[id - 1] = NULL; 721
722 for (i = 0; i < mgr->max_payloads; i++) {
723 if (mgr->proposed_vcpis[i])
724 if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
725 mgr->proposed_vcpis[i] = NULL;
726 clear_bit(i + 1, &mgr->payload_mask);
727 }
728 }
713 mutex_unlock(&mgr->payload_lock); 729 mutex_unlock(&mgr->payload_lock);
714} 730}
715 731
@@ -1563,7 +1579,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1563 } 1579 }
1564 1580
1565 drm_dp_dpcd_write_payload(mgr, id, payload); 1581 drm_dp_dpcd_write_payload(mgr, id, payload);
1566 payload->payload_state = 0; 1582 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
1567 return 0; 1583 return 0;
1568} 1584}
1569 1585
@@ -1590,7 +1606,7 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1590 */ 1606 */
1591int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 1607int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1592{ 1608{
1593 int i; 1609 int i, j;
1594 int cur_slots = 1; 1610 int cur_slots = 1;
1595 struct drm_dp_payload req_payload; 1611 struct drm_dp_payload req_payload;
1596 struct drm_dp_mst_port *port; 1612 struct drm_dp_mst_port *port;
@@ -1607,26 +1623,46 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1607 port = NULL; 1623 port = NULL;
1608 req_payload.num_slots = 0; 1624 req_payload.num_slots = 0;
1609 } 1625 }
1626
1627 if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1628 mgr->payloads[i].start_slot = req_payload.start_slot;
1629 }
1610 /* work out what is required to happen with this payload */ 1630 /* work out what is required to happen with this payload */
1611 if (mgr->payloads[i].start_slot != req_payload.start_slot || 1631 if (mgr->payloads[i].num_slots != req_payload.num_slots) {
1612 mgr->payloads[i].num_slots != req_payload.num_slots) {
1613 1632
1614 /* need to push an update for this payload */ 1633 /* need to push an update for this payload */
1615 if (req_payload.num_slots) { 1634 if (req_payload.num_slots) {
1616 drm_dp_create_payload_step1(mgr, i + 1, &req_payload); 1635 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1617 mgr->payloads[i].num_slots = req_payload.num_slots; 1636 mgr->payloads[i].num_slots = req_payload.num_slots;
1618 } else if (mgr->payloads[i].num_slots) { 1637 } else if (mgr->payloads[i].num_slots) {
1619 mgr->payloads[i].num_slots = 0; 1638 mgr->payloads[i].num_slots = 0;
1620 drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]); 1639 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
1621 req_payload.payload_state = mgr->payloads[i].payload_state; 1640 req_payload.payload_state = mgr->payloads[i].payload_state;
1622 } else 1641 mgr->payloads[i].start_slot = 0;
1623 req_payload.payload_state = 0; 1642 }
1624
1625 mgr->payloads[i].start_slot = req_payload.start_slot;
1626 mgr->payloads[i].payload_state = req_payload.payload_state; 1643 mgr->payloads[i].payload_state = req_payload.payload_state;
1627 } 1644 }
1628 cur_slots += req_payload.num_slots; 1645 cur_slots += req_payload.num_slots;
1629 } 1646 }
1647
1648 for (i = 0; i < mgr->max_payloads; i++) {
1649 if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1650 DRM_DEBUG_KMS("removing payload %d\n", i);
1651 for (j = i; j < mgr->max_payloads - 1; j++) {
1652 memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1653 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1654 if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1655 set_bit(j + 1, &mgr->payload_mask);
1656 } else {
1657 clear_bit(j + 1, &mgr->payload_mask);
1658 }
1659 }
1660 memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1661 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1662 clear_bit(mgr->max_payloads, &mgr->payload_mask);
1663
1664 }
1665 }
1630 mutex_unlock(&mgr->payload_lock); 1666 mutex_unlock(&mgr->payload_lock);
1631 1667
1632 return 0; 1668 return 0;
@@ -1657,9 +1693,9 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1657 1693
1658 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); 1694 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1659 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 1695 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1660 ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]); 1696 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1661 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1697 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1662 ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]); 1698 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1663 } 1699 }
1664 if (ret) { 1700 if (ret) {
1665 mutex_unlock(&mgr->payload_lock); 1701 mutex_unlock(&mgr->payload_lock);
@@ -1772,7 +1808,7 @@ static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
1772 case DP_LINK_BW_5_4: 1808 case DP_LINK_BW_5_4:
1773 return 10 * dp_link_count; 1809 return 10 * dp_link_count;
1774 } 1810 }
1775 return 0; 1811 BUG();
1776} 1812}
1777 1813
1778/** 1814/**
@@ -1861,6 +1897,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
1861 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); 1897 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
1862 mgr->payload_mask = 0; 1898 mgr->payload_mask = 0;
1863 set_bit(0, &mgr->payload_mask); 1899 set_bit(0, &mgr->payload_mask);
1900 mgr->vcpi_mask = 0;
1864 } 1901 }
1865 1902
1866out_unlock: 1903out_unlock:
@@ -2071,6 +2108,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2071 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify 2108 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2072 * @mgr: manager to notify irq for. 2109 * @mgr: manager to notify irq for.
2073 * @esi: 4 bytes from SINK_COUNT_ESI 2110 * @esi: 4 bytes from SINK_COUNT_ESI
2111 * @handled: whether the hpd interrupt was consumed or not
2074 * 2112 *
2075 * This should be called from the driver when it detects a short IRQ, 2113 * This should be called from the driver when it detects a short IRQ,
2076 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 2114 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
@@ -2474,7 +2512,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
2474 mutex_unlock(&mgr->lock); 2512 mutex_unlock(&mgr->lock);
2475 2513
2476 mutex_lock(&mgr->payload_lock); 2514 mutex_lock(&mgr->payload_lock);
2477 seq_printf(m, "vcpi: %lx\n", mgr->payload_mask); 2515 seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
2478 2516
2479 for (i = 0; i < mgr->max_payloads; i++) { 2517 for (i = 0; i < mgr->max_payloads; i++) {
2480 if (mgr->proposed_vcpis[i]) { 2518 if (mgr->proposed_vcpis[i]) {
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3242e208c0d0..bc3da32d4585 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -35,32 +35,20 @@
35#include <drm/drmP.h> 35#include <drm/drmP.h>
36#include <drm/drm_core.h> 36#include <drm/drm_core.h>
37#include "drm_legacy.h" 37#include "drm_legacy.h"
38#include "drm_internal.h"
38 39
39unsigned int drm_debug = 0; /* 1 to enable debug output */ 40unsigned int drm_debug = 0; /* 1 to enable debug output */
40EXPORT_SYMBOL(drm_debug); 41EXPORT_SYMBOL(drm_debug);
41 42
42unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
43
44unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
45
46/*
47 * Default to use monotonic timestamps for wait-for-vblank and page-flip
48 * complete events.
49 */
50unsigned int drm_timestamp_monotonic = 1;
51
52MODULE_AUTHOR(CORE_AUTHOR); 43MODULE_AUTHOR(CORE_AUTHOR);
53MODULE_DESCRIPTION(CORE_DESC); 44MODULE_DESCRIPTION(CORE_DESC);
54MODULE_LICENSE("GPL and additional rights"); 45MODULE_LICENSE("GPL and additional rights");
55MODULE_PARM_DESC(debug, "Enable debug output"); 46MODULE_PARM_DESC(debug, "Enable debug output");
56MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 47MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
57MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 48MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
58MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 49MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
59 50
60module_param_named(debug, drm_debug, int, 0600); 51module_param_named(debug, drm_debug, int, 0600);
61module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
62module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
63module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
64 52
65static DEFINE_SPINLOCK(drm_minor_lock); 53static DEFINE_SPINLOCK(drm_minor_lock);
66static struct idr drm_minors_idr; 54static struct idr drm_minors_idr;
@@ -68,22 +56,19 @@ static struct idr drm_minors_idr;
68struct class *drm_class; 56struct class *drm_class;
69static struct dentry *drm_debugfs_root; 57static struct dentry *drm_debugfs_root;
70 58
71int drm_err(const char *func, const char *format, ...) 59void drm_err(const char *func, const char *format, ...)
72{ 60{
73 struct va_format vaf; 61 struct va_format vaf;
74 va_list args; 62 va_list args;
75 int r;
76 63
77 va_start(args, format); 64 va_start(args, format);
78 65
79 vaf.fmt = format; 66 vaf.fmt = format;
80 vaf.va = &args; 67 vaf.va = &args;
81 68
82 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); 69 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
83 70
84 va_end(args); 71 va_end(args);
85
86 return r;
87} 72}
88EXPORT_SYMBOL(drm_err); 73EXPORT_SYMBOL(drm_err);
89 74
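
The now-void drm_err() keeps the kernel's %pV idiom, which lets a single printk() forward a caller-supplied format string together with its va_list. A generic sketch of that idiom:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <stdarg.h>

static void log_err_prefixed(const char *who, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* %pV expands the embedded format/arguments pair in place. */
        printk(KERN_ERR "[%s] *ERROR* %pV", who, &vaf);
        va_end(args);
}
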
@@ -102,6 +87,8 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
102} 87}
103EXPORT_SYMBOL(drm_ut_debug_printk); 88EXPORT_SYMBOL(drm_ut_debug_printk);
104 89
90#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
91
105struct drm_master *drm_master_create(struct drm_minor *minor) 92struct drm_master *drm_master_create(struct drm_minor *minor)
106{ 93{
107 struct drm_master *master; 94 struct drm_master *master;
@@ -133,7 +120,6 @@ EXPORT_SYMBOL(drm_master_get);
133static void drm_master_destroy(struct kref *kref) 120static void drm_master_destroy(struct kref *kref)
134{ 121{
135 struct drm_master *master = container_of(kref, struct drm_master, refcount); 122 struct drm_master *master = container_of(kref, struct drm_master, refcount);
136 struct drm_magic_entry *pt, *next;
137 struct drm_device *dev = master->minor->dev; 123 struct drm_device *dev = master->minor->dev;
138 struct drm_map_list *r_list, *list_temp; 124 struct drm_map_list *r_list, *list_temp;
139 125
@@ -143,7 +129,7 @@ static void drm_master_destroy(struct kref *kref)
143 129
144 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { 130 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
145 if (r_list->master == master) { 131 if (r_list->master == master) {
146 drm_rmmap_locked(dev, r_list->map); 132 drm_legacy_rmmap_locked(dev, r_list->map);
147 r_list = NULL; 133 r_list = NULL;
148 } 134 }
149 } 135 }
@@ -154,12 +140,6 @@ static void drm_master_destroy(struct kref *kref)
154 master->unique_len = 0; 140 master->unique_len = 0;
155 } 141 }
156 142
157 list_for_each_entry_safe(pt, next, &master->magicfree, head) {
158 list_del(&pt->head);
159 drm_ht_remove_item(&master->magiclist, &pt->hash_item);
160 kfree(pt);
161 }
162
163 drm_ht_remove(&master->magiclist); 143 drm_ht_remove(&master->magiclist);
164 144
165 mutex_unlock(&dev->struct_mutex); 145 mutex_unlock(&dev->struct_mutex);
@@ -615,7 +595,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
615 goto err_ht; 595 goto err_ht;
616 } 596 }
617 597
618 if (driver->driver_features & DRIVER_GEM) { 598 if (drm_core_check_feature(dev, DRIVER_GEM)) {
619 ret = drm_gem_init(dev); 599 ret = drm_gem_init(dev);
620 if (ret) { 600 if (ret) {
621 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); 601 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
@@ -645,7 +625,7 @@ static void drm_dev_release(struct kref *ref)
645{ 625{
646 struct drm_device *dev = container_of(ref, struct drm_device, ref); 626 struct drm_device *dev = container_of(ref, struct drm_device, ref);
647 627
648 if (dev->driver->driver_features & DRIVER_GEM) 628 if (drm_core_check_feature(dev, DRIVER_GEM))
649 drm_gem_destroy(dev); 629 drm_gem_destroy(dev);
650 630
651 drm_legacy_ctxbitmap_cleanup(dev); 631 drm_legacy_ctxbitmap_cleanup(dev);
@@ -779,7 +759,7 @@ void drm_dev_unregister(struct drm_device *dev)
779 drm_vblank_cleanup(dev); 759 drm_vblank_cleanup(dev);
780 760
781 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 761 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
782 drm_rmmap(dev, r_list->map); 762 drm_legacy_rmmap(dev, r_list->map);
783 763
784 drm_minor_unregister(dev, DRM_MINOR_LEGACY); 764 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
785 drm_minor_unregister(dev, DRM_MINOR_RENDER); 765 drm_minor_unregister(dev, DRM_MINOR_RENDER);
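
drm_core_check_feature() is the canonical test this series standardizes on instead of poking dev->driver->driver_features directly. A hedged usage sketch (assuming drm_gem_init() is visible via drm_internal.h):

#include <drm/drmP.h>
#include "drm_internal.h"

static int setup_gem_if_supported(struct drm_device *dev)
{
        /* Equivalent to (dev->driver->driver_features & DRIVER_GEM) != 0. */
        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return 0;
        return drm_gem_init(dev);
}
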
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1dbf3bc4c6a3..3bf999134bcc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -632,27 +632,27 @@ static const struct drm_display_mode edid_cea_modes[] = {
632 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 632 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
633 DRM_MODE_FLAG_INTERLACE), 633 DRM_MODE_FLAG_INTERLACE),
634 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 634 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
635 /* 6 - 1440x480i@60Hz */ 635 /* 6 - 720(1440)x480i@60Hz */
636 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 636 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
637 1602, 1716, 0, 480, 488, 494, 525, 0, 637 801, 858, 0, 480, 488, 494, 525, 0,
638 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 638 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
639 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 639 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
640 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 640 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
641 /* 7 - 1440x480i@60Hz */ 641 /* 7 - 720(1440)x480i@60Hz */
642 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 642 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
643 1602, 1716, 0, 480, 488, 494, 525, 0, 643 801, 858, 0, 480, 488, 494, 525, 0,
644 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 644 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
645 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 645 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
646 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 646 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
647 /* 8 - 1440x240@60Hz */ 647 /* 8 - 720(1440)x240@60Hz */
648 { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 648 { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
649 1602, 1716, 0, 240, 244, 247, 262, 0, 649 801, 858, 0, 240, 244, 247, 262, 0,
650 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 650 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
651 DRM_MODE_FLAG_DBLCLK), 651 DRM_MODE_FLAG_DBLCLK),
652 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 652 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
653 /* 9 - 1440x240@60Hz */ 653 /* 9 - 720(1440)x240@60Hz */
654 { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 654 { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
655 1602, 1716, 0, 240, 244, 247, 262, 0, 655 801, 858, 0, 240, 244, 247, 262, 0,
656 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 656 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
657 DRM_MODE_FLAG_DBLCLK), 657 DRM_MODE_FLAG_DBLCLK),
658 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 658 .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -714,27 +714,27 @@ static const struct drm_display_mode edid_cea_modes[] = {
714 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 714 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
715 DRM_MODE_FLAG_INTERLACE), 715 DRM_MODE_FLAG_INTERLACE),
716 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 716 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
717 /* 21 - 1440x576i@50Hz */ 717 /* 21 - 720(1440)x576i@50Hz */
718 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 718 { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
719 1590, 1728, 0, 576, 580, 586, 625, 0, 719 795, 864, 0, 576, 580, 586, 625, 0,
720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
721 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 721 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
722 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 722 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
723 /* 22 - 1440x576i@50Hz */ 723 /* 22 - 720(1440)x576i@50Hz */
724 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 724 { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
725 1590, 1728, 0, 576, 580, 586, 625, 0, 725 795, 864, 0, 576, 580, 586, 625, 0,
726 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 726 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
727 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 727 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
728 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 728 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
729 /* 23 - 1440x288@50Hz */ 729 /* 23 - 720(1440)x288@50Hz */
730 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 730 { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
731 1590, 1728, 0, 288, 290, 293, 312, 0, 731 795, 864, 0, 288, 290, 293, 312, 0,
732 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 732 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
733 DRM_MODE_FLAG_DBLCLK), 733 DRM_MODE_FLAG_DBLCLK),
734 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 734 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
735 /* 24 - 1440x288@50Hz */ 735 /* 24 - 720(1440)x288@50Hz */
736 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 736 { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
737 1590, 1728, 0, 288, 290, 293, 312, 0, 737 795, 864, 0, 288, 290, 293, 312, 0,
738 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 738 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
739 DRM_MODE_FLAG_DBLCLK), 739 DRM_MODE_FLAG_DBLCLK),
740 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 740 .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -837,17 +837,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
837 796, 864, 0, 576, 581, 586, 625, 0, 837 796, 864, 0, 576, 581, 586, 625, 0,
838 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 838 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
839 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 839 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
840 /* 44 - 1440x576i@100Hz */ 840 /* 44 - 720(1440)x576i@100Hz */
841 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 841 { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
842 1590, 1728, 0, 576, 580, 586, 625, 0, 842 795, 864, 0, 576, 580, 586, 625, 0,
843 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 843 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
844 DRM_MODE_FLAG_DBLCLK), 844 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
845 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 845 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
846 /* 45 - 1440x576i@100Hz */ 846 /* 45 - 720(1440)x576i@100Hz */
847 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 847 { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
848 1590, 1728, 0, 576, 580, 586, 625, 0, 848 795, 864, 0, 576, 580, 586, 625, 0,
849 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 849 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
850 DRM_MODE_FLAG_DBLCLK), 850 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
851 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 851 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
852 /* 46 - 1920x1080i@120Hz */ 852 /* 46 - 1920x1080i@120Hz */
853 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 853 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
@@ -870,15 +870,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
870 798, 858, 0, 480, 489, 495, 525, 0, 870 798, 858, 0, 480, 489, 495, 525, 0,
871 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 871 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
872 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 872 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
873 /* 50 - 1440x480i@120Hz */ 873 /* 50 - 720(1440)x480i@120Hz */
874 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 874 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
875 1602, 1716, 0, 480, 488, 494, 525, 0, 875 801, 858, 0, 480, 488, 494, 525, 0,
876 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 876 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
877 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 877 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
878 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 878 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
879 /* 51 - 1440x480i@120Hz */ 879 /* 51 - 720(1440)x480i@120Hz */
880 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 880 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
881 1602, 1716, 0, 480, 488, 494, 525, 0, 881 801, 858, 0, 480, 488, 494, 525, 0,
882 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 882 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
883 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 883 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
884 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 884 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -892,15 +892,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
892 796, 864, 0, 576, 581, 586, 625, 0, 892 796, 864, 0, 576, 581, 586, 625, 0,
893 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 893 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
894 .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 894 .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
895 /* 54 - 1440x576i@200Hz */ 895 /* 54 - 720(1440)x576i@200Hz */
896 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 896 { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
897 1590, 1728, 0, 576, 580, 586, 625, 0, 897 795, 864, 0, 576, 580, 586, 625, 0,
898 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 898 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
899 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 899 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
900 .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 900 .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
901 /* 55 - 1440x576i@200Hz */ 901 /* 55 - 720(1440)x576i@200Hz */
902 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 902 { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
903 1590, 1728, 0, 576, 580, 586, 625, 0, 903 795, 864, 0, 576, 580, 586, 625, 0,
904 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 904 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
905 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 905 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
906 .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 906 .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -914,15 +914,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
914 798, 858, 0, 480, 489, 495, 525, 0, 914 798, 858, 0, 480, 489, 495, 525, 0,
915 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 915 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
916 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 916 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
917 /* 58 - 1440x480i@240 */ 917 /* 58 - 720(1440)x480i@240 */
918 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 918 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
919 1602, 1716, 0, 480, 488, 494, 525, 0, 919 801, 858, 0, 480, 488, 494, 525, 0,
920 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 920 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
921 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 921 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
922 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 922 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
923 /* 59 - 1440x480i@240 */ 923 /* 59 - 720(1440)x480i@240 */
924 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 924 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
925 1602, 1716, 0, 480, 488, 494, 525, 0, 925 801, 858, 0, 480, 488, 494, 525, 0,
926 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 926 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
927 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 927 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
928 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 928 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
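
The halved entries above are easy to sanity-check: DRM derives the refresh of an interlaced mode as clock / (htotal * vtotal), doubled for interlace, so cutting both the pixel clock and the horizontal timings in half leaves the rate untouched. A standalone sketch in plain C, with the values copied from the table above (the helper name is ours, not DRM's):

#include <stdio.h>

/* Field rate of a mode as DRM derives it: clock is in kHz, and interlaced
 * modes deliver two fields per frame. The helper name is illustrative. */
static int mode_vrefresh(int clock_khz, int htotal, int vtotal, int interlaced)
{
	long long hz = (long long)clock_khz * 1000 * (interlaced ? 2 : 1);

	return (int)((hz + htotal * vtotal / 2) / (htotal * vtotal));
}

int main(void)
{
	/* VIC 51 new entry: 27000 kHz, htotal 858, vtotal 525 -> 120 */
	printf("%d\n", mode_vrefresh(27000, 858, 525, 1));
	/* VIC 54/55 new entries: 54000 kHz, htotal 864, vtotal 625 -> 200 */
	printf("%d\n", mode_vrefresh(54000, 864, 625, 1));
	return 0;
}

DRM_MODE_FLAG_DBLCLK then tells the encoder to repeat each pixel, so the physical link still runs at the old doubled rate while the logical mode is the 720-wide one.
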
@@ -2103,7 +2103,8 @@ static int
2103add_inferred_modes(struct drm_connector *connector, struct edid *edid) 2103add_inferred_modes(struct drm_connector *connector, struct edid *edid)
2104{ 2104{
2105 struct detailed_mode_closure closure = { 2105 struct detailed_mode_closure closure = {
2106 connector, edid, 0, 0, 0 2106 .connector = connector,
2107 .edid = edid,
2107 }; 2108 };
2108 2109
2109 if (version_greater(edid, 1, 0)) 2110 if (version_greater(edid, 1, 0))
@@ -2169,7 +2170,8 @@ add_established_modes(struct drm_connector *connector, struct edid *edid)
2169 ((edid->established_timings.mfg_rsvd & 0x80) << 9); 2170 ((edid->established_timings.mfg_rsvd & 0x80) << 9);
2170 int i, modes = 0; 2171 int i, modes = 0;
2171 struct detailed_mode_closure closure = { 2172 struct detailed_mode_closure closure = {
2172 connector, edid, 0, 0, 0 2173 .connector = connector,
2174 .edid = edid,
2173 }; 2175 };
2174 2176
2175 for (i = 0; i <= EDID_EST_TIMINGS; i++) { 2177 for (i = 0; i <= EDID_EST_TIMINGS; i++) {
@@ -2227,7 +2229,8 @@ add_standard_modes(struct drm_connector *connector, struct edid *edid)
2227{ 2229{
2228 int i, modes = 0; 2230 int i, modes = 0;
2229 struct detailed_mode_closure closure = { 2231 struct detailed_mode_closure closure = {
2230 connector, edid, 0, 0, 0 2232 .connector = connector,
2233 .edid = edid,
2231 }; 2234 };
2232 2235
2233 for (i = 0; i < EDID_STD_TIMINGS; i++) { 2236 for (i = 0; i < EDID_STD_TIMINGS; i++) {
@@ -2313,7 +2316,8 @@ static int
2313add_cvt_modes(struct drm_connector *connector, struct edid *edid) 2316add_cvt_modes(struct drm_connector *connector, struct edid *edid)
2314{ 2317{
2315 struct detailed_mode_closure closure = { 2318 struct detailed_mode_closure closure = {
2316 connector, edid, 0, 0, 0 2319 .connector = connector,
2320 .edid = edid,
2317 }; 2321 };
2318 2322
2319 if (version_greater(edid, 1, 2)) 2323 if (version_greater(edid, 1, 2))
@@ -2357,11 +2361,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2357 u32 quirks) 2361 u32 quirks)
2358{ 2362{
2359 struct detailed_mode_closure closure = { 2363 struct detailed_mode_closure closure = {
2360 connector, 2364 .connector = connector,
2361 edid, 2365 .edid = edid,
2362 1, 2366 .preferred = 1,
2363 quirks, 2367 .quirks = quirks,
2364 0
2365 }; 2368 };
2366 2369
2367 if (closure.preferred && !version_greater(edid, 1, 3)) 2370 if (closure.preferred && !version_greater(edid, 1, 3))
@@ -3433,10 +3436,10 @@ EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
3433/** 3436/**
3434 * drm_assign_hdmi_deep_color_info - detect whether monitor supports 3437 * drm_assign_hdmi_deep_color_info - detect whether monitor supports
3435 * hdmi deep color modes and update drm_display_info if so. 3438 * hdmi deep color modes and update drm_display_info if so.
3436 *
3437 * @edid: monitor EDID information 3439 * @edid: monitor EDID information
3438 * @info: Updated with maximum supported deep color bpc and color format 3440 * @info: Updated with maximum supported deep color bpc and color format
3439 * if deep color supported. 3441 * if deep color supported.
3442 * @connector: DRM connector, used only for debug output
3440 * 3443 *
3441 * Parse the CEA extension according to CEA-861-B. 3444 * Parse the CEA extension according to CEA-861-B.
3442 * Return true if HDMI deep color supported, false if not or unknown. 3445 * Return true if HDMI deep color supported, false if not or unknown.
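
The detailed_mode_closure hunks above replace positional struct initializers with C99 designated initializers: unnamed members are still zero-filled, but adding or reordering struct fields can no longer silently misassign values. A minimal userspace sketch of the idiom (the struct here is illustrative, not the DRM definition):

#include <stdio.h>

/* Illustrative stand-in for struct detailed_mode_closure; not the DRM layout. */
struct mode_closure {
	const char *connector;
	const void *edid;
	int preferred;
	unsigned int quirks;
	int modes;
};

int main(void)
{
	/* Unnamed members (preferred, quirks, modes) are zero-initialized,
	 * just like the old "connector, edid, 0, 0, 0" form, but this version
	 * survives fields being added or reordered. */
	struct mode_closure closure = {
		.connector = "HDMI-A-1",
		.edid = NULL,
	};

	printf("%s: preferred=%d modes=%d\n",
	       closure.connector, closure.preferred, closure.modes);
	return 0;
}
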
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3144db9dc0f1..0c0c39bac23d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -126,7 +126,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
126 126
127 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); 127 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
128 if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) { 128 if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
129 temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL); 129 temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
130 if (!temp) 130 if (!temp)
131 return -ENOMEM; 131 return -ENOMEM;
132 132
@@ -171,60 +171,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
171} 171}
172EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); 172EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
173 173
174static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
175{
176 struct drm_fb_helper_connector *fb_helper_conn;
177 int i;
178
179 for (i = 0; i < fb_helper->connector_count; i++) {
180 struct drm_cmdline_mode *mode;
181 struct drm_connector *connector;
182 char *option = NULL;
183
184 fb_helper_conn = fb_helper->connector_info[i];
185 connector = fb_helper_conn->connector;
186 mode = &fb_helper_conn->cmdline_mode;
187
188 /* do something on return - turn off connector maybe */
189 if (fb_get_options(connector->name, &option))
190 continue;
191
192 if (drm_mode_parse_command_line_for_connector(option,
193 connector,
194 mode)) {
195 if (mode->force) {
196 const char *s;
197 switch (mode->force) {
198 case DRM_FORCE_OFF:
199 s = "OFF";
200 break;
201 case DRM_FORCE_ON_DIGITAL:
202 s = "ON - dig";
203 break;
204 default:
205 case DRM_FORCE_ON:
206 s = "ON";
207 break;
208 }
209
210 DRM_INFO("forcing %s connector %s\n",
211 connector->name, s);
212 connector->force = mode->force;
213 }
214
215 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
216 connector->name,
217 mode->xres, mode->yres,
218 mode->refresh_specified ? mode->refresh : 60,
219 mode->rb ? " reduced blanking" : "",
220 mode->margins ? " with margins" : "",
221 mode->interlace ? " interlaced" : "");
222 }
223
224 }
225 return 0;
226}
227
228static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) 174static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
229{ 175{
230 uint16_t *r_base, *g_base, *b_base; 176 uint16_t *r_base, *g_base, *b_base;
@@ -345,10 +291,17 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
345 291
346 drm_warn_on_modeset_not_all_locked(dev); 292 drm_warn_on_modeset_not_all_locked(dev);
347 293
348 list_for_each_entry(plane, &dev->mode_config.plane_list, head) 294 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
349 if (plane->type != DRM_PLANE_TYPE_PRIMARY) 295 if (plane->type != DRM_PLANE_TYPE_PRIMARY)
350 drm_plane_force_disable(plane); 296 drm_plane_force_disable(plane);
351 297
298 if (dev->mode_config.rotation_property) {
299 drm_mode_plane_set_obj_prop(plane,
300 dev->mode_config.rotation_property,
301 BIT(DRM_ROTATE_0));
302 }
303 }
304
352 for (i = 0; i < fb_helper->crtc_count; i++) { 305 for (i = 0; i < fb_helper->crtc_count; i++) {
353 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 306 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
354 struct drm_crtc *crtc = mode_set->crtc; 307 struct drm_crtc *crtc = mode_set->crtc;
@@ -419,11 +372,11 @@ static bool drm_fb_helper_force_kernel_mode(void)
419 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 372 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
420 continue; 373 continue;
421 374
422 /* NOTE: we use lockless flag below to avoid grabbing other 375 /*
423 * modeset locks. So just trylock the underlying mutex 376 * NOTE: Use trylock mode to avoid deadlocks and sleeping in
424 * directly: 377 * panic context.
425 */ 378 */
426 if (!mutex_trylock(&dev->mode_config.mutex)) { 379 if (__drm_modeset_lock_all(dev, true) != 0) {
427 error = true; 380 error = true;
428 continue; 381 continue;
429 } 382 }
@@ -432,7 +385,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
432 if (ret) 385 if (ret)
433 error = true; 386 error = true;
434 387
435 mutex_unlock(&dev->mode_config.mutex); 388 drm_modeset_unlock_all(dev);
436 } 389 }
437 return error; 390 return error;
438} 391}
@@ -1013,7 +966,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1013 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; 966 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
1014 struct drm_cmdline_mode *cmdline_mode; 967 struct drm_cmdline_mode *cmdline_mode;
1015 968
1016 cmdline_mode = &fb_helper_conn->cmdline_mode; 969 cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
1017 970
1018 if (cmdline_mode->bpp_specified) { 971 if (cmdline_mode->bpp_specified) {
1019 switch (cmdline_mode->bpp) { 972 switch (cmdline_mode->bpp) {
@@ -1260,9 +1213,7 @@ EXPORT_SYMBOL(drm_has_preferred_mode);
1260 1213
1261static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) 1214static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
1262{ 1215{
1263 struct drm_cmdline_mode *cmdline_mode; 1216 return fb_connector->connector->cmdline_mode.specified;
1264 cmdline_mode = &fb_connector->cmdline_mode;
1265 return cmdline_mode->specified;
1266} 1217}
1267 1218
1268struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 1219struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
@@ -1272,7 +1223,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
1272 struct drm_display_mode *mode = NULL; 1223 struct drm_display_mode *mode = NULL;
1273 bool prefer_non_interlace; 1224 bool prefer_non_interlace;
1274 1225
1275 cmdline_mode = &fb_helper_conn->cmdline_mode; 1226 cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
1276 if (cmdline_mode->specified == false) 1227 if (cmdline_mode->specified == false)
1277 return mode; 1228 return mode;
1278 1229
@@ -1657,8 +1608,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1657 struct drm_device *dev = fb_helper->dev; 1608 struct drm_device *dev = fb_helper->dev;
1658 int count = 0; 1609 int count = 0;
1659 1610
1660 drm_fb_helper_parse_command_line(fb_helper);
1661
1662 mutex_lock(&dev->mode_config.mutex); 1611 mutex_lock(&dev->mode_config.mutex);
1663 count = drm_fb_helper_probe_connector_modes(fb_helper, 1612 count = drm_fb_helper_probe_connector_modes(fb_helper,
1664 dev->mode_config.max_width, 1613 dev->mode_config.max_width,
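
The one-character krealloc fix at the top of this file is worth dwelling on: connector_info is an array of pointers, so the element size must be the size of a pointer, not of the pointed-to struct. Deriving the size from the array variable itself makes the mistake impossible. A hedged userspace sketch of the same pattern (realloc standing in for krealloc, names ours):

#include <stdlib.h>

struct connector;	/* opaque, standing in for drm_fb_helper_connector */

/* Grow an array of pointers by one slot. sizeof(*list) is the size of one
 * element of the array -- a pointer -- so it cannot drift out of sync the
 * way spelling out sizeof(struct connector) (the object) did above. */
static struct connector **grow(struct connector **list, size_t count)
{
	struct connector **tmp = realloc(list, sizeof(*list) * (count + 1));

	return tmp;	/* NULL on failure; the caller's old list stays valid */
}

int main(void)
{
	struct connector **list = grow(NULL, 0);

	if (!list)
		return 1;
	free(list);
	return 0;
}
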
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 79d5221c6e41..ed7bc68f7e87 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -39,10 +39,10 @@
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include "drm_legacy.h" 41#include "drm_legacy.h"
42#include "drm_internal.h"
42 43
43/* from BKL pushdown */ 44/* from BKL pushdown */
44DEFINE_MUTEX(drm_global_mutex); 45DEFINE_MUTEX(drm_global_mutex);
45EXPORT_SYMBOL(drm_global_mutex);
46 46
47static int drm_open_helper(struct file *filp, struct drm_minor *minor); 47static int drm_open_helper(struct file *filp, struct drm_minor *minor);
48 48
@@ -171,7 +171,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
171 init_waitqueue_head(&priv->event_wait); 171 init_waitqueue_head(&priv->event_wait);
172 priv->event_space = 4096; /* set aside 4k for event buffer */ 172 priv->event_space = 4096; /* set aside 4k for event buffer */
173 173
174 if (dev->driver->driver_features & DRIVER_GEM) 174 if (drm_core_check_feature(dev, DRIVER_GEM))
175 drm_gem_open(dev, priv); 175 drm_gem_open(dev, priv);
176 176
177 if (drm_core_check_feature(dev, DRIVER_PRIME)) 177 if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -256,7 +256,7 @@ out_close:
256out_prime_destroy: 256out_prime_destroy:
257 if (drm_core_check_feature(dev, DRIVER_PRIME)) 257 if (drm_core_check_feature(dev, DRIVER_PRIME))
258 drm_prime_destroy_file_private(&priv->prime); 258 drm_prime_destroy_file_private(&priv->prime);
259 if (dev->driver->driver_features & DRIVER_GEM) 259 if (drm_core_check_feature(dev, DRIVER_GEM))
260 drm_gem_release(dev, priv); 260 drm_gem_release(dev, priv);
261 put_pid(priv->pid); 261 put_pid(priv->pid);
262 kfree(priv); 262 kfree(priv);
@@ -268,11 +268,11 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
268{ 268{
269 struct drm_file *file_priv = filp->private_data; 269 struct drm_file *file_priv = filp->private_data;
270 270
271 if (drm_i_have_hw_lock(dev, file_priv)) { 271 if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
272 DRM_DEBUG("File %p released, freeing lock for context %d\n", 272 DRM_DEBUG("File %p released, freeing lock for context %d\n",
273 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); 273 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
274 drm_lock_free(&file_priv->master->lock, 274 drm_legacy_lock_free(&file_priv->master->lock,
275 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); 275 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
276 } 276 }
277} 277}
278 278
@@ -330,8 +330,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
330 */ 330 */
331int drm_lastclose(struct drm_device * dev) 331int drm_lastclose(struct drm_device * dev)
332{ 332{
333 struct drm_vma_entry *vma, *vma_temp;
334
335 DRM_DEBUG("\n"); 333 DRM_DEBUG("\n");
336 334
337 if (dev->driver->lastclose) 335 if (dev->driver->lastclose)
@@ -346,13 +344,7 @@ int drm_lastclose(struct drm_device * dev)
346 drm_agp_clear(dev); 344 drm_agp_clear(dev);
347 345
348 drm_legacy_sg_cleanup(dev); 346 drm_legacy_sg_cleanup(dev);
349 347 drm_legacy_vma_flush(dev);
350 /* Clear vma list (only built for debugging) */
351 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
352 list_del(&vma->head);
353 kfree(vma);
354 }
355
356 drm_legacy_dma_takedown(dev); 348 drm_legacy_dma_takedown(dev);
357 349
358 mutex_unlock(&dev->struct_mutex); 350 mutex_unlock(&dev->struct_mutex);
@@ -412,14 +404,14 @@ int drm_release(struct inode *inode, struct file *filp)
412 drm_master_release(dev, filp); 404 drm_master_release(dev, filp);
413 405
414 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 406 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
415 drm_core_reclaim_buffers(dev, file_priv); 407 drm_legacy_reclaim_buffers(dev, file_priv);
416 408
417 drm_events_release(file_priv); 409 drm_events_release(file_priv);
418 410
419 if (dev->driver->driver_features & DRIVER_MODESET) 411 if (drm_core_check_feature(dev, DRIVER_MODESET))
420 drm_fb_release(file_priv); 412 drm_fb_release(file_priv);
421 413
422 if (dev->driver->driver_features & DRIVER_GEM) 414 if (drm_core_check_feature(dev, DRIVER_GEM))
423 drm_gem_release(dev, file_priv); 415 drm_gem_release(dev, file_priv);
424 416
425 drm_legacy_ctxbitmap_flush(dev, file_priv); 417 drm_legacy_ctxbitmap_flush(dev, file_priv);
@@ -464,6 +456,8 @@ int drm_release(struct inode *inode, struct file *filp)
464 if (drm_core_check_feature(dev, DRIVER_PRIME)) 456 if (drm_core_check_feature(dev, DRIVER_PRIME))
465 drm_prime_destroy_file_private(&file_priv->prime); 457 drm_prime_destroy_file_private(&file_priv->prime);
466 458
459 WARN_ON(!list_empty(&file_priv->event_list));
460
467 put_pid(file_priv->pid); 461 put_pid(file_priv->pid);
468 kfree(file_priv); 462 kfree(file_priv);
469 463
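
Several hunks above swap open-coded `dev->driver->driver_features & DRIVER_GEM` tests for drm_core_check_feature(), which centralizes the bitmask logic behind a readable predicate. A self-contained userspace sketch of that shape (flag values and struct are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative feature bits; the real DRIVER_* flags live in drmP.h. */
enum { DRIVER_GEM = 0x1, DRIVER_MODESET = 0x2, DRIVER_PRIME = 0x4 };

struct device { unsigned int driver_features; };

/* One predicate instead of scattered bitmask tests; mirrors what
 * drm_core_check_feature() does for struct drm_device. */
static bool check_feature(const struct device *dev, unsigned int feature)
{
	return (dev->driver_features & feature) != 0;
}

int main(void)
{
	struct device dev = { .driver_features = DRIVER_GEM | DRIVER_PRIME };

	if (check_feature(&dev, DRIVER_GEM))
		puts("GEM supported");
	if (!check_feature(&dev, DRIVER_MODESET))
		puts("MODESET not supported");
	return 0;
}
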
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6adee4c2afc0..f6ca51259fa3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -38,6 +38,8 @@
38#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
39#include <drm/drmP.h> 39#include <drm/drmP.h>
40#include <drm/drm_vma_manager.h> 40#include <drm/drm_vma_manager.h>
41#include <drm/drm_gem.h>
42#include "drm_internal.h"
41 43
42/** @file drm_gem.c 44/** @file drm_gem.c
43 * 45 *
@@ -146,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
146EXPORT_SYMBOL(drm_gem_object_init); 148EXPORT_SYMBOL(drm_gem_object_init);
147 149
148/** 150/**
149 * drm_gem_object_init - initialize an allocated private GEM object 151 * drm_gem_private_object_init - initialize an allocated private GEM object
150 * @dev: drm_device the object should be initialized for 152 * @dev: drm_device the object should be initialized for
151 * @obj: drm_gem_object to initialize 153 * @obj: drm_gem_object to initialize
152 * @size: object size 154 * @size: object size
@@ -579,7 +581,7 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
579 struct drm_gem_close *args = data; 581 struct drm_gem_close *args = data;
580 int ret; 582 int ret;
581 583
582 if (!(dev->driver->driver_features & DRIVER_GEM)) 584 if (!drm_core_check_feature(dev, DRIVER_GEM))
583 return -ENODEV; 585 return -ENODEV;
584 586
585 ret = drm_gem_handle_delete(file_priv, args->handle); 587 ret = drm_gem_handle_delete(file_priv, args->handle);
@@ -606,7 +608,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
606 struct drm_gem_object *obj; 608 struct drm_gem_object *obj;
607 int ret; 609 int ret;
608 610
609 if (!(dev->driver->driver_features & DRIVER_GEM)) 611 if (!drm_core_check_feature(dev, DRIVER_GEM))
610 return -ENODEV; 612 return -ENODEV;
611 613
612 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 614 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -659,7 +661,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
659 int ret; 661 int ret;
660 u32 handle; 662 u32 handle;
661 663
662 if (!(dev->driver->driver_features & DRIVER_GEM)) 664 if (!drm_core_check_feature(dev, DRIVER_GEM))
663 return -ENODEV; 665 return -ENODEV;
664 666
665 mutex_lock(&dev->object_name_lock); 667 mutex_lock(&dev->object_name_lock);
@@ -887,7 +889,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
887 vma_pages(vma)); 889 vma_pages(vma));
888 if (!node) { 890 if (!node) {
889 mutex_unlock(&dev->struct_mutex); 891 mutex_unlock(&dev->struct_mutex);
890 return drm_mmap(filp, vma); 892 return -EINVAL;
891 } else if (!drm_vma_node_is_allowed(node, filp)) { 893 } else if (!drm_vma_node_is_allowed(node, filp)) {
892 mutex_unlock(&dev->struct_mutex); 894 mutex_unlock(&dev->struct_mutex);
893 return -EACCES; 895 return -EACCES;
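
The drm_gem_mmap change above tightens the fallback path: a fake offset the vma manager does not know is now a hard -EINVAL rather than a silent detour into legacy drm_mmap(), while a known node the caller may not touch stays -EACCES. A toy userspace model of that lookup-then-authorize flow (the table is hypothetical, not the drm_vma_manager API):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct vma_node { int exists; int allowed; };

/* Hypothetical offset table standing in for the drm_vma_manager. */
static const struct vma_node nodes[4] = {
	[1] = { .exists = 1, .allowed = 1 },
	[2] = { .exists = 1, .allowed = 0 },
};

static int toy_mmap(size_t pgoff)
{
	if (pgoff >= 4 || !nodes[pgoff].exists)
		return -EINVAL;	/* unknown offset: no legacy fallback */
	if (!nodes[pgoff].allowed)
		return -EACCES;	/* node exists but is foreign to this file */
	return 0;		/* set up the mapping */
}

int main(void)
{
	printf("%d %d %d\n", toy_mmap(0), toy_mmap(1), toy_mmap(2));
	return 0;
}
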
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e467e67af6e7..0316310e2cc4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -316,7 +316,8 @@ out:
316EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table); 316EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
317 317
318struct drm_gem_object * 318struct drm_gem_object *
319drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, 319drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
320 struct dma_buf_attachment *attach,
320 struct sg_table *sgt) 321 struct sg_table *sgt)
321{ 322{
322 struct drm_gem_cma_object *cma_obj; 323 struct drm_gem_cma_object *cma_obj;
@@ -325,14 +326,14 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
325 return ERR_PTR(-EINVAL); 326 return ERR_PTR(-EINVAL);
326 327
327 /* Create a CMA GEM buffer. */ 328 /* Create a CMA GEM buffer. */
328 cma_obj = __drm_gem_cma_create(dev, size); 329 cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
329 if (IS_ERR(cma_obj)) 330 if (IS_ERR(cma_obj))
330 return ERR_CAST(cma_obj); 331 return ERR_CAST(cma_obj);
331 332
332 cma_obj->paddr = sg_dma_address(sgt->sgl); 333 cma_obj->paddr = sg_dma_address(sgt->sgl);
333 cma_obj->sgt = sgt; 334 cma_obj->sgt = sgt;
334 335
335 DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size); 336 DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
336 337
337 return &cma_obj->base; 338 return &cma_obj->base;
338} 339}
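
Passing the dma_buf_attachment instead of a separate size lets the CMA import helper read the size from attach->dmabuf->size, the exporter's single source of truth, so importer and exporter can never disagree. A minimal userspace sketch with stand-in structs (the real ones live in <linux/dma-buf.h>):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types. */
struct dma_buf { size_t size; };
struct dma_buf_attachment { struct dma_buf *dmabuf; };

/* Deriving the size from the attachment itself means the importer can
 * never be handed a size that disagrees with the exporter's buffer. */
static size_t import_size(const struct dma_buf_attachment *attach)
{
	return attach->dmabuf->size;
}

int main(void)
{
	struct dma_buf buf = { .size = 4096 };
	struct dma_buf_attachment attach = { .dmabuf = &buf };

	printf("importing %zu bytes\n", import_size(&attach));
	return 0;
}
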
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index ecaf0fa2eec8..51efebd434f3 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -35,6 +35,9 @@
35 35
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include <drm/drm_gem.h>
39
40#include "drm_legacy.h"
38 41
39/** 42/**
40 * Called when "/proc/dri/.../name" is read. 43 * Called when "/proc/dri/.../name" is read.
@@ -183,15 +186,32 @@ int drm_clients_info(struct seq_file *m, void *data)
183 struct drm_device *dev = node->minor->dev; 186 struct drm_device *dev = node->minor->dev;
184 struct drm_file *priv; 187 struct drm_file *priv;
185 188
189 seq_printf(m,
190 "%20s %5s %3s master a %5s %10s\n",
191 "command",
192 "pid",
193 "dev",
194 "uid",
195 "magic");
196
 197 /* dev->filelist is sorted youngest first, but we want to present the
 198 * oldest first (i.e. kernel, servers, clients), so walk backwards.
199 */
186 mutex_lock(&dev->struct_mutex); 200 mutex_lock(&dev->struct_mutex);
187 seq_printf(m, "a dev pid uid magic\n\n"); 201 list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
188 list_for_each_entry(priv, &dev->filelist, lhead) { 202 struct task_struct *task;
189 seq_printf(m, "%c %3d %5d %5d %10u\n", 203
190 priv->authenticated ? 'y' : 'n', 204 rcu_read_lock(); /* locks pid_task()->comm */
191 priv->minor->index, 205 task = pid_task(priv->pid, PIDTYPE_PID);
206 seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
207 task ? task->comm : "<unknown>",
192 pid_vnr(priv->pid), 208 pid_vnr(priv->pid),
209 priv->minor->index,
210 priv->is_master ? 'y' : 'n',
211 priv->authenticated ? 'y' : 'n',
193 from_kuid_munged(seq_user_ns(m), priv->uid), 212 from_kuid_munged(seq_user_ns(m), priv->uid),
194 priv->magic); 213 priv->magic);
214 rcu_read_unlock();
195 } 215 }
196 mutex_unlock(&dev->struct_mutex); 216 mutex_unlock(&dev->struct_mutex);
197 return 0; 217 return 0;
@@ -223,62 +243,3 @@ int drm_gem_name_info(struct seq_file *m, void *data)
223 243
224 return 0; 244 return 0;
225} 245}
226
227#if DRM_DEBUG_CODE
228
229int drm_vma_info(struct seq_file *m, void *data)
230{
231 struct drm_info_node *node = (struct drm_info_node *) m->private;
232 struct drm_device *dev = node->minor->dev;
233 struct drm_vma_entry *pt;
234 struct vm_area_struct *vma;
235 unsigned long vma_count = 0;
236#if defined(__i386__)
237 unsigned int pgprot;
238#endif
239
240 mutex_lock(&dev->struct_mutex);
241 list_for_each_entry(pt, &dev->vmalist, head)
242 vma_count++;
243
244 seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
245 vma_count, high_memory,
246 (void *)(unsigned long)virt_to_phys(high_memory));
247
248 list_for_each_entry(pt, &dev->vmalist, head) {
249 vma = pt->vma;
250 if (!vma)
251 continue;
252 seq_printf(m,
253 "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
254 pt->pid,
255 (void *)vma->vm_start, (void *)vma->vm_end,
256 vma->vm_flags & VM_READ ? 'r' : '-',
257 vma->vm_flags & VM_WRITE ? 'w' : '-',
258 vma->vm_flags & VM_EXEC ? 'x' : '-',
259 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
260 vma->vm_flags & VM_LOCKED ? 'l' : '-',
261 vma->vm_flags & VM_IO ? 'i' : '-',
262 vma->vm_pgoff);
263
264#if defined(__i386__)
265 pgprot = pgprot_val(vma->vm_page_prot);
266 seq_printf(m, " %c%c%c%c%c%c%c%c%c",
267 pgprot & _PAGE_PRESENT ? 'p' : '-',
268 pgprot & _PAGE_RW ? 'w' : 'r',
269 pgprot & _PAGE_USER ? 'u' : 's',
270 pgprot & _PAGE_PWT ? 't' : 'b',
271 pgprot & _PAGE_PCD ? 'u' : 'c',
272 pgprot & _PAGE_ACCESSED ? 'a' : '-',
273 pgprot & _PAGE_DIRTY ? 'd' : '-',
274 pgprot & _PAGE_PSE ? 'm' : 'k',
275 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
276#endif
277 seq_printf(m, "\n");
278 }
279 mutex_unlock(&dev->struct_mutex);
280 return 0;
281}
282
283#endif
284
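
The reworked clients_info resolves each client's pid to a process name via pid_task() under rcu_read_lock(). A runnable userspace analog reads the same string from procfs (this illustrates the lookup only; it is not kernel code):

#include <stdio.h>

/* Userspace analog of the clients_info change above: resolve a pid to its
 * command name. In the kernel this is pid_task(...)->comm under
 * rcu_read_lock(); from userspace the same string is /proc/<pid>/comm. */
static int print_comm(int pid)
{
	char path[64], comm[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/comm", pid);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* task gone, like the "<unknown>" case above */
	if (fgets(comm, sizeof(comm), f))
		printf("%d %s", pid, comm);
	fclose(f);
	return 0;
}

int main(void)
{
	return print_comm(1);	/* e.g. init/systemd */
}
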
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
new file mode 100644
index 000000000000..7cc0a3516871
--- /dev/null
+++ b/drivers/gpu/drm/drm_internal.h
@@ -0,0 +1,132 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 * Daniel Vetter <daniel.vetter@ffwll.ch>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24/* drm_irq.c */
25extern unsigned int drm_timestamp_monotonic;
26
27/* drm_fops.c */
28extern struct mutex drm_global_mutex;
29int drm_lastclose(struct drm_device *dev);
30
31/* drm_pci.c */
32int drm_pci_set_unique(struct drm_device *dev,
33 struct drm_master *master,
34 struct drm_unique *u);
35int drm_irq_by_busid(struct drm_device *dev, void *data,
36 struct drm_file *file_priv);
37
38/* drm_vm.c */
39int drm_vma_info(struct seq_file *m, void *data);
40void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
41void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
42
43/* drm_prime.c */
44int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
45 struct drm_file *file_priv);
46int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
47 struct drm_file *file_priv);
48
49void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
50void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
51void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
52 struct dma_buf *dma_buf);
53
54/* drm_info.c */
55int drm_name_info(struct seq_file *m, void *data);
56int drm_vm_info(struct seq_file *m, void *data);
57int drm_bufs_info(struct seq_file *m, void *data);
58int drm_vblank_info(struct seq_file *m, void *data);
59int drm_clients_info(struct seq_file *m, void* data);
60int drm_gem_name_info(struct seq_file *m, void *data);
61
62/* drm_irq.c */
63int drm_control(struct drm_device *dev, void *data,
64 struct drm_file *file_priv);
65int drm_modeset_ctl(struct drm_device *dev, void *data,
66 struct drm_file *file_priv);
67
68/* drm_auth.c */
69int drm_getmagic(struct drm_device *dev, void *data,
70 struct drm_file *file_priv);
71int drm_authmagic(struct drm_device *dev, void *data,
72 struct drm_file *file_priv);
73int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
74
75/* drm_sysfs.c */
76extern struct class *drm_class;
77
78struct class *drm_sysfs_create(struct module *owner, char *name);
79void drm_sysfs_destroy(void);
80struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
81int drm_sysfs_connector_add(struct drm_connector *connector);
82void drm_sysfs_connector_remove(struct drm_connector *connector);
83
84/* drm_gem.c */
85int drm_gem_init(struct drm_device *dev);
86void drm_gem_destroy(struct drm_device *dev);
87int drm_gem_handle_create_tail(struct drm_file *file_priv,
88 struct drm_gem_object *obj,
89 u32 *handlep);
90int drm_gem_close_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file_priv);
92int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
93 struct drm_file *file_priv);
94int drm_gem_open_ioctl(struct drm_device *dev, void *data,
95 struct drm_file *file_priv);
96void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
97void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
98
99/* drm_drv.c */
100int drm_setmaster_ioctl(struct drm_device *dev, void *data,
101 struct drm_file *file_priv);
102int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
103 struct drm_file *file_priv);
104struct drm_master *drm_master_create(struct drm_minor *minor);
105
106/* drm_debugfs.c */
107#if defined(CONFIG_DEBUG_FS)
108int drm_debugfs_init(struct drm_minor *minor, int minor_id,
109 struct dentry *root);
110int drm_debugfs_cleanup(struct drm_minor *minor);
111int drm_debugfs_connector_add(struct drm_connector *connector);
112void drm_debugfs_connector_remove(struct drm_connector *connector);
113#else
114static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
115 struct dentry *root)
116{
117 return 0;
118}
119
120static inline int drm_debugfs_cleanup(struct drm_minor *minor)
121{
122 return 0;
123}
124
125static inline int drm_debugfs_connector_add(struct drm_connector *connector)
126{
127 return 0;
128}
129static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
130{
131}
132#endif
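
drm_internal.h keeps CONFIG_DEBUG_FS conditionals out of every caller by pairing the real declarations with static inline no-op stubs, so call sites compile unchanged either way. A compile-time sketch of the pattern (HAVE_DEBUGFS is a stand-in for the Kconfig symbol):

#include <stdio.h>

/* Toggle standing in for CONFIG_DEBUG_FS. */
/* #define HAVE_DEBUGFS 1 */

#ifdef HAVE_DEBUGFS
int debugfs_init(int minor_id)
{
	printf("debugfs for minor %d\n", minor_id);
	return 0;
}
#else
/* Static inline no-op stub: callers compile unchanged and the optimizer
 * deletes the call entirely when the feature is configured out. */
static inline int debugfs_init(int minor_id)
{
	(void)minor_id;
	return 0;
}
#endif

int main(void)
{
	return debugfs_init(0);	/* no #ifdef needed at the call site */
}
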
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 40be746b7e68..00587a1e3c83 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -31,6 +31,7 @@
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include <drm/drm_core.h> 32#include <drm/drm_core.h>
33#include "drm_legacy.h" 33#include "drm_legacy.h"
34#include "drm_internal.h"
34 35
35#include <linux/pci.h> 36#include <linux/pci.h>
36#include <linux/export.h> 37#include <linux/export.h>
@@ -41,121 +42,6 @@
41static int drm_version(struct drm_device *dev, void *data, 42static int drm_version(struct drm_device *dev, void *data,
42 struct drm_file *file_priv); 43 struct drm_file *file_priv);
43 44
44#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
45 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
46
47/** Ioctl table */
48static const struct drm_ioctl_desc drm_ioctls[] = {
49 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
50 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
51 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
52 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
53 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
54 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
55 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
56 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
57 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
58 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
59
60 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
61 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
62 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
63 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
64
65 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
66 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
67
68 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
70
71 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
72 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
73
74 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
75 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
76 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
77 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
78 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
79 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
80 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
81
82 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
83 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
84
85 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
86 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
87
88 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
89
90 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
92 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
93 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
94 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
95 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
96
97 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
98
99#if __OS_HAS_AGP
100 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
102 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
103 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
104 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
105 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
107 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
108#endif
109
110 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
111 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
112
113 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
114
115 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
116
117 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
118
119 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
120 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
121 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
122
123 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
124
125 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
126 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
127
128 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
129 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
130 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
131 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
132 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
133 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
134 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
135 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
140 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
141 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
150 DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
151 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
152 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
153 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
154 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
155};
156
157#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
158
159/** 45/**
160 * Get the bus id. 46 * Get the bus id.
161 * 47 *
@@ -167,7 +53,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
167 * 53 *
168 * Copies the bus id from drm_device::unique into user space. 54 * Copies the bus id from drm_device::unique into user space.
169 */ 55 */
170int drm_getunique(struct drm_device *dev, void *data, 56static int drm_getunique(struct drm_device *dev, void *data,
171 struct drm_file *file_priv) 57 struct drm_file *file_priv)
172{ 58{
173 struct drm_unique *u = data; 59 struct drm_unique *u = data;
@@ -189,7 +75,6 @@ drm_unset_busid(struct drm_device *dev,
189 kfree(master->unique); 75 kfree(master->unique);
190 master->unique = NULL; 76 master->unique = NULL;
191 master->unique_len = 0; 77 master->unique_len = 0;
192 master->unique_size = 0;
193} 78}
194 79
195/** 80/**
@@ -207,7 +92,7 @@ drm_unset_busid(struct drm_device *dev,
207 * version 1.1 or greater. Also note that KMS is all version 1.1 and later and 92 * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
208 * UMS was only ever supported on pci devices. 93 * UMS was only ever supported on pci devices.
209 */ 94 */
210int drm_setunique(struct drm_device *dev, void *data, 95static int drm_setunique(struct drm_device *dev, void *data,
211 struct drm_file *file_priv) 96 struct drm_file *file_priv)
212{ 97{
213 struct drm_unique *u = data; 98 struct drm_unique *u = data;
@@ -245,15 +130,15 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
245 if (master->unique != NULL) 130 if (master->unique != NULL)
246 drm_unset_busid(dev, master); 131 drm_unset_busid(dev, master);
247 132
248 if (dev->driver->bus && dev->driver->bus->set_busid) { 133 if (dev->driver->set_busid) {
249 ret = dev->driver->bus->set_busid(dev, master); 134 ret = dev->driver->set_busid(dev, master);
250 if (ret) { 135 if (ret) {
251 drm_unset_busid(dev, master); 136 drm_unset_busid(dev, master);
252 return ret; 137 return ret;
253 } 138 }
254 } else { 139 } else {
255 if (WARN(dev->unique == NULL, 140 if (WARN(dev->unique == NULL,
256 "No drm_bus.set_busid() implementation provided by " 141 "No drm_driver.set_busid() implementation provided by "
257 "%ps. Use drm_dev_set_unique() to set the unique " 142 "%ps. Use drm_dev_set_unique() to set the unique "
258 "name explicitly.", dev->driver)) 143 "name explicitly.", dev->driver))
259 return -EINVAL; 144 return -EINVAL;
@@ -279,7 +164,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
279 * Searches for the mapping with the specified offset and copies its information 164 * Searches for the mapping with the specified offset and copies its information
280 * into userspace 165 * into userspace
281 */ 166 */
282int drm_getmap(struct drm_device *dev, void *data, 167static int drm_getmap(struct drm_device *dev, void *data,
283 struct drm_file *file_priv) 168 struct drm_file *file_priv)
284{ 169{
285 struct drm_map *map = data; 170 struct drm_map *map = data;
@@ -340,7 +225,7 @@ int drm_getmap(struct drm_device *dev, void *data,
340 * Searches for the client with the specified index and copies its information 225 * Searches for the client with the specified index and copies its information
341 * into userspace 226 * into userspace
342 */ 227 */
343int drm_getclient(struct drm_device *dev, void *data, 228static int drm_getclient(struct drm_device *dev, void *data,
344 struct drm_file *file_priv) 229 struct drm_file *file_priv)
345{ 230{
346 struct drm_client *client = data; 231 struct drm_client *client = data;
@@ -380,7 +265,7 @@ int drm_getclient(struct drm_device *dev, void *data,
380 * 265 *
381 * \return zero on success or a negative number on failure. 266 * \return zero on success or a negative number on failure.
382 */ 267 */
383int drm_getstats(struct drm_device *dev, void *data, 268static int drm_getstats(struct drm_device *dev, void *data,
384 struct drm_file *file_priv) 269 struct drm_file *file_priv)
385{ 270{
386 struct drm_stats *stats = data; 271 struct drm_stats *stats = data;
@@ -394,7 +279,7 @@ int drm_getstats(struct drm_device *dev, void *data,
394/** 279/**
395 * Get device/driver capabilities 280 * Get device/driver capabilities
396 */ 281 */
397int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) 282static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
398{ 283{
399 struct drm_get_cap *req = data; 284 struct drm_get_cap *req = data;
400 285
@@ -444,7 +329,7 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
444/** 329/**
445 * Set device/driver capabilities 330 * Set device/driver capabilities
446 */ 331 */
447int 332static int
448drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) 333drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
449{ 334{
450 struct drm_set_client_cap *req = data; 335 struct drm_set_client_cap *req = data;
@@ -478,7 +363,7 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
478 * 363 *
479 * Sets the requested interface version 364 * Sets the requested interface version
480 */ 365 */
481int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) 366static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
482{ 367{
483 struct drm_set_version *sv = data; 368 struct drm_set_version *sv = data;
484 int if_version, retcode = 0; 369 int if_version, retcode = 0;
@@ -624,6 +509,121 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
624 return 0; 509 return 0;
625} 510}
626 511
512#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
513 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
514
515/** Ioctl table */
516static const struct drm_ioctl_desc drm_ioctls[] = {
517 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
518 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
519 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
520 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
521 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
522 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
523 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
524 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
525 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
526 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
527
528 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
529 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
530 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
531 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
532
533 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
534 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
535
536 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
537 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
538
539 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
540 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
541
542 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
543 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
544 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
545 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
546 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
547 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
548 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
549
550 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
551 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
552
553 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
554 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
555
556 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
557
558 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
559 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
560 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
561 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
562 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
563 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
564
565 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
566
567#if __OS_HAS_AGP
568 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
569 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
570 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
571 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
572 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
573 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
574 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
575 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
576#endif
577
578 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
579 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
580
581 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
582
583 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
584
585 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
586
587 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
588 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
589 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
590
591 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
592
593 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
594 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
595
596 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
597 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
598 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
599 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
600 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
601 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
602 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
603 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
604 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
605 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
606 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
607 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
608 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
609 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
610 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
611 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
612 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
613 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
614 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
615 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
616 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
617 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
618 DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
619 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
620 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
621 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
622 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
623};
624
625#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
626
627/** 627/**
628 * Called whenever a process performs an ioctl on /dev/drm. 628 * Called whenever a process performs an ioctl on /dev/drm.
629 * 629 *
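
Moving the ioctl table below the now-static handlers drops a page of forward declarations, and DRM_IOCTL_DEF's `[DRM_IOCTL_NR(ioctl)] = {...}` form uses C99 designated array indices: each entry lands in the slot its command number selects regardless of the order it is written in, and dispatch becomes a bounds-checked array lookup. A small userspace sketch of that table shape (command numbers and names are illustrative):

#include <stdio.h>

/* Illustrative command numbers; DRM_IOCTL_NR() extracts these from the
 * real ioctl encoding. */
enum { CMD_VERSION = 0, CMD_GET_UNIQUE = 1, CMD_MAX };

struct cmd_desc {
	const char *name;
	int (*func)(void);
};

static int do_version(void)    { puts("version");    return 0; }
static int do_get_unique(void) { puts("get_unique"); return 0; }

/* Designated indices, like [DRM_IOCTL_NR(ioctl)] = {...} above: each entry
 * occupies the slot its number selects, in any source order. */
static const struct cmd_desc cmds[] = {
	[CMD_GET_UNIQUE] = { "GET_UNIQUE", do_get_unique },
	[CMD_VERSION]    = { "VERSION",    do_version },
};

static int dispatch(unsigned int nr)
{
	if (nr >= CMD_MAX || !cmds[nr].func)
		return -1;	/* unknown or unimplemented command */
	return cmds[nr].func();
}

int main(void)
{
	return dispatch(CMD_VERSION);
}
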
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 08ba1209228e..5ef03c216a27 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -34,6 +34,7 @@
34 34
35#include <drm/drmP.h> 35#include <drm/drmP.h>
36#include "drm_trace.h" 36#include "drm_trace.h"
37#include "drm_internal.h"
37 38
38#include <linux/interrupt.h> /* For task queue support */ 39#include <linux/interrupt.h> /* For task queue support */
39#include <linux/slab.h> 40#include <linux/slab.h>
@@ -55,12 +56,91 @@
55 */ 56 */
56#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 57#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
57 58
59static bool
60drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
61 struct timeval *tvblank, unsigned flags);
62
63static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
64
58/* 65/*
59 * Clear vblank timestamp buffer for a crtc. 66 * Default to use monotonic timestamps for wait-for-vblank and page-flip
67 * complete events.
68 */
69unsigned int drm_timestamp_monotonic = 1;
70
71static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
72
73module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
74module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76
77/**
78 * drm_update_vblank_count - update the master vblank counter
79 * @dev: DRM device
80 * @crtc: counter to update
81 *
82 * Call back into the driver to update the appropriate vblank counter
83 * (specified by @crtc). Deal with wraparound, if it occurred, and
84 * update the last read value so we can deal with wraparound on the next
85 * call if necessary.
86 *
87 * Only necessary when going from off->on, to account for frames we
88 * didn't get an interrupt for.
89 *
90 * Note: caller must hold dev->vbl_lock since this reads & writes
91 * device vblank fields.
60 */ 92 */
61static void clear_vblank_timestamps(struct drm_device *dev, int crtc) 93static void drm_update_vblank_count(struct drm_device *dev, int crtc)
62{ 94{
63 memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time)); 95 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
96 u32 cur_vblank, diff, tslot;
97 bool rc;
98 struct timeval t_vblank;
99
100 /*
101 * Interrupts were disabled prior to this call, so deal with counter
102 * wrap if needed.
 103 * NOTE! It's possible we lost a full dev->max_vblank_count worth of
 104 * events here if the register is small or we had vblank interrupts
 105 * off for a long time.
106 *
107 * We repeat the hardware vblank counter & timestamp query until
 108 * we get consistent results. This is to prevent a race where the gpu
 109 * updates its hardware counter while we are retrieving the
110 * corresponding vblank timestamp.
111 */
112 do {
113 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
114 rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
115 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
116
117 /* Deal with counter wrap */
118 diff = cur_vblank - vblank->last;
119 if (cur_vblank < vblank->last) {
120 diff += dev->max_vblank_count;
121
122 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
123 crtc, vblank->last, cur_vblank, diff);
124 }
125
126 DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
127 crtc, diff);
128
129 if (diff == 0)
130 return;
131
132 /* Reinitialize corresponding vblank timestamp if high-precision query
 133 * available. Skip this step if the query is unsupported or failed;
 134 * the timestamp will be reinitialized at the next vblank interrupt.
135 */
136 if (rc) {
137 tslot = atomic_read(&vblank->count) + diff;
138 vblanktimestamp(dev, crtc, tslot) = t_vblank;
139 }
140
141 smp_mb__before_atomic();
142 atomic_add(diff, &vblank->count);
143 smp_mb__after_atomic();
64} 144}
65 145
66/* 146/*
@@ -71,10 +151,11 @@ static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
71 */ 151 */
72static void vblank_disable_and_save(struct drm_device *dev, int crtc) 152static void vblank_disable_and_save(struct drm_device *dev, int crtc)
73{ 153{
154 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
74 unsigned long irqflags; 155 unsigned long irqflags;
75 u32 vblcount; 156 u32 vblcount;
76 s64 diff_ns; 157 s64 diff_ns;
77 int vblrc; 158 bool vblrc;
78 struct timeval tvblank; 159 struct timeval tvblank;
79 int count = DRM_TIMESTAMP_MAXRETRIES; 160 int count = DRM_TIMESTAMP_MAXRETRIES;
80 161
@@ -84,8 +165,28 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
84 */ 165 */
85 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 166 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
86 167
168 /*
 169 * If the vblank interrupt was already disabled, update the count
170 * and timestamp to maintain the appearance that the counter
171 * has been ticking all along until this time. This makes the
172 * count account for the entire time between drm_vblank_on() and
173 * drm_vblank_off().
174 *
175 * But only do this if precise vblank timestamps are available.
176 * Otherwise we might read a totally bogus timestamp since drivers
177 * lacking precise timestamp support rely upon sampling the system clock
 178 * at vblank interrupt time, which obviously won't work out well if the
179 * vblank interrupt is disabled.
180 */
181 if (!vblank->enabled &&
182 drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
183 drm_update_vblank_count(dev, crtc);
184 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
185 return;
186 }
187
87 dev->driver->disable_vblank(dev, crtc); 188 dev->driver->disable_vblank(dev, crtc);
88 dev->vblank[crtc].enabled = false; 189 vblank->enabled = false;
89 190
90 /* No further vblank irq's will be processed after 191 /* No further vblank irq's will be processed after
91 * this point. Get current hardware vblank count and 192 * this point. Get current hardware vblank count and
@@ -100,9 +201,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
100 * delayed gpu counter increment. 201 * delayed gpu counter increment.
101 */ 202 */
102 do { 203 do {
103 dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); 204 vblank->last = dev->driver->get_vblank_counter(dev, crtc);
104 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 205 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
105 } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); 206 } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
106 207
107 if (!count) 208 if (!count)
108 vblrc = 0; 209 vblrc = 0;
@@ -110,7 +211,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
110 /* Compute time difference to stored timestamp of last vblank 211 /* Compute time difference to stored timestamp of last vblank
111 * as updated by last invocation of drm_handle_vblank() in vblank irq. 212 * as updated by last invocation of drm_handle_vblank() in vblank irq.
112 */ 213 */
113 vblcount = atomic_read(&dev->vblank[crtc].count); 214 vblcount = atomic_read(&vblank->count);
114 diff_ns = timeval_to_ns(&tvblank) - 215 diff_ns = timeval_to_ns(&tvblank) -
115 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 216 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
116 217
@@ -126,14 +227,18 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
126 * available. In that case we can't account for this and just 227 * available. In that case we can't account for this and just
127 * hope for the best. 228 * hope for the best.
128 */ 229 */
129 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 230 if (vblrc && (abs64(diff_ns) > 1000000)) {
130 atomic_inc(&dev->vblank[crtc].count); 231 /* Store new timestamp in ringbuffer. */
232 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
233
234 /* Increment cooked vblank count. This also atomically commits
235 * the timestamp computed above.
236 */
237 smp_mb__before_atomic();
238 atomic_inc(&vblank->count);
131 smp_mb__after_atomic(); 239 smp_mb__after_atomic();
132 } 240 }
133 241
134 /* Invalidate all timestamps while vblank irq's are off. */
135 clear_vblank_timestamps(dev, crtc);
136
137 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 242 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
138} 243}
139 244
@@ -164,14 +269,20 @@ static void vblank_disable_fn(unsigned long arg)
164void drm_vblank_cleanup(struct drm_device *dev) 269void drm_vblank_cleanup(struct drm_device *dev)
165{ 270{
166 int crtc; 271 int crtc;
272 unsigned long irqflags;
167 273
168 /* Bail if the driver didn't call drm_vblank_init() */ 274 /* Bail if the driver didn't call drm_vblank_init() */
169 if (dev->num_crtcs == 0) 275 if (dev->num_crtcs == 0)
170 return; 276 return;
171 277
172 for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 278 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
173 del_timer_sync(&dev->vblank[crtc].disable_timer); 279 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
174 vblank_disable_fn((unsigned long)&dev->vblank[crtc]); 280
281 del_timer_sync(&vblank->disable_timer);
282
283 spin_lock_irqsave(&dev->vbl_lock, irqflags);
284 vblank_disable_and_save(dev, crtc);
285 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
175 } 286 }
176 287
177 kfree(dev->vblank); 288 kfree(dev->vblank);
@@ -204,11 +315,13 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
204 goto err; 315 goto err;
205 316
206 for (i = 0; i < num_crtcs; i++) { 317 for (i = 0; i < num_crtcs; i++) {
207 dev->vblank[i].dev = dev; 318 struct drm_vblank_crtc *vblank = &dev->vblank[i];
208 dev->vblank[i].crtc = i; 319
209 init_waitqueue_head(&dev->vblank[i].queue); 320 vblank->dev = dev;
210 setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn, 321 vblank->crtc = i;
211 (unsigned long)&dev->vblank[i]); 322 init_waitqueue_head(&vblank->queue);
323 setup_timer(&vblank->disable_timer, vblank_disable_fn,
324 (unsigned long)vblank);
212 } 325 }
213 326
214 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); 327 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -224,7 +337,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
224 return 0; 337 return 0;
225 338
226err: 339err:
227 drm_vblank_cleanup(dev); 340 dev->num_crtcs = 0;
228 return ret; 341 return ret;
229} 342}
230EXPORT_SYMBOL(drm_vblank_init); 343EXPORT_SYMBOL(drm_vblank_init);
@@ -360,9 +473,11 @@ int drm_irq_uninstall(struct drm_device *dev)
360 if (dev->num_crtcs) { 473 if (dev->num_crtcs) {
361 spin_lock_irqsave(&dev->vbl_lock, irqflags); 474 spin_lock_irqsave(&dev->vbl_lock, irqflags);
362 for (i = 0; i < dev->num_crtcs; i++) { 475 for (i = 0; i < dev->num_crtcs; i++) {
363 wake_up(&dev->vblank[i].queue); 476 struct drm_vblank_crtc *vblank = &dev->vblank[i];
364 dev->vblank[i].enabled = false; 477
365 dev->vblank[i].last = 478 wake_up(&vblank->queue);
479 vblank->enabled = false;
480 vblank->last =
366 dev->driver->get_vblank_counter(dev, i); 481 dev->driver->get_vblank_counter(dev, i);
367 } 482 }
368 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 483 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -617,7 +732,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
617 * within vblank area, counting down the number of lines until 732 * within vblank area, counting down the number of lines until
618 * start of scanout. 733 * start of scanout.
619 */ 734 */
620 invbl = vbl_status & DRM_SCANOUTPOS_INVBL; 735 invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK;
621 736
622 /* Convert scanout position into elapsed time at raw_time query 737 /* Convert scanout position into elapsed time at raw_time query
623 * since start of scanout at first display scanline. delta_ns 738 * since start of scanout at first display scanline. delta_ns
@@ -647,7 +762,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
647 762
648 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; 763 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
649 if (invbl) 764 if (invbl)
650 vbl_status |= DRM_VBLANKTIME_INVBL; 765 vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
651 766
652 return vbl_status; 767 return vbl_status;
653} 768}
@@ -679,10 +794,11 @@ static struct timeval get_drm_timestamp(void)
679 * call, i.e., it isn't very precisely locked to the true vblank. 794 * call, i.e., it isn't very precisely locked to the true vblank.
680 * 795 *
681 * Returns: 796 * Returns:
682 * Non-zero if timestamp is considered to be very precise, zero otherwise. 797 * True if timestamp is considered to be very precise, false otherwise.
683 */ 798 */
684u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 799static bool
685 struct timeval *tvblank, unsigned flags) 800drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
801 struct timeval *tvblank, unsigned flags)
686{ 802{
687 int ret; 803 int ret;
688 804
@@ -694,7 +810,7 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
694 ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, 810 ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
695 tvblank, flags); 811 tvblank, flags);
696 if (ret > 0) 812 if (ret > 0)
697 return (u32) ret; 813 return true;
698 } 814 }
699 815
700 /* GPU high precision timestamp query unsupported or failed. 816 /* GPU high precision timestamp query unsupported or failed.
@@ -702,9 +818,8 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
702 */ 818 */
703 *tvblank = get_drm_timestamp(); 819 *tvblank = get_drm_timestamp();
704 820
705 return 0; 821 return false;
706} 822}
707EXPORT_SYMBOL(drm_get_last_vbltimestamp);
708 823
709/** 824/**
710 * drm_vblank_count - retrieve "cooked" vblank counter value 825 * drm_vblank_count - retrieve "cooked" vblank counter value
@@ -720,7 +835,11 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
720 */ 835 */
721u32 drm_vblank_count(struct drm_device *dev, int crtc) 836u32 drm_vblank_count(struct drm_device *dev, int crtc)
722{ 837{
723 return atomic_read(&dev->vblank[crtc].count); 838 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
839
840 if (WARN_ON(crtc >= dev->num_crtcs))
841 return 0;
842 return atomic_read(&vblank->count);
724} 843}
725EXPORT_SYMBOL(drm_vblank_count); 844EXPORT_SYMBOL(drm_vblank_count);
726 845
@@ -740,18 +859,22 @@ EXPORT_SYMBOL(drm_vblank_count);
740u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 859u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
741 struct timeval *vblanktime) 860 struct timeval *vblanktime)
742{ 861{
862 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
743 u32 cur_vblank; 863 u32 cur_vblank;
744 864
865 if (WARN_ON(crtc >= dev->num_crtcs))
866 return 0;
867
745 /* Read timestamp from slot of _vblank_time ringbuffer 868 /* Read timestamp from slot of _vblank_time ringbuffer
746 * that corresponds to current vblank count. Retry if 869 * that corresponds to current vblank count. Retry if
747 * count has incremented during readout. This works like 870 * count has incremented during readout. This works like
748 * a seqlock. 871 * a seqlock.
749 */ 872 */
750 do { 873 do {
751 cur_vblank = atomic_read(&dev->vblank[crtc].count); 874 cur_vblank = atomic_read(&vblank->count);
752 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 875 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
753 smp_rmb(); 876 smp_rmb();
754 } while (cur_vblank != atomic_read(&dev->vblank[crtc].count)); 877 } while (cur_vblank != atomic_read(&vblank->count));
755 878
756 return cur_vblank; 879 return cur_vblank;
757} 880}
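The "works like a seqlock" remark deserves a spelled-out analogue. Below is a self-contained userspace sketch of the same reader/writer protocol using C11 atomics; all names here are invented for illustration:

	#include <stdatomic.h>

	struct stamp { long sec, usec; };

	static _Atomic unsigned int count;      /* the "cooked" counter */
	static struct stamp ring[4];            /* timestamp slot = count & 3 */

	/* Reader: retry until the count is stable across the slot read. */
	static struct stamp read_stamp(void)
	{
		unsigned int c;
		struct stamp s;

		do {
			c = atomic_load(&count);
			s = ring[c & 3];
			atomic_thread_fence(memory_order_acquire);
		} while (c != atomic_load(&count));
		return s;
	}

	/* Writer: publish the next slot first, then bump the count, mirroring
	 * the vblanktimestamp()-then-atomic_inc() order in the code above. */
	static void write_stamp(struct stamp s)
	{
		ring[(atomic_load(&count) + 1) & 3] = s;
		atomic_thread_fence(memory_order_release);
		atomic_fetch_add(&count, 1);
	}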
@@ -800,83 +923,20 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
800EXPORT_SYMBOL(drm_send_vblank_event); 923EXPORT_SYMBOL(drm_send_vblank_event);
801 924
802/** 925/**
803 * drm_update_vblank_count - update the master vblank counter
804 * @dev: DRM device
805 * @crtc: counter to update
806 *
807 * Call back into the driver to update the appropriate vblank counter
808 * (specified by @crtc). Deal with wraparound, if it occurred, and
809 * update the last read value so we can deal with wraparound on the next
810 * call if necessary.
811 *
812 * Only necessary when going from off->on, to account for frames we
813 * didn't get an interrupt for.
814 *
815 * Note: caller must hold dev->vbl_lock since this reads & writes
816 * device vblank fields.
817 */
818static void drm_update_vblank_count(struct drm_device *dev, int crtc)
819{
820 u32 cur_vblank, diff, tslot, rc;
821 struct timeval t_vblank;
822
823 /*
824 * Interrupts were disabled prior to this call, so deal with counter
825 * wrap if needed.
826 * NOTE! It's possible we lost a full dev->max_vblank_count events
827 * here if the register is small or we had vblank interrupts off for
828 * a long time.
829 *
830 * We repeat the hardware vblank counter & timestamp query until
831 * we get consistent results. This to prevent races between gpu
832 * updating its hardware counter while we are retrieving the
833 * corresponding vblank timestamp.
834 */
835 do {
836 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
837 rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
838 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
839
840 /* Deal with counter wrap */
841 diff = cur_vblank - dev->vblank[crtc].last;
842 if (cur_vblank < dev->vblank[crtc].last) {
843 diff += dev->max_vblank_count;
844
845 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
846 crtc, dev->vblank[crtc].last, cur_vblank, diff);
847 }
848
849 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
850 crtc, diff);
851
852 /* Reinitialize corresponding vblank timestamp if high-precision query
853 * available. Skip this step if query unsupported or failed. Will
854 * reinitialize delayed at next vblank interrupt in that case.
855 */
856 if (rc) {
857 tslot = atomic_read(&dev->vblank[crtc].count) + diff;
858 vblanktimestamp(dev, crtc, tslot) = t_vblank;
859 }
860
861 smp_mb__before_atomic();
862 atomic_add(diff, &dev->vblank[crtc].count);
863 smp_mb__after_atomic();
864}
865
866/**
867 * drm_vblank_enable - enable the vblank interrupt on a CRTC 926 * drm_vblank_enable - enable the vblank interrupt on a CRTC
868 * @dev: DRM device 927 * @dev: DRM device
869 * @crtc: CRTC in question 928 * @crtc: CRTC in question
870 */ 929 */
871static int drm_vblank_enable(struct drm_device *dev, int crtc) 930static int drm_vblank_enable(struct drm_device *dev, int crtc)
872{ 931{
932 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
873 int ret = 0; 933 int ret = 0;
874 934
875 assert_spin_locked(&dev->vbl_lock); 935 assert_spin_locked(&dev->vbl_lock);
876 936
877 spin_lock(&dev->vblank_time_lock); 937 spin_lock(&dev->vblank_time_lock);
878 938
879 if (!dev->vblank[crtc].enabled) { 939 if (!vblank->enabled) {
880 /* 940 /*
881 * Enable vblank irqs under vblank_time_lock protection. 941 * Enable vblank irqs under vblank_time_lock protection.
882 * All vblank count & timestamp updates are held off 942 * All vblank count & timestamp updates are held off
@@ -887,9 +947,9 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
887 ret = dev->driver->enable_vblank(dev, crtc); 947 ret = dev->driver->enable_vblank(dev, crtc);
888 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); 948 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
889 if (ret) 949 if (ret)
890 atomic_dec(&dev->vblank[crtc].refcount); 950 atomic_dec(&vblank->refcount);
891 else { 951 else {
892 dev->vblank[crtc].enabled = true; 952 vblank->enabled = true;
893 drm_update_vblank_count(dev, crtc); 953 drm_update_vblank_count(dev, crtc);
894 } 954 }
895 } 955 }
@@ -914,16 +974,20 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
914 */ 974 */
915int drm_vblank_get(struct drm_device *dev, int crtc) 975int drm_vblank_get(struct drm_device *dev, int crtc)
916{ 976{
977 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
917 unsigned long irqflags; 978 unsigned long irqflags;
918 int ret = 0; 979 int ret = 0;
919 980
981 if (WARN_ON(crtc >= dev->num_crtcs))
982 return -EINVAL;
983
920 spin_lock_irqsave(&dev->vbl_lock, irqflags); 984 spin_lock_irqsave(&dev->vbl_lock, irqflags);
921 /* Going from 0->1 means we have to enable interrupts again */ 985 /* Going from 0->1 means we have to enable interrupts again */
922 if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) { 986 if (atomic_add_return(1, &vblank->refcount) == 1) {
923 ret = drm_vblank_enable(dev, crtc); 987 ret = drm_vblank_enable(dev, crtc);
924 } else { 988 } else {
925 if (!dev->vblank[crtc].enabled) { 989 if (!vblank->enabled) {
926 atomic_dec(&dev->vblank[crtc].refcount); 990 atomic_dec(&vblank->refcount);
927 ret = -EINVAL; 991 ret = -EINVAL;
928 } 992 }
929 } 993 }
@@ -963,13 +1027,23 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
963 */ 1027 */
964void drm_vblank_put(struct drm_device *dev, int crtc) 1028void drm_vblank_put(struct drm_device *dev, int crtc)
965{ 1029{
966 BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0); 1030 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1031
1032 BUG_ON(atomic_read(&vblank->refcount) == 0);
1033
1034 if (WARN_ON(crtc >= dev->num_crtcs))
1035 return;
967 1036
968 /* Last user schedules interrupt disable */ 1037 /* Last user schedules interrupt disable */
969 if (atomic_dec_and_test(&dev->vblank[crtc].refcount) && 1038 if (atomic_dec_and_test(&vblank->refcount)) {
970 (drm_vblank_offdelay > 0)) 1039 if (drm_vblank_offdelay == 0)
971 mod_timer(&dev->vblank[crtc].disable_timer, 1040 return;
972 jiffies + ((drm_vblank_offdelay * HZ)/1000)); 1041 else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
1042 vblank_disable_fn((unsigned long)vblank);
1043 else
1044 mod_timer(&vblank->disable_timer,
1045 jiffies + ((drm_vblank_offdelay * HZ)/1000));
1046 }
973} 1047}
974EXPORT_SYMBOL(drm_vblank_put); 1048EXPORT_SYMBOL(drm_vblank_put);
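In words: a drm_vblank_offdelay of 0 now means "never disable", a device flagged vblank_disable_immediate or a negative offdelay disables at once, and a positive offdelay arms the timer as before. A condensed sketch of that branch structure (not the actual helper):

	if (drm_vblank_offdelay == 0)
		return;                                   /* keep irqs enabled */
	if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
		vblank_disable_fn((unsigned long)vblank); /* disable right away */
	else
		mod_timer(&vblank->disable_timer,
			  jiffies + msecs_to_jiffies(drm_vblank_offdelay));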
975 1049
@@ -989,6 +1063,50 @@ void drm_crtc_vblank_put(struct drm_crtc *crtc)
989EXPORT_SYMBOL(drm_crtc_vblank_put); 1063EXPORT_SYMBOL(drm_crtc_vblank_put);
990 1064
991/** 1065/**
1066 * drm_wait_one_vblank - wait for one vblank
1067 * @dev: DRM device
1068 * @crtc: crtc index
1069 *
1070 * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
1071 * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
1072 * due to lack of driver support or because the crtc is off.
1073 */
1074void drm_wait_one_vblank(struct drm_device *dev, int crtc)
1075{
1076 int ret;
1077 u32 last;
1078
1079 ret = drm_vblank_get(dev, crtc);
1080 if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
1081 return;
1082
1083 last = drm_vblank_count(dev, crtc);
1084
1085 ret = wait_event_timeout(dev->vblank[crtc].queue,
1086 last != drm_vblank_count(dev, crtc),
1087 msecs_to_jiffies(100));
1088
1089 WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
1090
1091 drm_vblank_put(dev, crtc);
1092}
1093EXPORT_SYMBOL(drm_wait_one_vblank);
1094
1095/**
1096 * drm_crtc_wait_one_vblank - wait for one vblank
1097 * @crtc: DRM crtc
1098 *
1099 * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
1100 * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
1101 * due to lack of driver support or because the crtc is off.
1102 */
1103void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
1104{
1105 drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
1106}
1107EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
1108
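A plausible caller, sketched under the assumption that vblank irqs work on the crtc (hypothetical driver snippet):

	drm_crtc_vblank_on(crtc);
	/* ... enable planes, start scanout ... */
	drm_crtc_wait_one_vblank(crtc); /* does its own vblank get/put pair */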
1109/**
992 * drm_vblank_off - disable vblank events on a CRTC 1110 * drm_vblank_off - disable vblank events on a CRTC
993 * @dev: DRM device 1111 * @dev: DRM device
994 * @crtc: CRTC in question 1112 * @crtc: CRTC in question
@@ -1004,19 +1122,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
1004 */ 1122 */
1005void drm_vblank_off(struct drm_device *dev, int crtc) 1123void drm_vblank_off(struct drm_device *dev, int crtc)
1006{ 1124{
1125 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1007 struct drm_pending_vblank_event *e, *t; 1126 struct drm_pending_vblank_event *e, *t;
1008 struct timeval now; 1127 struct timeval now;
1009 unsigned long irqflags; 1128 unsigned long irqflags;
1010 unsigned int seq; 1129 unsigned int seq;
1011 1130
1012 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1131 if (WARN_ON(crtc >= dev->num_crtcs))
1132 return;
1133
1134 spin_lock_irqsave(&dev->event_lock, irqflags);
1135
1136 spin_lock(&dev->vbl_lock);
1013 vblank_disable_and_save(dev, crtc); 1137 vblank_disable_and_save(dev, crtc);
1014 wake_up(&dev->vblank[crtc].queue); 1138 wake_up(&vblank->queue);
1139
1140 /*
1141 * Prevent subsequent drm_vblank_get() from re-enabling
1142 * the vblank interrupt by bumping the refcount.
1143 */
1144 if (!vblank->inmodeset) {
1145 atomic_inc(&vblank->refcount);
1146 vblank->inmodeset = 1;
1147 }
1148 spin_unlock(&dev->vbl_lock);
1015 1149
1016 /* Send any queued vblank events, lest the natives grow disquiet */ 1150 /* Send any queued vblank events, lest the natives grow disquiet */
1017 seq = drm_vblank_count_and_time(dev, crtc, &now); 1151 seq = drm_vblank_count_and_time(dev, crtc, &now);
1018 1152
1019 spin_lock(&dev->event_lock);
1020 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1153 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
1021 if (e->pipe != crtc) 1154 if (e->pipe != crtc)
1022 continue; 1155 continue;
@@ -1027,9 +1160,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
1027 drm_vblank_put(dev, e->pipe); 1160 drm_vblank_put(dev, e->pipe);
1028 send_vblank_event(dev, e, seq, &now); 1161 send_vblank_event(dev, e, seq, &now);
1029 } 1162 }
1030 spin_unlock(&dev->event_lock); 1163 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1031
1032 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1033} 1164}
1034EXPORT_SYMBOL(drm_vblank_off); 1165EXPORT_SYMBOL(drm_vblank_off);
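Together with drm_vblank_on() below, the inmodeset refcount brackets a modeset roughly like this (sketch):

	drm_vblank_off(dev, crtc);  /* irq off, refcount bumped, enabled = false:
	                             * a racing drm_vblank_get() now fails with
	                             * -EINVAL instead of re-enabling the irq */
	/* ... full modeset; hardware may reset its vblank counter ... */
	drm_vblank_on(dev, crtc);   /* drop the bracket refcount, resample the
	                             * counter, re-enable if users remain */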
1035 1166
@@ -1066,11 +1197,35 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
1066 */ 1197 */
1067void drm_vblank_on(struct drm_device *dev, int crtc) 1198void drm_vblank_on(struct drm_device *dev, int crtc)
1068{ 1199{
1200 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1069 unsigned long irqflags; 1201 unsigned long irqflags;
1070 1202
1203 if (WARN_ON(crtc >= dev->num_crtcs))
1204 return;
1205
1071 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1206 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1072 /* re-enable interrupts if there's are users left */ 1207 /* Drop our private "prevent drm_vblank_get" refcount */
1073 if (atomic_read(&dev->vblank[crtc].refcount) != 0) 1208 if (vblank->inmodeset) {
1209 atomic_dec(&vblank->refcount);
1210 vblank->inmodeset = 0;
1211 }
1212
1213 /*
1214 * sample the current counter to avoid random jumps
1215 * when drm_vblank_enable() applies the diff
1216 *
 1217 * -1 to make sure the user will never see the same
1218 * vblank counter value before and after a modeset
1219 */
1220 vblank->last =
1221 (dev->driver->get_vblank_counter(dev, crtc) - 1) &
1222 dev->max_vblank_count;
1223 /*
1224 * re-enable interrupts if there are users left, or the
1225 * user wishes vblank interrupts to be enabled all the time.
1226 */
1227 if (atomic_read(&vblank->refcount) != 0 ||
1228 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1074 WARN_ON(drm_vblank_enable(dev, crtc)); 1229 WARN_ON(drm_vblank_enable(dev, crtc));
1075 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1230 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1076} 1231}
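The "- 1" resample deserves a numeric example (hypothetical values, assuming max_vblank_count is a power-of-two mask):

	/* The hardware counter reads back 100 when the crtc comes on: */
	vblank->last = (100 - 1) & dev->max_vblank_count;   /* = 99 */
	/* The next drm_update_vblank_count() then computes
	 * diff = 100 - 99 = 1, so the cooked counter advances even if no
	 * real vblank occurred, and userspace never sees a repeat. */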
@@ -1118,9 +1273,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
1118 */ 1273 */
1119void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 1274void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1120{ 1275{
1276 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1277
1121 /* vblank is not initialized (IRQ not installed ?), or has been freed */ 1278 /* vblank is not initialized (IRQ not installed ?), or has been freed */
1122 if (!dev->num_crtcs) 1279 if (!dev->num_crtcs)
1123 return; 1280 return;
1281
1282 if (WARN_ON(crtc >= dev->num_crtcs))
1283 return;
1284
1124 /* 1285 /*
1125 * To avoid all the problems that might happen if interrupts 1286 * To avoid all the problems that might happen if interrupts
1126 * were enabled/disabled around or between these calls, we just 1287 * were enabled/disabled around or between these calls, we just
@@ -1128,10 +1289,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 1128 * to avoid corrupting the count if multiple, mismatched calls occur), 1289
1129 * so that interrupts remain enabled in the interim. 1290 * so that interrupts remain enabled in the interim.
1130 */ 1291 */
1131 if (!dev->vblank[crtc].inmodeset) { 1292 if (!vblank->inmodeset) {
1132 dev->vblank[crtc].inmodeset = 0x1; 1293 vblank->inmodeset = 0x1;
1133 if (drm_vblank_get(dev, crtc) == 0) 1294 if (drm_vblank_get(dev, crtc) == 0)
1134 dev->vblank[crtc].inmodeset |= 0x2; 1295 vblank->inmodeset |= 0x2;
1135 } 1296 }
1136} 1297}
1137EXPORT_SYMBOL(drm_vblank_pre_modeset); 1298EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1146,21 +1307,22 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
1146 */ 1307 */
1147void drm_vblank_post_modeset(struct drm_device *dev, int crtc) 1308void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1148{ 1309{
1310 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1149 unsigned long irqflags; 1311 unsigned long irqflags;
1150 1312
1151 /* vblank is not initialized (IRQ not installed ?), or has been freed */ 1313 /* vblank is not initialized (IRQ not installed ?), or has been freed */
1152 if (!dev->num_crtcs) 1314 if (!dev->num_crtcs)
1153 return; 1315 return;
1154 1316
1155 if (dev->vblank[crtc].inmodeset) { 1317 if (vblank->inmodeset) {
1156 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1318 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1157 dev->vblank_disable_allowed = true; 1319 dev->vblank_disable_allowed = true;
1158 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1320 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1159 1321
1160 if (dev->vblank[crtc].inmodeset & 0x2) 1322 if (vblank->inmodeset & 0x2)
1161 drm_vblank_put(dev, crtc); 1323 drm_vblank_put(dev, crtc);
1162 1324
1163 dev->vblank[crtc].inmodeset = 0; 1325 vblank->inmodeset = 0;
1164 } 1326 }
1165} 1327}
1166EXPORT_SYMBOL(drm_vblank_post_modeset); 1328EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1212,6 +1374,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1212 union drm_wait_vblank *vblwait, 1374 union drm_wait_vblank *vblwait,
1213 struct drm_file *file_priv) 1375 struct drm_file *file_priv)
1214{ 1376{
1377 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1215 struct drm_pending_vblank_event *e; 1378 struct drm_pending_vblank_event *e;
1216 struct timeval now; 1379 struct timeval now;
1217 unsigned long flags; 1380 unsigned long flags;
@@ -1235,6 +1398,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1235 1398
1236 spin_lock_irqsave(&dev->event_lock, flags); 1399 spin_lock_irqsave(&dev->event_lock, flags);
1237 1400
1401 /*
1402 * drm_vblank_off() might have been called after we called
1403 * drm_vblank_get(). drm_vblank_off() holds event_lock
1404 * around the vblank disable, so no need for further locking.
1405 * The reference from drm_vblank_get() protects against
1406 * vblank disable from another source.
1407 */
1408 if (!vblank->enabled) {
1409 ret = -EINVAL;
1410 goto err_unlock;
1411 }
1412
1238 if (file_priv->event_space < sizeof e->event) { 1413 if (file_priv->event_space < sizeof e->event) {
1239 ret = -EBUSY; 1414 ret = -EBUSY;
1240 goto err_unlock; 1415 goto err_unlock;
@@ -1295,6 +1470,7 @@ err_put:
1295int drm_wait_vblank(struct drm_device *dev, void *data, 1470int drm_wait_vblank(struct drm_device *dev, void *data,
1296 struct drm_file *file_priv) 1471 struct drm_file *file_priv)
1297{ 1472{
1473 struct drm_vblank_crtc *vblank;
1298 union drm_wait_vblank *vblwait = data; 1474 union drm_wait_vblank *vblwait = data;
1299 int ret; 1475 int ret;
1300 unsigned int flags, seq, crtc, high_crtc; 1476 unsigned int flags, seq, crtc, high_crtc;
@@ -1324,6 +1500,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1324 if (crtc >= dev->num_crtcs) 1500 if (crtc >= dev->num_crtcs)
1325 return -EINVAL; 1501 return -EINVAL;
1326 1502
1503 vblank = &dev->vblank[crtc];
1504
1327 ret = drm_vblank_get(dev, crtc); 1505 ret = drm_vblank_get(dev, crtc);
1328 if (ret) { 1506 if (ret) {
1329 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); 1507 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
@@ -1356,11 +1534,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1356 1534
1357 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1535 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
1358 vblwait->request.sequence, crtc); 1536 vblwait->request.sequence, crtc);
1359 dev->vblank[crtc].last_wait = vblwait->request.sequence; 1537 vblank->last_wait = vblwait->request.sequence;
1360 DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ, 1538 DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
1361 (((drm_vblank_count(dev, crtc) - 1539 (((drm_vblank_count(dev, crtc) -
1362 vblwait->request.sequence) <= (1 << 23)) || 1540 vblwait->request.sequence) <= (1 << 23)) ||
1363 !dev->vblank[crtc].enabled || 1541 !vblank->enabled ||
1364 !dev->irq_enabled)); 1542 !dev->irq_enabled));
1365 1543
1366 if (ret != -EINTR) { 1544 if (ret != -EINTR) {
@@ -1385,12 +1563,11 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1385{ 1563{
1386 struct drm_pending_vblank_event *e, *t; 1564 struct drm_pending_vblank_event *e, *t;
1387 struct timeval now; 1565 struct timeval now;
1388 unsigned long flags;
1389 unsigned int seq; 1566 unsigned int seq;
1390 1567
1391 seq = drm_vblank_count_and_time(dev, crtc, &now); 1568 assert_spin_locked(&dev->event_lock);
1392 1569
1393 spin_lock_irqsave(&dev->event_lock, flags); 1570 seq = drm_vblank_count_and_time(dev, crtc, &now);
1394 1571
1395 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1572 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
1396 if (e->pipe != crtc) 1573 if (e->pipe != crtc)
@@ -1406,8 +1583,6 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1406 send_vblank_event(dev, e, seq, &now); 1583 send_vblank_event(dev, e, seq, &now);
1407 } 1584 }
1408 1585
1409 spin_unlock_irqrestore(&dev->event_lock, flags);
1410
1411 trace_drm_vblank_event(crtc, seq); 1586 trace_drm_vblank_event(crtc, seq);
1412} 1587}
1413 1588
@@ -1421,6 +1596,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1421 */ 1596 */
1422bool drm_handle_vblank(struct drm_device *dev, int crtc) 1597bool drm_handle_vblank(struct drm_device *dev, int crtc)
1423{ 1598{
1599 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1424 u32 vblcount; 1600 u32 vblcount;
1425 s64 diff_ns; 1601 s64 diff_ns;
1426 struct timeval tvblank; 1602 struct timeval tvblank;
@@ -1429,15 +1605,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1429 if (!dev->num_crtcs) 1605 if (!dev->num_crtcs)
1430 return false; 1606 return false;
1431 1607
1608 if (WARN_ON(crtc >= dev->num_crtcs))
1609 return false;
1610
1611 spin_lock_irqsave(&dev->event_lock, irqflags);
1612
1432 /* Need timestamp lock to prevent concurrent execution with 1613 /* Need timestamp lock to prevent concurrent execution with
1433 * vblank enable/disable, as this would cause inconsistent 1614 * vblank enable/disable, as this would cause inconsistent
1434 * or corrupted timestamps and vblank counts. 1615 * or corrupted timestamps and vblank counts.
1435 */ 1616 */
1436 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 1617 spin_lock(&dev->vblank_time_lock);
1437 1618
1438 /* Vblank irq handling disabled. Nothing to do. */ 1619 /* Vblank irq handling disabled. Nothing to do. */
1439 if (!dev->vblank[crtc].enabled) { 1620 if (!vblank->enabled) {
1440 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1621 spin_unlock(&dev->vblank_time_lock);
1622 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1441 return false; 1623 return false;
1442 } 1624 }
1443 1625
@@ -1446,7 +1628,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1446 */ 1628 */
1447 1629
1448 /* Get current timestamp and count. */ 1630 /* Get current timestamp and count. */
1449 vblcount = atomic_read(&dev->vblank[crtc].count); 1631 vblcount = atomic_read(&vblank->count);
1450 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1632 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1451 1633
1452 /* Compute time difference to timestamp of last vblank */ 1634 /* Compute time difference to timestamp of last vblank */
@@ -1470,17 +1652,20 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1470 * the timestamp computed above. 1652 * the timestamp computed above.
1471 */ 1653 */
1472 smp_mb__before_atomic(); 1654 smp_mb__before_atomic();
1473 atomic_inc(&dev->vblank[crtc].count); 1655 atomic_inc(&vblank->count);
1474 smp_mb__after_atomic(); 1656 smp_mb__after_atomic();
1475 } else { 1657 } else {
1476 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1658 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1477 crtc, (int) diff_ns); 1659 crtc, (int) diff_ns);
1478 } 1660 }
1479 1661
1480 wake_up(&dev->vblank[crtc].queue); 1662 spin_unlock(&dev->vblank_time_lock);
1663
1664 wake_up(&vblank->queue);
1481 drm_handle_vblank_events(dev, crtc); 1665 drm_handle_vblank_events(dev, crtc);
1482 1666
1483 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1667 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1668
1484 return true; 1669 return true;
1485} 1670}
1486EXPORT_SYMBOL(drm_handle_vblank); 1671EXPORT_SYMBOL(drm_handle_vblank);
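The handler now establishes a fixed lock nesting, with event_lock outermost, so drm_handle_vblank_events() runs with no extra acquisition. A sketch of the ordering only, not the full body:

	spin_lock_irqsave(&dev->event_lock, irqflags);
	spin_lock(&dev->vblank_time_lock);
	/* ... bump cooked count, store timestamp ... */
	spin_unlock(&dev->vblank_time_lock);
	/* ... wake waiters, deliver events: still under event_lock ... */
	spin_unlock_irqrestore(&dev->event_lock, irqflags);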
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index d34f20a79b7c..c1dc61473db5 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -23,6 +23,15 @@
23 * OTHER DEALINGS IN THE SOFTWARE. 23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 24 */
25 25
26/*
27 * This file contains legacy interfaces that modern drm drivers
28 * should no longer be using. They cannot be removed as legacy
 29 * drivers use them, and removing them would be an API break.
30 */
31#include <linux/list.h>
32#include <drm/drm_legacy.h>
33
34struct agp_memory;
26struct drm_device; 35struct drm_device;
27struct drm_file; 36struct drm_file;
28 37
@@ -48,4 +57,57 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
48int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f); 57int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
49int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f); 58int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
50 59
60/*
61 * Generic Buffer Management
62 */
63
64#define DRM_MAP_HASH_OFFSET 0x10000000
65
66int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
67int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
68int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
69int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
70int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
71int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
72int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
73int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
74
75void drm_legacy_vma_flush(struct drm_device *d);
76
77/*
78 * AGP Support
79 */
80
81struct drm_agp_mem {
82 unsigned long handle;
83 struct agp_memory *memory;
84 unsigned long bound;
85 int pages;
86 struct list_head head;
87};
88
89/*
90 * Generic Userspace Locking-API
91 */
92
93int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
94int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
95int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
96int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
97
98/* DMA support */
99int drm_legacy_dma_setup(struct drm_device *dev);
100void drm_legacy_dma_takedown(struct drm_device *dev);
101void drm_legacy_free_buffer(struct drm_device *dev,
102 struct drm_buf * buf);
103void drm_legacy_reclaim_buffers(struct drm_device *dev,
104 struct drm_file *filp);
105
106/* Scatter Gather Support */
107void drm_legacy_sg_cleanup(struct drm_device *dev);
108int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
109 struct drm_file *file_priv);
110int drm_legacy_sg_free(struct drm_device *dev, void *data,
111 struct drm_file *file_priv);
112
51#endif /* __DRM_LEGACY_H__ */ 113#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e26b59e385ff..f861361a635e 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -36,6 +36,7 @@
36#include <linux/export.h> 36#include <linux/export.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include "drm_legacy.h" 38#include "drm_legacy.h"
39#include "drm_internal.h"
39 40
40static int drm_notifier(void *priv); 41static int drm_notifier(void *priv);
41 42
@@ -52,7 +53,8 @@ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
52 * 53 *
 53 * Add the current task to the lock wait queue, and attempt to take the lock. 54
54 */ 55 */
55int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) 56int drm_legacy_lock(struct drm_device *dev, void *data,
57 struct drm_file *file_priv)
56{ 58{
57 DECLARE_WAITQUEUE(entry, current); 59 DECLARE_WAITQUEUE(entry, current);
58 struct drm_lock *lock = data; 60 struct drm_lock *lock = data;
@@ -120,7 +122,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
120 sigaddset(&dev->sigmask, SIGTTOU); 122 sigaddset(&dev->sigmask, SIGTTOU);
121 dev->sigdata.context = lock->context; 123 dev->sigdata.context = lock->context;
122 dev->sigdata.lock = master->lock.hw_lock; 124 dev->sigdata.lock = master->lock.hw_lock;
123 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); 125 block_all_signals(drm_notifier, dev, &dev->sigmask);
124 } 126 }
125 127
126 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) 128 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
@@ -146,7 +148,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
146 * 148 *
147 * Transfer and free the lock. 149 * Transfer and free the lock.
148 */ 150 */
149int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) 151int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
150{ 152{
151 struct drm_lock *lock = data; 153 struct drm_lock *lock = data;
152 struct drm_master *master = file_priv->master; 154 struct drm_master *master = file_priv->master;
@@ -157,7 +159,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
157 return -EINVAL; 159 return -EINVAL;
158 } 160 }
159 161
160 if (drm_lock_free(&master->lock, lock->context)) { 162 if (drm_legacy_lock_free(&master->lock, lock->context)) {
161 /* FIXME: Should really bail out here. */ 163 /* FIXME: Should really bail out here. */
162 } 164 }
163 165
@@ -250,7 +252,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data,
250 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task 252 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
251 * waiting on the lock queue. 253 * waiting on the lock queue.
252 */ 254 */
253int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) 255int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
254{ 256{
255 unsigned int old, new, prev; 257 unsigned int old, new, prev;
256 volatile unsigned int *lock = &lock_data->hw_lock->lock; 258 volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -286,26 +288,27 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
286 * If the lock is not held, then let the signal proceed as usual. If the lock 288 * If the lock is not held, then let the signal proceed as usual. If the lock
287 * is held, then set the contended flag and keep the signal blocked. 289 * is held, then set the contended flag and keep the signal blocked.
288 * 290 *
289 * \param priv pointer to a drm_sigdata structure. 291 * \param priv pointer to a drm_device structure.
290 * \return one if the signal should be delivered normally, or zero if the 292 * \return one if the signal should be delivered normally, or zero if the
291 * signal should be blocked. 293 * signal should be blocked.
292 */ 294 */
293static int drm_notifier(void *priv) 295static int drm_notifier(void *priv)
294{ 296{
295 struct drm_sigdata *s = (struct drm_sigdata *) priv; 297 struct drm_device *dev = priv;
298 struct drm_hw_lock *lock = dev->sigdata.lock;
296 unsigned int old, new, prev; 299 unsigned int old, new, prev;
297 300
298 /* Allow signal delivery if lock isn't held */ 301 /* Allow signal delivery if lock isn't held */
299 if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) 302 if (!lock || !_DRM_LOCK_IS_HELD(lock->lock)
300 || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) 303 || _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context)
301 return 1; 304 return 1;
302 305
303 /* Otherwise, set flag to force call to 306 /* Otherwise, set flag to force call to
304 drmUnlock */ 307 drmUnlock */
305 do { 308 do {
306 old = s->lock->lock; 309 old = lock->lock;
307 new = old | _DRM_LOCK_CONT; 310 new = old | _DRM_LOCK_CONT;
308 prev = cmpxchg(&s->lock->lock, old, new); 311 prev = cmpxchg(&lock->lock, old, new);
309 } while (prev != old); 312 } while (prev != old);
310 return 0; 313 return 0;
311} 314}
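The loop is the classic lock-free read-modify-write. A self-contained C11 analogue, purely illustrative:

	#include <stdatomic.h>

	static void set_flag(_Atomic unsigned int *word, unsigned int flag)
	{
		unsigned int old = atomic_load(word);

		/* On failure 'old' is reloaded with the current value;
		 * retry until our OR lands without interference. */
		while (!atomic_compare_exchange_weak(word, &old, old | flag))
			;
	}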
@@ -323,7 +326,7 @@ static int drm_notifier(void *priv)
323 * having to worry about starvation. 326 * having to worry about starvation.
324 */ 327 */
325 328
326void drm_idlelock_take(struct drm_lock_data *lock_data) 329void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
327{ 330{
328 int ret; 331 int ret;
329 332
@@ -340,9 +343,9 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
340 } 343 }
341 spin_unlock_bh(&lock_data->spinlock); 344 spin_unlock_bh(&lock_data->spinlock);
342} 345}
343EXPORT_SYMBOL(drm_idlelock_take); 346EXPORT_SYMBOL(drm_legacy_idlelock_take);
344 347
345void drm_idlelock_release(struct drm_lock_data *lock_data) 348void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
346{ 349{
347 unsigned int old, prev; 350 unsigned int old, prev;
348 volatile unsigned int *lock = &lock_data->hw_lock->lock; 351 volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -360,9 +363,10 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
360 } 363 }
361 spin_unlock_bh(&lock_data->spinlock); 364 spin_unlock_bh(&lock_data->spinlock);
362} 365}
363EXPORT_SYMBOL(drm_idlelock_release); 366EXPORT_SYMBOL(drm_legacy_idlelock_release);
364 367
365int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) 368int drm_legacy_i_have_hw_lock(struct drm_device *dev,
369 struct drm_file *file_priv)
366{ 370{
367 struct drm_master *master = file_priv->master; 371 struct drm_master *master = file_priv->master;
368 return (file_priv->lock_count && master->lock.hw_lock && 372 return (file_priv->lock_count && master->lock.hw_lock &&
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 00c67c0f2381..a521ef6ff807 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -36,8 +36,20 @@
36#include <linux/highmem.h> 36#include <linux/highmem.h>
37#include <linux/export.h> 37#include <linux/export.h>
38#include <drm/drmP.h> 38#include <drm/drmP.h>
39#include "drm_legacy.h"
39 40
40#if __OS_HAS_AGP 41#if __OS_HAS_AGP
42
43#ifdef HAVE_PAGE_AGP
44# include <asm/agp.h>
45#else
46# ifdef __powerpc__
47# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
48# else
49# define PAGE_AGP PAGE_KERNEL
50# endif
51#endif
52
41static void *agp_remap(unsigned long offset, unsigned long size, 53static void *agp_remap(unsigned long offset, unsigned long size,
42 struct drm_device * dev) 54 struct drm_device * dev)
43{ 55{
@@ -108,25 +120,25 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
108 120
109#endif /* agp */ 121#endif /* agp */
110 122
111void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) 123void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
112{ 124{
113 if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) 125 if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
114 map->handle = agp_remap(map->offset, map->size, dev); 126 map->handle = agp_remap(map->offset, map->size, dev);
115 else 127 else
116 map->handle = ioremap(map->offset, map->size); 128 map->handle = ioremap(map->offset, map->size);
117} 129}
118EXPORT_SYMBOL(drm_core_ioremap); 130EXPORT_SYMBOL(drm_legacy_ioremap);
119 131
120void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) 132void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
121{ 133{
122 if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) 134 if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
123 map->handle = agp_remap(map->offset, map->size, dev); 135 map->handle = agp_remap(map->offset, map->size, dev);
124 else 136 else
125 map->handle = ioremap_wc(map->offset, map->size); 137 map->handle = ioremap_wc(map->offset, map->size);
126} 138}
127EXPORT_SYMBOL(drm_core_ioremap_wc); 139EXPORT_SYMBOL(drm_legacy_ioremap_wc);
128 140
129void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev) 141void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
130{ 142{
131 if (!map->handle || !map->size) 143 if (!map->handle || !map->size)
132 return; 144 return;
@@ -136,4 +148,4 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
136 else 148 else
137 iounmap(map->handle); 149 iounmap(map->handle);
138} 150}
139EXPORT_SYMBOL(drm_core_ioremapfree); 151EXPORT_SYMBOL(drm_legacy_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 6aa6a9e95570..eb6dfe52cab2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -231,6 +231,9 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
231 break; 231 break;
232 } 232 }
233 233
234 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
235 msg.flags = MIPI_DSI_MSG_USE_LPM;
236
234 return ops->transfer(dsi->host, &msg); 237 return ops->transfer(dsi->host, &msg);
235} 238}
236EXPORT_SYMBOL(mipi_dsi_dcs_write); 239EXPORT_SYMBOL(mipi_dsi_dcs_write);
@@ -260,6 +263,9 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
260 if (!ops || !ops->transfer) 263 if (!ops || !ops->transfer)
261 return -ENOSYS; 264 return -ENOSYS;
262 265
266 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
267 msg.flags = MIPI_DSI_MSG_USE_LPM;
268
263 return ops->transfer(dsi->host, &msg); 269 return ops->transfer(dsi->host, &msg);
264} 270}
265EXPORT_SYMBOL(mipi_dsi_dcs_read); 271EXPORT_SYMBOL(mipi_dsi_dcs_read);
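With this change a peripheral declares low-power mode once and the core tags every DCS message for it. A hypothetical panel driver's probe might contain:

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
	/* mipi_dsi_dcs_write()/read() now set MIPI_DSI_MSG_USE_LPM */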
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index bedf1894e17e..d1b7d2006529 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1259,6 +1259,7 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1259 if (!mode) 1259 if (!mode)
1260 return NULL; 1260 return NULL;
1261 1261
1262 mode->type |= DRM_MODE_TYPE_USERDEF;
1262 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1263 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1263 return mode; 1264 return mode;
1264} 1265}
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 3a02e5e3e9f3..474e4d12a2d8 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -57,6 +57,212 @@
57 57
58 58
59/** 59/**
60 * __drm_modeset_lock_all - internal helper to grab all modeset locks
61 * @dev: DRM device
62 * @trylock: trylock mode for atomic contexts
63 *
64 * This is a special version of drm_modeset_lock_all() which can also be used in
 65 * atomic contexts; in that case @trylock must be set to true.
66 *
67 * Returns:
68 * 0 on success or negative error code on failure.
69 */
70int __drm_modeset_lock_all(struct drm_device *dev,
71 bool trylock)
72{
73 struct drm_mode_config *config = &dev->mode_config;
74 struct drm_modeset_acquire_ctx *ctx;
75 int ret;
76
77 ctx = kzalloc(sizeof(*ctx),
78 trylock ? GFP_ATOMIC : GFP_KERNEL);
79 if (!ctx)
80 return -ENOMEM;
81
82 if (trylock) {
83 if (!mutex_trylock(&config->mutex))
84 return -EBUSY;
85 } else {
86 mutex_lock(&config->mutex);
87 }
88
89 drm_modeset_acquire_init(ctx, 0);
90 ctx->trylock_only = trylock;
91
92retry:
93 ret = drm_modeset_lock(&config->connection_mutex, ctx);
94 if (ret)
95 goto fail;
96 ret = drm_modeset_lock_all_crtcs(dev, ctx);
97 if (ret)
98 goto fail;
99
100 WARN_ON(config->acquire_ctx);
101
 102 /* now that we hold the locks it is safe to stash the
103 * ctx for drm_modeset_unlock_all():
104 */
105 config->acquire_ctx = ctx;
106
107 drm_warn_on_modeset_not_all_locked(dev);
108
109 return 0;
110
111fail:
112 if (ret == -EDEADLK) {
113 drm_modeset_backoff(ctx);
114 goto retry;
115 }
116
117 return ret;
118}
119EXPORT_SYMBOL(__drm_modeset_lock_all);
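A plausible atomic-context caller, sketched on the assumption of something like a GPU-hang state capture (hypothetical):

	if (__drm_modeset_lock_all(dev, true /* trylock */) == 0) {
		/* ... safely walk modeset state ... */
		drm_modeset_unlock_all(dev);
	}
	/* else -EBUSY: the capture path must cope without the locks */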
120
121/**
122 * drm_modeset_lock_all - take all modeset locks
123 * @dev: drm device
124 *
125 * This function takes all modeset locks, suitable where a more fine-grained
126 * scheme isn't (yet) implemented. Locks must be dropped with
127 * drm_modeset_unlock_all.
128 */
129void drm_modeset_lock_all(struct drm_device *dev)
130{
131 WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
132}
133EXPORT_SYMBOL(drm_modeset_lock_all);
134
135/**
136 * drm_modeset_unlock_all - drop all modeset locks
137 * @dev: device
138 *
 139 * This function drops all modeset locks taken by drm_modeset_lock_all().
140 */
141void drm_modeset_unlock_all(struct drm_device *dev)
142{
143 struct drm_mode_config *config = &dev->mode_config;
144 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
145
146 if (WARN_ON(!ctx))
147 return;
148
149 config->acquire_ctx = NULL;
150 drm_modeset_drop_locks(ctx);
151 drm_modeset_acquire_fini(ctx);
152
153 kfree(ctx);
154
155 mutex_unlock(&dev->mode_config.mutex);
156}
157EXPORT_SYMBOL(drm_modeset_unlock_all);
158
159/**
160 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
161 * @crtc: drm crtc
162 *
163 * This function locks the given crtc using a hidden acquire context. This is
164 * necessary so that drivers internally using the atomic interfaces can grab
165 * further locks with the lock acquire context.
166 */
167void drm_modeset_lock_crtc(struct drm_crtc *crtc)
168{
169 struct drm_modeset_acquire_ctx *ctx;
170 int ret;
171
172 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
173 if (WARN_ON(!ctx))
174 return;
175
176 drm_modeset_acquire_init(ctx, 0);
177
178retry:
179 ret = drm_modeset_lock(&crtc->mutex, ctx);
180 if (ret)
181 goto fail;
182
183 WARN_ON(crtc->acquire_ctx);
184
 185 /* now that we hold the locks it is safe to stash the
186 * ctx for drm_modeset_unlock_crtc():
187 */
188 crtc->acquire_ctx = ctx;
189
190 return;
191
192fail:
193 if (ret == -EDEADLK) {
194 drm_modeset_backoff(ctx);
195 goto retry;
196 }
197}
198EXPORT_SYMBOL(drm_modeset_lock_crtc);
199
200/**
201 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
202 * @crtc: drm crtc
203 *
204 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
205 * locking, and store the acquire ctx in the corresponding crtc. All other
206 * legacy operations take all locks and use a global acquire context. This
207 * function grabs the right one.
208 */
209struct drm_modeset_acquire_ctx *
210drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
211{
212 if (crtc->acquire_ctx)
213 return crtc->acquire_ctx;
214
215 WARN_ON(!crtc->dev->mode_config.acquire_ctx);
216
217 return crtc->dev->mode_config.acquire_ctx;
218}
219EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
220
221/**
222 * drm_modeset_unlock_crtc - drop crtc lock
223 * @crtc: drm crtc
224 *
 225 * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
226 * locks acquired through the hidden context.
227 */
228void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
229{
230 struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
231
232 if (WARN_ON(!ctx))
233 return;
234
235 crtc->acquire_ctx = NULL;
236 drm_modeset_drop_locks(ctx);
237 drm_modeset_acquire_fini(ctx);
238
239 kfree(ctx);
240}
241EXPORT_SYMBOL(drm_modeset_unlock_crtc);
242
243/**
244 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
245 * @dev: device
246 *
247 * Useful as a debug assert.
248 */
249void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
250{
251 struct drm_crtc *crtc;
252
253 /* Locking is currently fubar in the panic handler. */
254 if (oops_in_progress)
255 return;
256
257 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
258 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
259
260 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
261 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
262}
263EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
264
265/**
60 * drm_modeset_acquire_init - initialize acquire context 266 * drm_modeset_acquire_init - initialize acquire context
61 * @ctx: the acquire context 267 * @ctx: the acquire context
62 * @flags: for future 268 * @flags: for future
@@ -108,7 +314,12 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
108 314
109 WARN_ON(ctx->contended); 315 WARN_ON(ctx->contended);
110 316
111 if (interruptible && slow) { 317 if (ctx->trylock_only) {
318 if (!ww_mutex_trylock(&lock->mutex))
319 return -EBUSY;
320 else
321 return 0;
322 } else if (interruptible && slow) {
112 ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx); 323 ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
113 } else if (interruptible) { 324 } else if (interruptible) {
114 ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx); 325 ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 020cfd934854..fd29f03645b8 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -27,6 +27,7 @@
27#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
28#include <linux/export.h> 28#include <linux/export.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "drm_legacy.h"
30 31
31/** 32/**
32 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA. 33 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
@@ -81,7 +82,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
81 * 82 *
82 * This function is for internal use in the Linux-specific DRM core code. 83 * This function is for internal use in the Linux-specific DRM core code.
83 */ 84 */
84void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 85void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
85{ 86{
86 unsigned long addr; 87 unsigned long addr;
87 size_t sz; 88 size_t sz;
@@ -105,7 +106,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
105 */ 106 */
106void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 107void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
107{ 108{
108 __drm_pci_free(dev, dmah); 109 __drm_legacy_pci_free(dev, dmah);
109 kfree(dmah); 110 kfree(dmah);
110} 111}
111 112
@@ -127,34 +128,20 @@ static int drm_get_pci_domain(struct drm_device *dev)
127 return pci_domain_nr(dev->pdev->bus); 128 return pci_domain_nr(dev->pdev->bus);
128} 129}
129 130
130static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) 131int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
131{ 132{
132 int len, ret; 133 master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
133 master->unique_len = 40; 134 drm_get_pci_domain(dev),
134 master->unique_size = master->unique_len; 135 dev->pdev->bus->number,
135 master->unique = kmalloc(master->unique_size, GFP_KERNEL); 136 PCI_SLOT(dev->pdev->devfn),
136 if (master->unique == NULL) 137 PCI_FUNC(dev->pdev->devfn));
138 if (!master->unique)
137 return -ENOMEM; 139 return -ENOMEM;
138 140
139 141 master->unique_len = strlen(master->unique);
140 len = snprintf(master->unique, master->unique_len,
141 "pci:%04x:%02x:%02x.%d",
142 drm_get_pci_domain(dev),
143 dev->pdev->bus->number,
144 PCI_SLOT(dev->pdev->devfn),
145 PCI_FUNC(dev->pdev->devfn));
146
147 if (len >= master->unique_len) {
148 DRM_ERROR("buffer overflow");
149 ret = -EINVAL;
150 goto err;
151 } else
152 master->unique_len = len;
153
154 return 0; 142 return 0;
155err:
156 return ret;
157} 143}
144EXPORT_SYMBOL(drm_pci_set_busid);
158 145
159int drm_pci_set_unique(struct drm_device *dev, 146int drm_pci_set_unique(struct drm_device *dev,
160 struct drm_master *master, 147 struct drm_master *master,
@@ -163,8 +150,7 @@ int drm_pci_set_unique(struct drm_device *dev,
163 int domain, bus, slot, func, ret; 150 int domain, bus, slot, func, ret;
164 151
165 master->unique_len = u->unique_len; 152 master->unique_len = u->unique_len;
166 master->unique_size = u->unique_len + 1; 153 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
167 master->unique = kmalloc(master->unique_size, GFP_KERNEL);
168 if (!master->unique) { 154 if (!master->unique) {
169 ret = -ENOMEM; 155 ret = -ENOMEM;
170 goto err; 156 goto err;
@@ -269,10 +255,6 @@ void drm_pci_agp_destroy(struct drm_device *dev)
269 } 255 }
270} 256}
271 257
272static struct drm_bus drm_pci_bus = {
273 .set_busid = drm_pci_set_busid,
274};
275
276/** 258/**
277 * drm_get_pci_dev - Register a PCI device with the DRM subsystem 259 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
278 * @pdev: PCI device 260 * @pdev: PCI device
@@ -353,8 +335,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
353 335
354 DRM_DEBUG("\n"); 336 DRM_DEBUG("\n");
355 337
356 driver->bus = &drm_pci_bus;
357
358 if (driver->driver_features & DRIVER_MODESET) 338 if (driver->driver_features & DRIVER_MODESET)
359 return pci_register_driver(pdriver); 339 return pci_register_driver(pdriver);
360 340
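
kasprintf() allocates a buffer sized exactly to the formatted string and returns NULL on failure, which is what lets the patch above drop the fixed 40-byte buffer and the manual overflow check. A minimal sketch of the idiom, with a hypothetical build_pci_busid() helper:

	/* sketch only: kasprintf() sizes the allocation to fit the output */
	static char *build_pci_busid(int domain, int bus, int slot, int func)
	{
		return kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
				 domain, bus, slot, func); /* NULL on OOM */
	}

The caller then only needs a NULL check, and strlen() on the result is safe because the string is guaranteed to be complete.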
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index d5b76f148c12..5314c9d5fef4 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,42 +68,23 @@ err_free:
 	return ret;
 }
 
-static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
 {
-	int len, ret, id;
-
-	master->unique_len = 13 + strlen(dev->platformdev->name);
-	master->unique_size = master->unique_len;
-	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
-	if (master->unique == NULL)
-		return -ENOMEM;
+	int id;
 
 	id = dev->platformdev->id;
-
-	/* if only a single instance of the platform device, id will be
-	 * set to -1.. use 0 instead to avoid a funny looking bus-id:
-	 */
-	if (id == -1)
+	if (id < 0)
 		id = 0;
 
-	len = snprintf(master->unique, master->unique_len,
-			"platform:%s:%02d", dev->platformdev->name, id);
-
-	if (len > master->unique_len) {
-		DRM_ERROR("Unique buffer overflowed\n");
-		ret = -EINVAL;
-		goto err;
-	}
+	master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
+				   dev->platformdev->name, id);
+	if (!master->unique)
+		return -ENOMEM;
 
+	master->unique_len = strlen(master->unique);
 	return 0;
-err:
-	return ret;
 }
-
-static struct drm_bus drm_platform_bus = {
-	.set_busid = drm_platform_set_busid,
-};
+EXPORT_SYMBOL(drm_platform_set_busid);
 
 /**
  * drm_platform_init - Register a platform device with the DRM subsystem
@@ -120,7 +101,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor
 {
 	DRM_DEBUG("\n");
 
-	driver->bus = &drm_platform_bus;
 	return drm_get_platform_dev(platform_device, driver);
 }
 EXPORT_SYMBOL(drm_platform_init);
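
With struct drm_bus gone, the bus id is now set through an ordinary drm_driver callback; drivers wire the exported helper up themselves, as the exynos hunk further down does. An illustrative (hypothetical, abbreviated) driver struct:

	static struct drm_driver foo_drm_driver = {
		.driver_features = DRIVER_MODESET | DRIVER_GEM,
		.set_busid	 = drm_platform_set_busid,
		/* remaining callbacks omitted in this sketch */
	};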
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 99d578bad17e..78ca30808422 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -29,6 +29,9 @@
 #include <linux/export.h>
 #include <linux/dma-buf.h>
 #include <drm/drmP.h>
+#include <drm/drm_gem.h>
+
+#include "drm_internal.h"
 
 /*
  * DMA-BUF/GEM Object references and lifetime overview:
@@ -522,7 +525,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 		goto fail_detach;
 	}
 
-	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
 		goto fail_unmap;
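
Since gem_prime_import_sg_table() now receives the dma_buf attachment rather than a raw size, a driver callback can reach both the exporter and the buffer size through it. A sketch under the new signature; the foo_ names are hypothetical:

	static struct drm_gem_object *
	foo_gem_prime_import_sg_table(struct drm_device *dev,
				      struct dma_buf_attachment *attach,
				      struct sg_table *sgt)
	{
		/* the size formerly passed in is still available here */
		return foo_gem_create_from_sgt(dev, sgt, attach->dmabuf->size);
	}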
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index db7d250f7ac7..6857e9ad6339 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,6 +82,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 	return;
 }
 
+static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	if (!connector->cmdline_mode.specified)
+		return 0;
+
+	mode = drm_mode_create_from_cmdline_mode(connector->dev,
+						 &connector->cmdline_mode);
+	if (mode == NULL)
+		return 0;
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 							      uint32_t maxX, uint32_t maxY, bool merge_type_bits)
 {
@@ -141,6 +157,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
 	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
+	count += drm_helper_probe_add_cmdline_mode(connector);
 	if (count == 0)
 		goto prune;
 
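
The new helper means a mode forced on the kernel command line is honored even when a connected output yields no EDID modes. For example, booting with an argument such as (connector name illustrative):

	video=HDMI-A-1:1024x768@60

adds a 1024x768 mode to that connector during probing.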
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 1c78406f6e71..4f0f3b36d537 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <drm/drmP.h>
+#include "drm_legacy.h"
 
 #define DEBUG_SCATTER 0
 
@@ -78,8 +79,8 @@ void drm_legacy_sg_cleanup(struct drm_device *dev)
 # define ScatterHandle(x) (unsigned int)(x)
 #endif
 
-int drm_sg_alloc(struct drm_device *dev, void *data,
-		 struct drm_file *file_priv)
+int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
 {
 	struct drm_scatter_gather *request = data;
 	struct drm_sg_mem *entry;
@@ -194,8 +195,8 @@ int drm_sg_alloc(struct drm_device *dev, void *data,
 	return -ENOMEM;
 }
 
-int drm_sg_free(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
+int drm_legacy_sg_free(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct drm_scatter_gather *request = data;
 	struct drm_sg_mem *entry;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index ab1a5f6dde8a..cc3d6d6d67e0 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -21,6 +21,7 @@
 #include <drm/drm_sysfs.h>
 #include <drm/drm_core.h>
 #include <drm/drmP.h>
+#include "drm_internal.h"
 
 #define to_drm_minor(d) dev_get_drvdata(d)
 #define to_drm_connector(d) dev_get_drvdata(d)
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
deleted file mode 100644
index f2fe94aab901..000000000000
--- a/drivers/gpu/drm/drm_usb.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include <drm/drmP.h>
-#include <drm/drm_usb.h>
-#include <linux/usb.h>
-#include <linux/module.h>
-
-int drm_get_usb_dev(struct usb_interface *interface,
-		    const struct usb_device_id *id,
-		    struct drm_driver *driver)
-{
-	struct drm_device *dev;
-	int ret;
-
-	DRM_DEBUG("\n");
-
-	dev = drm_dev_alloc(driver, &interface->dev);
-	if (!dev)
-		return -ENOMEM;
-
-	dev->usbdev = interface_to_usbdev(interface);
-	usb_set_intfdata(interface, dev);
-
-	ret = drm_dev_register(dev, 0);
-	if (ret)
-		goto err_free;
-
-	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
-		 driver->name, driver->major, driver->minor, driver->patchlevel,
-		 driver->date, dev->primary->index);
-
-	return 0;
-
-err_free:
-	drm_dev_unref(dev);
-	return ret;
-
-}
-EXPORT_SYMBOL(drm_get_usb_dev);
-
-static int drm_usb_set_busid(struct drm_device *dev,
-			     struct drm_master *master)
-{
-	return 0;
-}
-
-static struct drm_bus drm_usb_bus = {
-	.set_busid = drm_usb_set_busid,
-};
-
-/**
- * drm_usb_init - Register matching USB devices with the DRM subsystem
- * @driver: DRM device driver
- * @udriver: USB device driver
- *
- * Registers one or more devices matched by a USB driver with the DRM
- * subsystem.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
-{
-	int res;
-	DRM_DEBUG("\n");
-
-	driver->bus = &drm_usb_bus;
-
-	res = usb_register(udriver);
-	return res;
-}
-EXPORT_SYMBOL(drm_usb_init);
-
-/**
- * drm_usb_exit - Unregister matching USB devices from the DRM subsystem
- * @driver: DRM device driver
- * @udriver: USB device driver
- *
- * Unregisters one or more devices matched by a USB driver from the DRM
- * subsystem.
- */
-void drm_usb_exit(struct drm_driver *driver,
-		  struct usb_driver *udriver)
-{
-	usb_deregister(udriver);
-}
-EXPORT_SYMBOL(drm_usb_exit);
-
-MODULE_AUTHOR("David Airlie");
-MODULE_DESCRIPTION("USB DRM support");
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 24e045c4f531..4a2c328959e5 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -35,10 +35,19 @@
 
 #include <drm/drmP.h>
 #include <linux/export.h>
+#include <linux/seq_file.h>
 #if defined(__ia64__)
 #include <linux/efi.h>
 #include <linux/slab.h>
 #endif
+#include <asm/pgtable.h>
+#include "drm_legacy.h"
+
+struct drm_vma_entry {
+	struct list_head head;
+	struct vm_area_struct *vma;
+	pid_t pid;
+};
 
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
@@ -48,15 +57,11 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
 {
 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
 	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
 		tmp = pgprot_noncached(tmp);
 	else
 		tmp = pgprot_writecombine(tmp);
-#elif defined(__powerpc__)
-	pgprot_val(tmp) |= _PAGE_NO_CACHE;
-	if (map->type == _DRM_REGISTERS)
-		pgprot_val(tmp) |= _PAGE_GUARDED;
 #elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 			    vma->vm_start))
@@ -263,7 +268,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 			dmah.vaddr = map->handle;
 			dmah.busaddr = map->offset;
 			dmah.size = map->size;
-			__drm_pci_free(dev, &dmah);
+			__drm_legacy_pci_free(dev, &dmah);
 			break;
 		}
 		kfree(map);
@@ -412,7 +417,6 @@ void drm_vm_open_locked(struct drm_device *dev,
 		list_add(&vma_entry->head, &dev->vmalist);
 	}
 }
-EXPORT_SYMBOL_GPL(drm_vm_open_locked);
 
 static void drm_vm_open(struct vm_area_struct *vma)
 {
@@ -532,7 +536,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
  */
-int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -646,7 +650,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -661,4 +665,69 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_mmap);
+EXPORT_SYMBOL(drm_legacy_mmap);
+
+void drm_legacy_vma_flush(struct drm_device *dev)
+{
+	struct drm_vma_entry *vma, *vma_temp;
+
+	/* Clear vma list (only needed for legacy drivers) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
+	}
+}
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_vma_entry *pt;
+	struct vm_area_struct *vma;
+	unsigned long vma_count = 0;
+#if defined(__i386__)
+	unsigned int pgprot;
+#endif
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(pt, &dev->vmalist, head)
+		vma_count++;
+
+	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+		   vma_count, high_memory,
+		   (void *)(unsigned long)virt_to_phys(high_memory));
+
+	list_for_each_entry(pt, &dev->vmalist, head) {
+		vma = pt->vma;
+		if (!vma)
+			continue;
+		seq_printf(m,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
+			   vma->vm_flags & VM_READ ? 'r' : '-',
+			   vma->vm_flags & VM_WRITE ? 'w' : '-',
+			   vma->vm_flags & VM_EXEC ? 'x' : '-',
+			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
+			   vma->vm_flags & VM_IO ? 'i' : '-',
+			   vma->vm_pgoff);
+
+#if defined(__i386__)
+		pgprot = pgprot_val(vma->vm_page_prot);
+		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+			   pgprot & _PAGE_PRESENT ? 'p' : '-',
+			   pgprot & _PAGE_RW ? 'w' : 'r',
+			   pgprot & _PAGE_USER ? 'u' : 's',
+			   pgprot & _PAGE_PWT ? 't' : 'b',
+			   pgprot & _PAGE_PCD ? 'u' : 'c',
+			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
+			   pgprot & _PAGE_DIRTY ? 'd' : '-',
+			   pgprot & _PAGE_PSE ? 'm' : 'k',
+			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+		seq_printf(m, "\n");
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 4f3c7eb2d37d..cd50ece31601 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -329,8 +329,8 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
 		return retval;
 
 	for (lane = 0; lane < lane_count; lane++)
-		buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
-			    DP_TRAIN_VOLTAGE_SWING_400;
+		buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
+			    DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 
 	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
 					       lane_count, buf);
@@ -937,6 +937,8 @@ static enum drm_connector_status exynos_dp_detect(
 
 static void exynos_dp_connector_destroy(struct drm_connector *connector)
 {
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
 }
 
 static struct drm_connector_funcs exynos_dp_connector_funcs = {
@@ -1358,8 +1360,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
 
 	exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
 
+	exynos_dp_connector_destroy(&dp->connector);
 	encoder->funcs->destroy(encoder);
-	drm_connector_cleanup(&dp->connector);
 }
 
 static const struct component_ops exynos_dp_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b68e58f78cd1..8e38e9f8e542 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -32,7 +32,6 @@ enum exynos_crtc_mode {
  * Exynos specific crtc structure.
  *
  * @drm_crtc: crtc object.
- * @drm_plane: pointer of private plane object for this crtc
  * @manager: the manager associated with this crtc
  * @pipe: a crtc index created at load() with a new crtc object creation
  *	and the crtc object would be set to private->crtc array
@@ -46,7 +45,6 @@ enum exynos_crtc_mode {
  */
 struct exynos_drm_crtc {
 	struct drm_crtc			drm_crtc;
-	struct drm_plane		*plane;
 	struct exynos_drm_manager	*manager;
 	unsigned int			pipe;
 	unsigned int			dpms;
@@ -94,12 +92,12 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 
 	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
 
-	exynos_plane_commit(exynos_crtc->plane);
+	exynos_plane_commit(crtc->primary);
 
 	if (manager->ops->commit)
 		manager->ops->commit(manager);
 
-	exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
+	exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
 }
 
 static bool
@@ -123,10 +121,9 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 	struct exynos_drm_manager *manager = exynos_crtc->manager;
-	struct drm_plane *plane = exynos_crtc->plane;
+	struct drm_framebuffer *fb = crtc->primary->fb;
 	unsigned int crtc_w;
 	unsigned int crtc_h;
-	int ret;
 
 	/*
 	 * copy the mode data adjusted by mode_fixup() into crtc->mode
@@ -134,29 +131,21 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	 */
 	memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
 
-	crtc_w = crtc->primary->fb->width - x;
-	crtc_h = crtc->primary->fb->height - y;
+	crtc_w = fb->width - x;
+	crtc_h = fb->height - y;
 
 	if (manager->ops->mode_set)
 		manager->ops->mode_set(manager, &crtc->mode);
 
-	ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
-				    x, y, crtc_w, crtc_h);
-	if (ret)
-		return ret;
-
-	plane->crtc = crtc;
-	plane->fb = crtc->primary->fb;
-	drm_framebuffer_reference(plane->fb);
-
-	return 0;
+	return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
+				     crtc_w, crtc_h, x, y, crtc_w, crtc_h);
 }
 
 static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
 					   struct drm_framebuffer *old_fb)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-	struct drm_plane *plane = exynos_crtc->plane;
+	struct drm_framebuffer *fb = crtc->primary->fb;
 	unsigned int crtc_w;
 	unsigned int crtc_h;
 	int ret;
@@ -167,11 +156,11 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
 		return -EPERM;
 	}
 
-	crtc_w = crtc->primary->fb->width - x;
-	crtc_h = crtc->primary->fb->height - y;
+	crtc_w = fb->width - x;
+	crtc_h = fb->height - y;
 
-	ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
-				    x, y, crtc_w, crtc_h);
+	ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
+				    crtc_w, crtc_h, x, y, crtc_w, crtc_h);
 	if (ret)
 		return ret;
 
@@ -304,8 +293,7 @@ static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
 			exynos_drm_crtc_commit(crtc);
 			break;
 		case CRTC_MODE_BLANK:
-			exynos_plane_dpms(exynos_crtc->plane,
-					  DRM_MODE_DPMS_OFF);
+			exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
 			break;
 		default:
 			break;
@@ -351,8 +339,10 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
 int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
 {
 	struct exynos_drm_crtc *exynos_crtc;
+	struct drm_plane *plane;
 	struct exynos_drm_private *private = manager->drm_dev->dev_private;
 	struct drm_crtc *crtc;
+	int ret;
 
 	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
 	if (!exynos_crtc)
@@ -364,11 +354,11 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
 	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
 	exynos_crtc->manager = manager;
 	exynos_crtc->pipe = manager->pipe;
-	exynos_crtc->plane = exynos_plane_init(manager->drm_dev,
-					       1 << manager->pipe, true);
-	if (!exynos_crtc->plane) {
-		kfree(exynos_crtc);
-		return -ENOMEM;
+	plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
+				  DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(plane)) {
+		ret = PTR_ERR(plane);
+		goto err_plane;
 	}
 
 	manager->crtc = &exynos_crtc->drm_crtc;
@@ -376,12 +366,22 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
 
 	private->crtc[manager->pipe] = crtc;
 
-	drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs);
+	ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
+					&exynos_crtc_funcs);
+	if (ret < 0)
+		goto err_crtc;
+
 	drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
 
 	exynos_drm_crtc_attach_mode_property(crtc);
 
 	return 0;
+
+err_crtc:
+	plane->funcs->destroy(plane);
+err_plane:
+	kfree(exynos_crtc);
+	return ret;
 }
 
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
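
The crtc is now registered together with an explicit primary plane instead of a driver-private one. The core of that pattern, sketched with hypothetical foo_ names (drm, crtc and pipe are assumed to be set up by the caller):

	struct drm_plane *primary;
	int ret;

	primary = foo_plane_init(drm, 1 << pipe, DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = drm_crtc_init_with_planes(drm, crtc, primary, NULL,
					&foo_crtc_funcs);
	if (ret < 0)
		primary->funcs->destroy(primary);

With the primary plane owned by the core, framebuffer handling on mode set goes through crtc->primary rather than through driver bookkeeping.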
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index fa08f05e3e34..96c87db388fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -342,8 +342,12 @@ int exynos_dpi_remove(struct device *dev)
 	struct exynos_dpi *ctx = exynos_dpi_display.ctx;
 
 	exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+
+	exynos_dpi_connector_destroy(&ctx->connector);
 	encoder->funcs->destroy(encoder);
-	drm_connector_cleanup(&ctx->connector);
+
+	if (ctx->panel)
+		drm_panel_detach(ctx->panel);
 
 	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 0d74e9b99c4e..443a2069858a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -15,7 +15,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <linux/anon_inodes.h>
 #include <linux/component.h>
 
 #include <drm/exynos_drm.h>
@@ -86,8 +85,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 		struct drm_plane *plane;
 		unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
 
-		plane = exynos_plane_init(dev, possible_crtcs, false);
-		if (!plane)
+		plane = exynos_plane_init(dev, possible_crtcs,
+					  DRM_PLANE_TYPE_OVERLAY);
+		if (IS_ERR(plane))
 			goto err_mode_config_cleanup;
 	}
 
@@ -116,6 +116,23 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	/* force connectors detection */
 	drm_helper_hpd_irq_event(dev);
 
+	/*
+	 * enable drm irq mode.
+	 * - with irq_enabled = true, we can use the vblank feature.
+	 *
+	 * P.S. note that we wouldn't use drm irq handler but
+	 * just specific driver own one instead because
+	 * drm framework supports only one irq handler.
+	 */
+	dev->irq_enabled = true;
+
+	/*
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
+	 * by drm timer once a current process gives up ownership of
+	 * vblank event.(after drm_vblank_put function is called)
+	 */
+	dev->vblank_disable_allowed = true;
+
 	return 0;
 
 err_unbind_all:
@@ -136,23 +153,19 @@ static int exynos_drm_unload(struct drm_device *dev)
 	exynos_drm_device_subdrv_remove(dev);
 
 	exynos_drm_fbdev_fini(dev);
-	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
-	drm_mode_config_cleanup(dev);
 
+	component_unbind_all(dev->dev, dev);
+	drm_vblank_cleanup(dev);
+	drm_mode_config_cleanup(dev);
 	drm_release_iommu_mapping(dev);
-	kfree(dev->dev_private);
 
-	component_unbind_all(dev->dev, dev);
+	kfree(dev->dev_private);
 	dev->dev_private = NULL;
 
 	return 0;
 }
 
-static const struct file_operations exynos_drm_gem_fops = {
-	.mmap = exynos_drm_gem_mmap_buffer,
-};
-
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_connector *connector;
@@ -191,7 +204,6 @@ static int exynos_drm_resume(struct drm_device *dev)
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv;
-	struct file *anon_filp;
 	int ret;
 
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -204,21 +216,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 	if (ret)
 		goto err_file_priv_free;
 
-	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
-					NULL, 0);
-	if (IS_ERR(anon_filp)) {
-		ret = PTR_ERR(anon_filp);
-		goto err_subdrv_close;
-	}
-
-	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
-	file_priv->anon_filp = anon_filp;
-
 	return ret;
 
-err_subdrv_close:
-	exynos_drm_subdrv_close(dev, file);
-
 err_file_priv_free:
 	kfree(file_priv);
 	file->driver_priv = NULL;
@@ -234,7 +233,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct exynos_drm_private *private = dev->dev_private;
-	struct drm_exynos_file_private *file_priv;
 	struct drm_pending_vblank_event *v, *vt;
 	struct drm_pending_event *e, *et;
 	unsigned long flags;
@@ -260,10 +258,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	file_priv = file->driver_priv;
-	if (file_priv->anon_filp)
-		fput(file_priv->anon_filp);
-
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
 }
@@ -282,11 +276,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
 static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
 			DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
-			exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
-			DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
-			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
 			exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
@@ -330,6 +319,7 @@ static struct drm_driver exynos_drm_driver = {
 	.preclose		= exynos_drm_preclose,
 	.lastclose		= exynos_drm_lastclose,
 	.postclose		= exynos_drm_postclose,
+	.set_busid		= drm_platform_set_busid,
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= exynos_drm_crtc_enable_vblank,
 	.disable_vblank		= exynos_drm_crtc_disable_vblank,
@@ -485,21 +475,20 @@ void exynos_drm_component_del(struct device *dev,
 	mutex_unlock(&drm_component_lock);
 }
 
-static int compare_of(struct device *dev, void *data)
+static int compare_dev(struct device *dev, void *data)
 {
 	return dev == (struct device *)data;
 }
 
-static int exynos_drm_add_components(struct device *dev, struct master *m)
+static struct component_match *exynos_drm_match_add(struct device *dev)
 {
+	struct component_match *match = NULL;
 	struct component_dev *cdev;
 	unsigned int attach_cnt = 0;
 
 	mutex_lock(&drm_component_lock);
 
 	list_for_each_entry(cdev, &drm_component_list, list) {
-		int ret;
-
 		/*
 		 * Add components to master only in case that crtc and
 		 * encoder/connector device objects exist.
@@ -514,16 +503,10 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
 		/*
 		 * fimd and dpi modules have same device object so add
 		 * only crtc device object in this case.
-		 *
-		 * TODO. if dpi module follows driver-model driver then
-		 * below codes can be removed.
 		 */
 		if (cdev->crtc_dev == cdev->conn_dev) {
-			ret = component_master_add_child(m, compare_of,
-							 cdev->crtc_dev);
-			if (ret < 0)
-				return ret;
-
+			component_match_add(dev, &match, compare_dev,
					    cdev->crtc_dev);
 			goto out_lock;
 		}
 
@@ -533,11 +516,8 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
 		 * connector/encoder need pipe number of crtc when they
 		 * are created.
 		 */
-		ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
-		ret |= component_master_add_child(m, compare_of,
-						  cdev->conn_dev);
-		if (ret < 0)
-			return ret;
+		component_match_add(dev, &match, compare_dev, cdev->crtc_dev);
+		component_match_add(dev, &match, compare_dev, cdev->conn_dev);
 
 out_lock:
 		mutex_lock(&drm_component_lock);
@@ -545,7 +525,7 @@ out_lock:
 
 	mutex_unlock(&drm_component_lock);
 
-	return attach_cnt ? 0 : -ENODEV;
+	return attach_cnt ? match : ERR_PTR(-EPROBE_DEFER);
 }
 
 static int exynos_drm_bind(struct device *dev)
@@ -559,13 +539,13 @@ static void exynos_drm_unbind(struct device *dev)
 }
 
 static const struct component_master_ops exynos_drm_ops = {
-	.add_components	= exynos_drm_add_components,
 	.bind		= exynos_drm_bind,
 	.unbind		= exynos_drm_unbind,
 };
 
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
+	struct component_match *match;
 	int ret;
 
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -632,13 +612,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 		goto err_unregister_ipp_drv;
 #endif
 
-	ret = component_master_add(&pdev->dev, &exynos_drm_ops);
+	match = exynos_drm_match_add(&pdev->dev);
+	if (IS_ERR(match)) {
+		ret = PTR_ERR(match);
+		goto err_unregister_resources;
+	}
+
+	ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
+					      match);
 	if (ret < 0)
-		DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n");
+		goto err_unregister_resources;
 
-	return 0;
+	return ret;
+
+err_unregister_resources:
 
 #ifdef CONFIG_DRM_EXYNOS_IPP
+	exynos_platform_device_ipp_unregister();
 err_unregister_ipp_drv:
 	platform_driver_unregister(&ipp_driver);
 err_unregister_gsc_drv:
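
This is the move from the removed add_components() hook to the component-match API: candidate devices are collected up front with component_match_add() and the whole set is handed to component_master_add_with_match(). A condensed sketch of that probe flow (the foo_ names and the two device pointers are hypothetical):

	static int foo_drm_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;

		component_match_add(&pdev->dev, &match, compare_dev, crtc_dev);
		component_match_add(&pdev->dev, &match, compare_dev, conn_dev);

		return component_master_add_with_match(&pdev->dev,
						       &foo_master_ops, match);
	}

Returning ERR_PTR(-EPROBE_DEFER) from exynos_drm_match_add() when nothing has registered yet lets the driver core retry the probe later, replacing the old "re-tried by last sub driver" debug path.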
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 69a6fa397d75..d22e640f59a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -240,7 +240,6 @@ struct exynos_drm_g2d_private {
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
 	struct device			*ipp_dev;
-	struct file			*anon_filp;
 };
 
 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 442aa2d00132..24741d8758e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -114,6 +114,8 @@
 #define DSIM_SYNC_INFORM		(1 << 27)
 #define DSIM_EOT_DISABLE		(1 << 28)
 #define DSIM_MFLUSH_VS			(1 << 29)
+/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
+#define DSIM_CLKLANE_STOP		(1 << 30)
 
 /* DSIM_ESCMODE */
 #define DSIM_TX_TRIGGER_RST		(1 << 4)
@@ -262,6 +264,7 @@ struct exynos_dsi_driver_data {
 	unsigned int plltmr_reg;
 
 	unsigned int has_freqband:1;
+	unsigned int has_clklane_stop:1;
 };
 
 struct exynos_dsi {
@@ -301,9 +304,16 @@ struct exynos_dsi {
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
 #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
 
+static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
+	.plltmr_reg = 0x50,
+	.has_freqband = 1,
+	.has_clklane_stop = 1,
+};
+
 static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
 	.plltmr_reg = 0x50,
 	.has_freqband = 1,
+	.has_clklane_stop = 1,
 };
 
 static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
@@ -311,6 +321,8 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
 };
 
 static struct of_device_id exynos_dsi_of_match[] = {
+	{ .compatible = "samsung,exynos3250-mipi-dsi",
+	  .data = &exynos3_dsi_driver_data },
 	{ .compatible = "samsung,exynos4210-mipi-dsi",
 	  .data = &exynos4_dsi_driver_data },
 	{ .compatible = "samsung,exynos5410-mipi-dsi",
@@ -421,7 +433,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
 	if (!fout) {
 		dev_err(dsi->dev,
 			"failed to find PLL PMS for requested frequency\n");
-		return -EFAULT;
+		return 0;
 	}
 	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
 
@@ -453,7 +465,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
 	do {
 		if (timeout-- == 0) {
 			dev_err(dsi->dev, "PLL failed to stabilize\n");
-			return -EFAULT;
+			return 0;
 		}
 		reg = readl(dsi->reg_base + DSIM_STATUS_REG);
 	} while ((reg & DSIM_PLL_STABLE) == 0);
@@ -569,6 +581,7 @@ static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
 
 static int exynos_dsi_init_link(struct exynos_dsi *dsi)
 {
+	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	int timeout;
 	u32 reg;
 	u32 lanes_mask;
@@ -650,6 +663,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
 	reg |= DSIM_LANE_EN(lanes_mask);
 	writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
 
+	/*
+	 * Use non-continuous clock mode if the peripheral wants it
+	 * and the host controller supports it.
+	 *
+	 * In non-continuous clock mode, the host controller turns off
+	 * the HS clock between high-speed transmissions to reduce
+	 * power consumption.
+	 */
+	if (driver_data->has_clklane_stop &&
+	    dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+		reg |= DSIM_CLKLANE_STOP;
+		writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+	}
+
 	/* Check clock and data lane state are stop state */
 	timeout = 100;
 	do {
@@ -1414,6 +1441,9 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
 
 static void exynos_dsi_connector_destroy(struct drm_connector *connector)
 {
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	connector->dev = NULL;
 }
 
 static struct drm_connector_funcs exynos_dsi_connector_funcs = {
@@ -1634,10 +1664,10 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
 
 	exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
 
-	mipi_dsi_host_unregister(&dsi->dsi_host);
-
+	exynos_dsi_connector_destroy(&dsi->connector);
 	encoder->funcs->destroy(encoder);
-	drm_connector_cleanup(&dsi->connector);
+
+	mipi_dsi_host_unregister(&dsi->dsi_host);
 }
 
 static const struct component_ops exynos_dsi_component_ops = {
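
DSIM_CLKLANE_STOP only takes effect when the attached peripheral asks for non-continuous clock mode, which a DSI device driver requests through its mode_flags. A sketch of that opt-in from a hypothetical panel probe:

	static int foo_panel_probe(struct mipi_dsi_device *dsi)
	{
		dsi->lanes = 4;
		dsi->format = MIPI_DSI_FMT_RGB888;
		dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
				  MIPI_DSI_CLOCK_NON_CONTINUOUS;

		return mipi_dsi_attach(dsi);
	}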
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 65a22cad7b36..d346d1e6eda0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -165,6 +165,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
+		kfree(exynos_fb);
 		DRM_ERROR("failed to initialize framebuffer\n");
 		return ERR_PTR(ret);
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 32e63f60e1d1..e12ea90c6237 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -123,6 +123,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
 	fbi->screen_base = buffer->kvaddr + offset;
 	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
 
 	return 0;
 }
@@ -353,9 +354,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
 
 	fbdev = to_exynos_fbdev(private->fb_helper);
 
-	if (fbdev->exynos_gem_obj)
-		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
-
 	exynos_drm_fbdev_destroy(dev, private->fb_helper);
 	kfree(fbdev);
 	private->fb_helper = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index ec7cc9ea50df..68d38eb6774d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -336,9 +336,6 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
 	fimc_set_bits(ctx, EXYNOS_CIWDOFST,
 		EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
 		EXYNOS_CIWDOFST_CLROVFICR);
-	fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
-		EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
-		EXYNOS_CIWDOFST_CLROVFICR);
 
 	dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
 		ctx->id, status);
@@ -718,24 +715,24 @@ static int fimc_src_set_addr(struct device *dev,
 	case IPP_BUF_ENQUEUE:
 		config = &property->config[EXYNOS_DRM_OPS_SRC];
 		fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
-			EXYNOS_CIIYSA(buf_id));
+			EXYNOS_CIIYSA0);
 
 		if (config->fmt == DRM_FORMAT_YVU420) {
 			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
-				EXYNOS_CIICBSA(buf_id));
+				EXYNOS_CIICBSA0);
 			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
-				EXYNOS_CIICRSA(buf_id));
+				EXYNOS_CIICRSA0);
 		} else {
 			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
-				EXYNOS_CIICBSA(buf_id));
+				EXYNOS_CIICBSA0);
 			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
-				EXYNOS_CIICRSA(buf_id));
+				EXYNOS_CIICRSA0);
 		}
 		break;
 	case IPP_BUF_DEQUEUE:
-		fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
-		fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
-		fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
+		fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
+		fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
+		fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
 		break;
 	default:
 		/* bypass */
@@ -1122,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
 	return 0;
 }
 
-static int fimc_dst_get_buf_count(struct fimc_context *ctx)
-{
-	u32 cfg, buf_num;
-
-	cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
-
-	buf_num = hweight32(cfg);
-
-	DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
-
-	return buf_num;
-}
-
-static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
 		enum drm_exynos_ipp_buf_type buf_type)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	bool enable;
-	u32 cfg;
-	u32 mask = 0x00000001 << buf_id;
-	int ret = 0;
 	unsigned long flags;
+	u32 buf_num;
+	u32 cfg;
 
 	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
 
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	/* mask register set */
 	cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
 
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		enable = true;
-		break;
-	case IPP_BUF_DEQUEUE:
-		enable = false;
-		break;
-	default:
-		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
-		ret = -EINVAL;
-		goto err_unlock;
-	}
+	if (buf_type == IPP_BUF_ENQUEUE)
+		cfg |= (1 << buf_id);
+	else
+		cfg &= ~(1 << buf_id);
 
-	/* sequence id */
-	cfg &= ~mask;
-	cfg |= (enable << buf_id);
 	fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
 
-	/* interrupt enable */
-	if (buf_type == IPP_BUF_ENQUEUE &&
-	    fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
-		fimc_mask_irq(ctx, true);
+	buf_num = hweight32(cfg);
 
-	/* interrupt disable */
-	if (buf_type == IPP_BUF_DEQUEUE &&
-	    fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
-		fimc_mask_irq(ctx, false);
+	if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
+		fimc_mask_irq(ctx, true);
+	else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
+		fimc_mask_irq(ctx, false);
 
-err_unlock:
 	spin_unlock_irqrestore(&ctx->lock, flags);
-	return ret;
 }
 
 static int fimc_dst_set_addr(struct device *dev,
@@ -1240,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
 		break;
 	}
 
-	return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+	fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+
+	return 0;
 }
 
 static struct exynos_drm_ipp_ops fimc_dst_ops = {
@@ -1291,14 +1257,11 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 
 	DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
 
-	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
-		DRM_ERROR("failed to dequeue.\n");
-		return IRQ_HANDLED;
-	}
+	fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
 
 	event_work->ippdrv = ippdrv;
 	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
-	queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+	queue_work(ippdrv->event_workq, &event_work->work);
 
 	return IRQ_HANDLED;
 }
@@ -1590,11 +1553,8 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
 
 	fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
 
-	if (cmd == IPP_CMD_M2M) {
-		fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
-
+	if (cmd == IPP_CMD_M2M)
 		fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
-	}
 
 	return 0;
 }
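
CIFCNTSEQ is treated here as a plain bitmask of enqueued buffers, so the separate fimc_dst_get_buf_count() helper collapses into an hweight32() of the value just written. For instance:

	u32 cfg = BIT(0) | BIT(2);	/* buffers 0 and 2 enqueued */
	u32 buf_num = hweight32(cfg);	/* buf_num == 2 */

The interrupt is then unmasked once FIMC_BUF_START buffers are queued and masked again when the count drops to FIMC_BUF_STOP.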
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 5d09e33fef87..085b066a9993 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -104,6 +104,14 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
 	.has_limited_fmt = 1,
 };
 
+static struct fimd_driver_data exynos3_fimd_driver_data = {
+	.timing_base = 0x20000,
+	.lcdblk_offset = 0x210,
+	.lcdblk_bypass_shift = 1,
+	.has_shadowcon = 1,
+	.has_vidoutcon = 1,
+};
+
 static struct fimd_driver_data exynos4_fimd_driver_data = {
 	.timing_base = 0x0,
 	.lcdblk_offset = 0x210,
@@ -168,6 +176,8 @@ struct fimd_context {
 static const struct of_device_id fimd_driver_dt_match[] = {
 	{ .compatible = "samsung,s3c6400-fimd",
 	  .data = &s3c64xx_fimd_driver_data },
+	{ .compatible = "samsung,exynos3250-fimd",
+	  .data = &exynos3_fimd_driver_data },
 	{ .compatible = "samsung,exynos4210-fimd",
 	  .data = &exynos4_fimd_driver_data },
 	{ .compatible = "samsung,exynos5250-fimd",
@@ -204,7 +214,6 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
 		DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
-
 static void fimd_clear_channel(struct exynos_drm_manager *mgr)
 {
 	struct fimd_context *ctx = mgr->ctx;
@@ -214,17 +223,31 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
 
 	/* Check if any channel is enabled. */
 	for (win = 0; win < WINDOWS_NR; win++) {
-		u32 val = readl(ctx->regs + SHADOWCON);
-		if (val & SHADOWCON_CHx_ENABLE(win)) {
-			val &= ~SHADOWCON_CHx_ENABLE(win);
-			writel(val, ctx->regs + SHADOWCON);
+		u32 val = readl(ctx->regs + WINCON(win));
+
+		if (val & WINCONx_ENWIN) {
+			/* wincon */
+			val &= ~WINCONx_ENWIN;
+			writel(val, ctx->regs + WINCON(win));
+
+			/* unprotect windows */
+			if (ctx->driver_data->has_shadowcon) {
+				val = readl(ctx->regs + SHADOWCON);
+				val &= ~SHADOWCON_CHx_ENABLE(win);
+				writel(val, ctx->regs + SHADOWCON);
+			}
 			ch_enabled = 1;
 		}
 	}
 
 	/* Wait for vsync, as disable channel takes effect at next vsync */
-	if (ch_enabled)
+	if (ch_enabled) {
+		unsigned int state = ctx->suspended;
+
+		ctx->suspended = 0;
 		fimd_wait_for_vblank(mgr);
+		ctx->suspended = state;
+	}
 }
 
 static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
@@ -237,23 +260,6 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
 	mgr->drm_dev = ctx->drm_dev = drm_dev;
 	mgr->pipe = ctx->pipe = priv->pipe++;
 
-	/*
-	 * enable drm irq mode.
-	 * - with irq_enabled = true, we can use the vblank feature.
-	 *
-	 * P.S. note that we wouldn't use drm irq handler but
-	 * just specific driver own one instead because
-	 * drm framework supports only one irq handler.
-	 */
-	drm_dev->irq_enabled = true;
-
-	/*
-	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
-	 * by drm timer once a current process gives up ownership of
-	 * vblank event.(after drm_vblank_put function is called)
-	 */
-	drm_dev->vblank_disable_allowed = true;
-
 	/* attach this sub driver to iommu mapping if supported. */
 	if (is_drm_iommu_supported(ctx->drm_dev)) {
 		/*
@@ -1051,7 +1057,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
 {
 	struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
 	struct fimd_context *ctx = fimd_manager.ctx;
-	struct drm_crtc *crtc = mgr->crtc;
 
 	fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
 
@@ -1059,8 +1064,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
 	exynos_dpi_remove(dev);
 
 	fimd_mgr_remove(mgr);
-
-	crtc->funcs->destroy(crtc);
 }
 
 static const struct component_ops fimd_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 15db80138382..0d5b9698d384 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -318,40 +318,16 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
318 drm_gem_object_unreference_unlocked(obj); 318 drm_gem_object_unreference_unlocked(obj);
319} 319}
320 320
321int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 321int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
322 struct drm_file *file_priv)
323{
324 struct drm_exynos_gem_map_off *args = data;
325
326 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
327 args->handle, (unsigned long)args->offset);
328
329 if (!(dev->driver->driver_features & DRIVER_GEM)) {
330 DRM_ERROR("does not support GEM.\n");
331 return -ENODEV;
332 }
333
334 return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
335 &args->offset);
336}
337
338int exynos_drm_gem_mmap_buffer(struct file *filp,
339 struct vm_area_struct *vma) 322 struct vm_area_struct *vma)
340{ 323{
341 struct drm_gem_object *obj = filp->private_data; 324 struct drm_device *drm_dev = exynos_gem_obj->base.dev;
342 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
343 struct drm_device *drm_dev = obj->dev;
344 struct exynos_drm_gem_buf *buffer; 325 struct exynos_drm_gem_buf *buffer;
345 unsigned long vm_size; 326 unsigned long vm_size;
346 int ret; 327 int ret;
347 328
348 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 329 vma->vm_flags &= ~VM_PFNMAP;
349 330 vma->vm_pgoff = 0;
350 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
351 vma->vm_private_data = obj;
352 vma->vm_ops = drm_dev->driver->gem_vm_ops;
353
354 update_vm_cache_attr(exynos_gem_obj, vma);
355 331
356 vm_size = vma->vm_end - vma->vm_start; 332 vm_size = vma->vm_end - vma->vm_start;
357 333
@@ -373,60 +349,6 @@ int exynos_drm_gem_mmap_buffer(struct file *filp,
373 return ret; 349 return ret;
374 } 350 }
375 351
376 /*
377 * take a reference to this mapping of the object. And this reference
378 * is unreferenced by the corresponding vm_close call.
379 */
380 drm_gem_object_reference(obj);
381
382 drm_vm_open_locked(drm_dev, vma);
383
384 return 0;
385}
386
387int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
388 struct drm_file *file_priv)
389{
390 struct drm_exynos_file_private *exynos_file_priv;
391 struct drm_exynos_gem_mmap *args = data;
392 struct drm_gem_object *obj;
393 struct file *anon_filp;
394 unsigned long addr;
395
396 if (!(dev->driver->driver_features & DRIVER_GEM)) {
397 DRM_ERROR("does not support GEM.\n");
398 return -ENODEV;
399 }
400
401 mutex_lock(&dev->struct_mutex);
402
403 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
404 if (!obj) {
405 DRM_ERROR("failed to lookup gem object.\n");
406 mutex_unlock(&dev->struct_mutex);
407 return -EINVAL;
408 }
409
410 exynos_file_priv = file_priv->driver_priv;
411 anon_filp = exynos_file_priv->anon_filp;
412 anon_filp->private_data = obj;
413
414 addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
415 MAP_SHARED, 0);
416
417 drm_gem_object_unreference(obj);
418
419 if (IS_ERR_VALUE(addr)) {
420 mutex_unlock(&dev->struct_mutex);
421 return (int)addr;
422 }
423
424 mutex_unlock(&dev->struct_mutex);
425
426 args->mapped = addr;
427
428 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
429
430 return 0; 352 return 0;
431} 353}
432 354
@@ -710,16 +632,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
710 exynos_gem_obj = to_exynos_gem_obj(obj); 632 exynos_gem_obj = to_exynos_gem_obj(obj);
711 633
712 ret = check_gem_flags(exynos_gem_obj->flags); 634 ret = check_gem_flags(exynos_gem_obj->flags);
713 if (ret) { 635 if (ret)
714 drm_gem_vm_close(vma); 636 goto err_close_vm;
715 drm_gem_free_mmap_offset(obj);
716 return ret;
717 }
718
719 vma->vm_flags &= ~VM_PFNMAP;
720 vma->vm_flags |= VM_MIXEDMAP;
721 637
722 update_vm_cache_attr(exynos_gem_obj, vma); 638 update_vm_cache_attr(exynos_gem_obj, vma);
723 639
640 ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
641 if (ret)
642 goto err_close_vm;
643
644 return ret;
645
646err_close_vm:
647 drm_gem_vm_close(vma);
648 drm_gem_free_mmap_offset(obj);
649
724 return ret; 650 return ret;
725} 651}
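[Editor's aside, not part of the patch] Net effect of the exynos_drm_gem.c changes above: the driver-private map-offset and mmap ioctls are gone, mapping goes through the standard GEM mmap entry point, and every failure after the generic setup unwinds through one err_close_vm label. A small userspace sketch of that single-label unwind idiom (the step names are illustrative, not the driver's calls):

#include <stdio.h>

static int check_flags(int fail) { return fail ? -1 : 0; }
static int map_buffer(int fail)  { return fail ? -1 : 0; }
static void close_vm(void)       { puts("close vm");         /* drm_gem_vm_close() */ }
static void free_offset(void)    { puts("free mmap offset"); /* drm_gem_free_mmap_offset() */ }

static int gem_mmap(int fail_flags, int fail_map)
{
        int ret;

        ret = check_flags(fail_flags);
        if (ret)
                goto err_close_vm;

        ret = map_buffer(fail_map);
        if (ret)
                goto err_close_vm;

        return 0;

err_close_vm:                   /* one label undoes the generic setup */
        close_vm();
        free_offset();
        return ret;
}

int main(void)
{
        return gem_mmap(0, 1) ? 1 : 0;
}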
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 1592c0ba7de8..ec58fe9c40df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -12,6 +12,8 @@
12#ifndef _EXYNOS_DRM_GEM_H_ 12#ifndef _EXYNOS_DRM_GEM_H_
13#define _EXYNOS_DRM_GEM_H_ 13#define _EXYNOS_DRM_GEM_H_
14 14
15#include <drm/drm_gem.h>
16
15#define to_exynos_gem_obj(x) container_of(x,\ 17#define to_exynos_gem_obj(x) container_of(x,\
16 struct exynos_drm_gem_obj, base) 18 struct exynos_drm_gem_obj, base)
17 19
@@ -111,20 +113,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
111 unsigned int gem_handle, 113 unsigned int gem_handle,
112 struct drm_file *filp); 114 struct drm_file *filp);
113 115
114/* get buffer offset to map to user space. */
115int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
116 struct drm_file *file_priv);
117
118/*
119 * mmap the physically continuous memory that a gem object contains
120 * to user space.
121 */
122int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
123 struct drm_file *file_priv);
124
125int exynos_drm_gem_mmap_buffer(struct file *filp,
126 struct vm_area_struct *vma);
127
128/* map user space allocated by malloc to pages. */ 116/* map user space allocated by malloc to pages. */
129int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, 117int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
130 struct drm_file *file_priv); 118 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 9e3ff1672965..c6a013fc321c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1326,8 +1326,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1326 buf_id[EXYNOS_DRM_OPS_SRC]; 1326 buf_id[EXYNOS_DRM_OPS_SRC];
1327 event_work->buf_id[EXYNOS_DRM_OPS_DST] = 1327 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1328 buf_id[EXYNOS_DRM_OPS_DST]; 1328 buf_id[EXYNOS_DRM_OPS_DST];
1329 queue_work(ippdrv->event_workq, 1329 queue_work(ippdrv->event_workq, &event_work->work);
1330 (struct work_struct *)event_work);
1331 } 1330 }
1332 1331
1333 return IRQ_HANDLED; 1332 return IRQ_HANDLED;
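[Editor's aside, not part of the patch] The queue_work() fix above, like the matching container_of() conversions in ipp_sched_cmd() and ipp_sched_event() further down, replaces a cast that only worked while the work_struct happened to be the first member: the robust pattern is to queue the embedded member and recover the wrapper with container_of(). A userspace re-creation of the pair (types are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct event_work {
        int buf_id;                     /* payload before the member ... */
        struct work_struct work;        /* ... so a plain cast would be wrong */
};

static void event_handler(struct work_struct *w)
{
        struct event_work *ew = container_of(w, struct event_work, work);

        printf("buf_id = %d\n", ew->buf_id);
}

int main(void)
{
        struct event_work ew = { .buf_id = 42 };

        event_handler(&ew.work);        /* queue the member, not a cast */
        return 0;
}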
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index c411399070d6..00d74b18f7cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -75,7 +75,6 @@ struct drm_exynos_ipp_mem_node {
75 u32 prop_id; 75 u32 prop_id;
76 u32 buf_id; 76 u32 buf_id;
77 struct drm_exynos_ipp_buf_info buf_info; 77 struct drm_exynos_ipp_buf_info buf_info;
78 struct drm_file *filp;
79}; 78};
80 79
81/* 80/*
@@ -319,44 +318,6 @@ static void ipp_print_property(struct drm_exynos_ipp_property *property,
319 sz->hsize, sz->vsize, config->flip, config->degree); 318 sz->hsize, sz->vsize, config->flip, config->degree);
320} 319}
321 320
322static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
323{
324 struct exynos_drm_ippdrv *ippdrv;
325 struct drm_exynos_ipp_cmd_node *c_node;
326 u32 prop_id = property->prop_id;
327
328 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
329
330 ippdrv = ipp_find_drv_by_handle(prop_id);
331 if (IS_ERR(ippdrv)) {
332 DRM_ERROR("failed to get ipp driver.\n");
333 return -EINVAL;
334 }
335
336 /*
337 * Find the command node in the ippdrv command list
338 * that matches this prop_id, and return the property
339 * information stored in that command node.
340 */
341 mutex_lock(&ippdrv->cmd_lock);
342 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
343 if ((c_node->property.prop_id == prop_id) &&
344 (c_node->state == IPP_STATE_STOP)) {
345 mutex_unlock(&ippdrv->cmd_lock);
346 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
347 property->cmd, (int)ippdrv);
348
349 c_node->property = *property;
350 return 0;
351 }
352 }
353 mutex_unlock(&ippdrv->cmd_lock);
354
355 DRM_ERROR("failed to search property.\n");
356
357 return -EINVAL;
358}
359
360static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) 321static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
361{ 322{
362 struct drm_exynos_ipp_cmd_work *cmd_work; 323 struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -392,6 +353,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
392 struct drm_exynos_ipp_property *property = data; 353 struct drm_exynos_ipp_property *property = data;
393 struct exynos_drm_ippdrv *ippdrv; 354 struct exynos_drm_ippdrv *ippdrv;
394 struct drm_exynos_ipp_cmd_node *c_node; 355 struct drm_exynos_ipp_cmd_node *c_node;
356 u32 prop_id;
395 int ret, i; 357 int ret, i;
396 358
397 if (!ctx) { 359 if (!ctx) {
@@ -404,6 +366,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
404 return -EINVAL; 366 return -EINVAL;
405 } 367 }
406 368
369 prop_id = property->prop_id;
370
407 /* 371 /*
408 * Log the property values supplied by the user 372 * Log the property values supplied by the user
409 * application; it may set several properties at once. 373 * application; it may set several properties at once.
@@ -412,14 +376,24 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
412 ipp_print_property(property, i); 376 ipp_print_property(property, i);
413 377
414 /* 378 /*
415 * the set-property ioctl normally generates a new prop_id, 379 * If prop_id is not zero, try to update the existing property.
416 * but here a prop_id was already assigned by an earlier
417 * set-property call (e.g. the PAUSE state); find that current
418 * prop_id and use it instead of allocating a new one.
419 */ 380 */
420 if (property->prop_id) { 381 if (prop_id) {
421 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 382 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
422 return ipp_find_and_set_property(property); 383
384 if (!c_node || c_node->filp != file) {
385 DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
386 return -EINVAL;
387 }
388
389 if (c_node->state != IPP_STATE_STOP) {
390 DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
391 return -EINVAL;
392 }
393
394 c_node->property = *property;
395
396 return 0;
423 } 397 }
424 398
425 /* find ipp driver using ipp id */ 399 /* find ipp driver using ipp id */
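[Editor's aside, not part of the patch] The replacement path above also gates prop_id reuse: the node must exist in the idr, must have been created through the same open file, and must be stopped before its property is overwritten. A toy sketch of that ownership-and-state check (types and error value are illustrative):

#include <stdio.h>

enum state { STATE_IDLE, STATE_RUNNING, STATE_STOPPED };

struct cmd_node {
        enum state state;
        const void *filp;       /* the open file that created the node */
};

static int may_reuse(const struct cmd_node *node, const void *file)
{
        if (!node || node->filp != file)
                return -22;     /* not found, or owned by another client */
        if (node->state != STATE_STOPPED)
                return -22;     /* still active: refuse to overwrite */
        return 0;
}

int main(void)
{
        struct cmd_node n = { .state = STATE_STOPPED, .filp = (void *)1 };

        printf("same owner:  %d\n", may_reuse(&n, (void *)1));
        printf("other owner: %d\n", may_reuse(&n, (void *)2));
        return 0;
}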
@@ -445,9 +419,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
445 property->prop_id, property->cmd, (int)ippdrv); 419 property->prop_id, property->cmd, (int)ippdrv);
446 420
447 /* store property information and ippdrv in private data */ 421 /* store property information and ippdrv in private data */
448 c_node->dev = dev;
449 c_node->property = *property; 422 c_node->property = *property;
450 c_node->state = IPP_STATE_IDLE; 423 c_node->state = IPP_STATE_IDLE;
424 c_node->filp = file;
451 425
452 c_node->start_work = ipp_create_cmd_work(); 426 c_node->start_work = ipp_create_cmd_work();
453 if (IS_ERR(c_node->start_work)) { 427 if (IS_ERR(c_node->start_work)) {
@@ -499,105 +473,37 @@ err_clear:
499 return ret; 473 return ret;
500} 474}
501 475
502static void ipp_clean_cmd_node(struct ipp_context *ctx, 476static int ipp_put_mem_node(struct drm_device *drm_dev,
503 struct drm_exynos_ipp_cmd_node *c_node)
504{
505 /* delete list */
506 list_del(&c_node->list);
507
508 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
509 c_node->property.prop_id);
510
511 /* destroy mutex */
512 mutex_destroy(&c_node->lock);
513 mutex_destroy(&c_node->mem_lock);
514 mutex_destroy(&c_node->event_lock);
515
516 /* free command node */
517 kfree(c_node->start_work);
518 kfree(c_node->stop_work);
519 kfree(c_node->event_work);
520 kfree(c_node);
521}
522
523static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
524{
525 switch (c_node->property.cmd) {
526 case IPP_CMD_WB:
527 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
528 case IPP_CMD_OUTPUT:
529 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
530 case IPP_CMD_M2M:
531 default:
532 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
533 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
534 }
535}
536
537static struct drm_exynos_ipp_mem_node
538 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
539 struct drm_exynos_ipp_queue_buf *qbuf)
540{
541 struct drm_exynos_ipp_mem_node *m_node;
542 struct list_head *head;
543 int count = 0;
544
545 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
546
547 /* source/destination memory list */
548 head = &c_node->mem_list[qbuf->ops_id];
549
550 /* find memory node from memory list */
551 list_for_each_entry(m_node, head, list) {
552 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
553
554 /* compare buffer id */
555 if (m_node->buf_id == qbuf->buf_id)
556 return m_node;
557 }
558
559 return NULL;
560}
561
562static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
563 struct drm_exynos_ipp_cmd_node *c_node, 477 struct drm_exynos_ipp_cmd_node *c_node,
564 struct drm_exynos_ipp_mem_node *m_node) 478 struct drm_exynos_ipp_mem_node *m_node)
565{ 479{
566 struct exynos_drm_ipp_ops *ops = NULL; 480 int i;
567 int ret = 0;
568 481
569 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 482 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
570 483
571 if (!m_node) { 484 if (!m_node) {
572 DRM_ERROR("invalid queue node.\n"); 485 DRM_ERROR("invalid dequeue node.\n");
573 return -EFAULT; 486 return -EFAULT;
574 } 487 }
575 488
576 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 489 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
577 490
578 /* get operations callback */ 491 /* put gem buffer */
579 ops = ippdrv->ops[m_node->ops_id]; 492 for_each_ipp_planar(i) {
580 if (!ops) { 493 unsigned long handle = m_node->buf_info.handles[i];
581 DRM_ERROR("not support ops.\n"); 494 if (handle)
582 return -EFAULT; 495 exynos_drm_gem_put_dma_addr(drm_dev, handle,
496 c_node->filp);
583 } 497 }
584 498
585 /* set address and enable irq */ 499 list_del(&m_node->list);
586 if (ops->set_addr) { 500 kfree(m_node);
587 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
588 m_node->buf_id, IPP_BUF_ENQUEUE);
589 if (ret) {
590 DRM_ERROR("failed to set addr.\n");
591 return ret;
592 }
593 }
594 501
595 return ret; 502 return 0;
596} 503}
597 504
598static struct drm_exynos_ipp_mem_node 505static struct drm_exynos_ipp_mem_node
599 *ipp_get_mem_node(struct drm_device *drm_dev, 506 *ipp_get_mem_node(struct drm_device *drm_dev,
600 struct drm_file *file,
601 struct drm_exynos_ipp_cmd_node *c_node, 507 struct drm_exynos_ipp_cmd_node *c_node,
602 struct drm_exynos_ipp_queue_buf *qbuf) 508 struct drm_exynos_ipp_queue_buf *qbuf)
603{ 509{
@@ -615,6 +521,7 @@ static struct drm_exynos_ipp_mem_node
615 m_node->ops_id = qbuf->ops_id; 521 m_node->ops_id = qbuf->ops_id;
616 m_node->prop_id = qbuf->prop_id; 522 m_node->prop_id = qbuf->prop_id;
617 m_node->buf_id = qbuf->buf_id; 523 m_node->buf_id = qbuf->buf_id;
524 INIT_LIST_HEAD(&m_node->list);
618 525
619 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 526 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
620 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 527 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
@@ -627,10 +534,11 @@ static struct drm_exynos_ipp_mem_node
627 dma_addr_t *addr; 534 dma_addr_t *addr;
628 535
629 addr = exynos_drm_gem_get_dma_addr(drm_dev, 536 addr = exynos_drm_gem_get_dma_addr(drm_dev,
630 qbuf->handle[i], file); 537 qbuf->handle[i], c_node->filp);
631 if (IS_ERR(addr)) { 538 if (IS_ERR(addr)) {
632 DRM_ERROR("failed to get addr.\n"); 539 DRM_ERROR("failed to get addr.\n");
633 goto err_clear; 540 ipp_put_mem_node(drm_dev, c_node, m_node);
541 return ERR_PTR(-EFAULT);
634 } 542 }
635 543
636 buf_info->handles[i] = qbuf->handle[i]; 544 buf_info->handles[i] = qbuf->handle[i];
@@ -640,46 +548,30 @@ static struct drm_exynos_ipp_mem_node
640 } 548 }
641 } 549 }
642 550
643 m_node->filp = file;
644 mutex_lock(&c_node->mem_lock); 551 mutex_lock(&c_node->mem_lock);
645 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); 552 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
646 mutex_unlock(&c_node->mem_lock); 553 mutex_unlock(&c_node->mem_lock);
647 554
648 return m_node; 555 return m_node;
649
650err_clear:
651 kfree(m_node);
652 return ERR_PTR(-EFAULT);
653} 556}
654 557
655static int ipp_put_mem_node(struct drm_device *drm_dev, 558static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
656 struct drm_exynos_ipp_cmd_node *c_node, 559 struct drm_exynos_ipp_cmd_node *c_node, int ops)
657 struct drm_exynos_ipp_mem_node *m_node)
658{ 560{
659 int i; 561 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
660 562 struct list_head *head = &c_node->mem_list[ops];
661 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
662 563
663 if (!m_node) { 564 mutex_lock(&c_node->mem_lock);
664 DRM_ERROR("invalid dequeue node.\n");
665 return -EFAULT;
666 }
667 565
668 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 566 list_for_each_entry_safe(m_node, tm_node, head, list) {
567 int ret;
669 568
670 /* put gem buffer */ 569 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
671 for_each_ipp_planar(i) { 570 if (ret)
672 unsigned long handle = m_node->buf_info.handles[i]; 571 DRM_ERROR("failed to put m_node.\n");
673 if (handle)
674 exynos_drm_gem_put_dma_addr(drm_dev, handle,
675 m_node->filp);
676 } 572 }
677 573
678 /* delete list in queue */ 574 mutex_unlock(&c_node->mem_lock);
679 list_del(&m_node->list);
680 kfree(m_node);
681
682 return 0;
683} 575}
684 576
685static void ipp_free_event(struct drm_pending_event *event) 577static void ipp_free_event(struct drm_pending_event *event)
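[Editor's aside, not part of the patch] ipp_clean_mem_nodes() walks a list while emptying it, hence the _safe iterator: the next pointer is cached before the current entry is unlinked and freed. The same reasoning is behind the list_for_each_entry_safe() switch in ipp_subdrv_remove() later in this patch. A stripped-down userspace re-creation of the idiom:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

struct mem_node {
        int buf_id;
        struct list_head list;
};

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head *pos, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
                struct mem_node *m = malloc(sizeof(*m));

                if (!m)
                        return 1;
                m->buf_id = i;
                list_add_tail(&m->list, &head);
        }

        /* _safe walk: cache the next pointer before freeing the entry */
        for (pos = head.next, tmp = pos->next; pos != &head;
             pos = tmp, tmp = pos->next) {
                struct mem_node *m = container_of(pos, struct mem_node, list);

                printf("putting buf_id %d\n", m->buf_id);
                list_del(pos);
                free(m);
        }
        return 0;
}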
@@ -688,7 +580,6 @@ static void ipp_free_event(struct drm_pending_event *event)
688} 580}
689 581
690static int ipp_get_event(struct drm_device *drm_dev, 582static int ipp_get_event(struct drm_device *drm_dev,
691 struct drm_file *file,
692 struct drm_exynos_ipp_cmd_node *c_node, 583 struct drm_exynos_ipp_cmd_node *c_node,
693 struct drm_exynos_ipp_queue_buf *qbuf) 584 struct drm_exynos_ipp_queue_buf *qbuf)
694{ 585{
@@ -700,7 +591,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
700 e = kzalloc(sizeof(*e), GFP_KERNEL); 591 e = kzalloc(sizeof(*e), GFP_KERNEL);
701 if (!e) { 592 if (!e) {
702 spin_lock_irqsave(&drm_dev->event_lock, flags); 593 spin_lock_irqsave(&drm_dev->event_lock, flags);
703 file->event_space += sizeof(e->event); 594 c_node->filp->event_space += sizeof(e->event);
704 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 595 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
705 return -ENOMEM; 596 return -ENOMEM;
706 } 597 }
@@ -712,7 +603,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
712 e->event.prop_id = qbuf->prop_id; 603 e->event.prop_id = qbuf->prop_id;
713 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; 604 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
714 e->base.event = &e->event.base; 605 e->base.event = &e->event.base;
715 e->base.file_priv = file; 606 e->base.file_priv = c_node->filp;
716 e->base.destroy = ipp_free_event; 607 e->base.destroy = ipp_free_event;
717 mutex_lock(&c_node->event_lock); 608 mutex_lock(&c_node->event_lock);
718 list_add_tail(&e->base.link, &c_node->event_list); 609 list_add_tail(&e->base.link, &c_node->event_list);
@@ -757,6 +648,115 @@ out_unlock:
757 return; 648 return;
758} 649}
759 650
651static void ipp_clean_cmd_node(struct ipp_context *ctx,
652 struct drm_exynos_ipp_cmd_node *c_node)
653{
654 int i;
655
656 /* cancel works */
657 cancel_work_sync(&c_node->start_work->work);
658 cancel_work_sync(&c_node->stop_work->work);
659 cancel_work_sync(&c_node->event_work->work);
660
661 /* put event */
662 ipp_put_event(c_node, NULL);
663
664 for_each_ipp_ops(i)
665 ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
666
667 /* delete list */
668 list_del(&c_node->list);
669
670 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
671 c_node->property.prop_id);
672
673 /* destroy mutex */
674 mutex_destroy(&c_node->lock);
675 mutex_destroy(&c_node->mem_lock);
676 mutex_destroy(&c_node->event_lock);
677
678 /* free command node */
679 kfree(c_node->start_work);
680 kfree(c_node->stop_work);
681 kfree(c_node->event_work);
682 kfree(c_node);
683}
684
685static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
686{
687 switch (c_node->property.cmd) {
688 case IPP_CMD_WB:
689 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
690 case IPP_CMD_OUTPUT:
691 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
692 case IPP_CMD_M2M:
693 default:
694 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
695 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
696 }
697}
698
699static struct drm_exynos_ipp_mem_node
700 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
701 struct drm_exynos_ipp_queue_buf *qbuf)
702{
703 struct drm_exynos_ipp_mem_node *m_node;
704 struct list_head *head;
705 int count = 0;
706
707 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
708
709 /* source/destination memory list */
710 head = &c_node->mem_list[qbuf->ops_id];
711
712 /* find memory node from memory list */
713 list_for_each_entry(m_node, head, list) {
714 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
715
716 /* compare buffer id */
717 if (m_node->buf_id == qbuf->buf_id)
718 return m_node;
719 }
720
721 return NULL;
722}
723
724static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
725 struct drm_exynos_ipp_cmd_node *c_node,
726 struct drm_exynos_ipp_mem_node *m_node)
727{
728 struct exynos_drm_ipp_ops *ops = NULL;
729 int ret = 0;
730
731 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
732
733 if (!m_node) {
734 DRM_ERROR("invalid queue node.\n");
735 return -EFAULT;
736 }
737
738 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
739
740 /* get operations callback */
741 ops = ippdrv->ops[m_node->ops_id];
742 if (!ops) {
743 DRM_ERROR("not support ops.\n");
744 return -EFAULT;
745 }
746
747 /* set address and enable irq */
748 if (ops->set_addr) {
749 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
750 m_node->buf_id, IPP_BUF_ENQUEUE);
751 if (ret) {
752 DRM_ERROR("failed to set addr.\n");
753 return ret;
754 }
755 }
756
757 return ret;
758}
759
760static void ipp_handle_cmd_work(struct device *dev, 760static void ipp_handle_cmd_work(struct device *dev,
761 struct exynos_drm_ippdrv *ippdrv, 761 struct exynos_drm_ippdrv *ippdrv,
762 struct drm_exynos_ipp_cmd_work *cmd_work, 762 struct drm_exynos_ipp_cmd_work *cmd_work,
@@ -766,7 +766,7 @@ static void ipp_handle_cmd_work(struct device *dev,
766 766
767 cmd_work->ippdrv = ippdrv; 767 cmd_work->ippdrv = ippdrv;
768 cmd_work->c_node = c_node; 768 cmd_work->c_node = c_node;
769 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work); 769 queue_work(ctx->cmd_workq, &cmd_work->work);
770} 770}
771 771
772static int ipp_queue_buf_with_run(struct device *dev, 772static int ipp_queue_buf_with_run(struct device *dev,
@@ -872,7 +872,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
872 /* find command node */ 872 /* find command node */
873 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 873 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
874 qbuf->prop_id); 874 qbuf->prop_id);
875 if (!c_node) { 875 if (!c_node || c_node->filp != file) {
876 DRM_ERROR("failed to get command node.\n"); 876 DRM_ERROR("failed to get command node.\n");
877 return -ENODEV; 877 return -ENODEV;
878 } 878 }
@@ -881,7 +881,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
881 switch (qbuf->buf_type) { 881 switch (qbuf->buf_type) {
882 case IPP_BUF_ENQUEUE: 882 case IPP_BUF_ENQUEUE:
883 /* get memory node */ 883 /* get memory node */
884 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf); 884 m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
885 if (IS_ERR(m_node)) { 885 if (IS_ERR(m_node)) {
886 DRM_ERROR("failed to get m_node.\n"); 886 DRM_ERROR("failed to get m_node.\n");
887 return PTR_ERR(m_node); 887 return PTR_ERR(m_node);
@@ -894,7 +894,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
894 */ 894 */
895 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) { 895 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
896 /* get event for destination buffer */ 896 /* get event for destination buffer */
897 ret = ipp_get_event(drm_dev, file, c_node, qbuf); 897 ret = ipp_get_event(drm_dev, c_node, qbuf);
898 if (ret) { 898 if (ret) {
899 DRM_ERROR("failed to get event.\n"); 899 DRM_ERROR("failed to get event.\n");
900 goto err_clean_node; 900 goto err_clean_node;
@@ -1007,7 +1007,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1007 1007
1008 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 1008 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1009 cmd_ctrl->prop_id); 1009 cmd_ctrl->prop_id);
1010 if (!c_node) { 1010 if (!c_node || c_node->filp != file) {
1011 DRM_ERROR("invalid command node list.\n"); 1011 DRM_ERROR("invalid command node list.\n");
1012 return -ENODEV; 1012 return -ENODEV;
1013 } 1013 }
@@ -1257,80 +1257,39 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1257 struct exynos_drm_ippdrv *ippdrv, 1257 struct exynos_drm_ippdrv *ippdrv,
1258 struct drm_exynos_ipp_cmd_node *c_node) 1258 struct drm_exynos_ipp_cmd_node *c_node)
1259{ 1259{
1260 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1261 struct drm_exynos_ipp_property *property = &c_node->property; 1260 struct drm_exynos_ipp_property *property = &c_node->property;
1262 struct list_head *head; 1261 int i;
1263 int ret = 0, i;
1264 1262
1265 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 1263 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1266 1264
1267 /* put event */ 1265 /* stop operations */
1268 ipp_put_event(c_node, NULL); 1266 if (ippdrv->stop)
1269 1267 ippdrv->stop(ippdrv->dev, property->cmd);
1270 mutex_lock(&c_node->mem_lock);
1271 1268
1272 /* check command */ 1269 /* check command */
1273 switch (property->cmd) { 1270 switch (property->cmd) {
1274 case IPP_CMD_M2M: 1271 case IPP_CMD_M2M:
1275 for_each_ipp_ops(i) { 1272 for_each_ipp_ops(i)
1276 /* source/destination memory list */ 1273 ipp_clean_mem_nodes(drm_dev, c_node, i);
1277 head = &c_node->mem_list[i];
1278
1279 list_for_each_entry_safe(m_node, tm_node,
1280 head, list) {
1281 ret = ipp_put_mem_node(drm_dev, c_node,
1282 m_node);
1283 if (ret) {
1284 DRM_ERROR("failed to put m_node.\n");
1285 goto err_clear;
1286 }
1287 }
1288 }
1289 break; 1274 break;
1290 case IPP_CMD_WB: 1275 case IPP_CMD_WB:
1291 /* destination memory list */ 1276 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
1292 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1293
1294 list_for_each_entry_safe(m_node, tm_node, head, list) {
1295 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1296 if (ret) {
1297 DRM_ERROR("failed to put m_node.\n");
1298 goto err_clear;
1299 }
1300 }
1301 break; 1277 break;
1302 case IPP_CMD_OUTPUT: 1278 case IPP_CMD_OUTPUT:
1303 /* source memory list */ 1279 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
1304 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1305
1306 list_for_each_entry_safe(m_node, tm_node, head, list) {
1307 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1308 if (ret) {
1309 DRM_ERROR("failed to put m_node.\n");
1310 goto err_clear;
1311 }
1312 }
1313 break; 1280 break;
1314 default: 1281 default:
1315 DRM_ERROR("invalid operations.\n"); 1282 DRM_ERROR("invalid operations.\n");
1316 ret = -EINVAL; 1283 return -EINVAL;
1317 goto err_clear;
1318 } 1284 }
1319 1285
1320err_clear: 1286 return 0;
1321 mutex_unlock(&c_node->mem_lock);
1322
1323 /* stop operations */
1324 if (ippdrv->stop)
1325 ippdrv->stop(ippdrv->dev, property->cmd);
1326
1327 return ret;
1328} 1287}
1329 1288
1330void ipp_sched_cmd(struct work_struct *work) 1289void ipp_sched_cmd(struct work_struct *work)
1331{ 1290{
1332 struct drm_exynos_ipp_cmd_work *cmd_work = 1291 struct drm_exynos_ipp_cmd_work *cmd_work =
1333 (struct drm_exynos_ipp_cmd_work *)work; 1292 container_of(work, struct drm_exynos_ipp_cmd_work, work);
1334 struct exynos_drm_ippdrv *ippdrv; 1293 struct exynos_drm_ippdrv *ippdrv;
1335 struct drm_exynos_ipp_cmd_node *c_node; 1294 struct drm_exynos_ipp_cmd_node *c_node;
1336 struct drm_exynos_ipp_property *property; 1295 struct drm_exynos_ipp_property *property;
@@ -1543,7 +1502,7 @@ err_event_unlock:
1543void ipp_sched_event(struct work_struct *work) 1502void ipp_sched_event(struct work_struct *work)
1544{ 1503{
1545 struct drm_exynos_ipp_event_work *event_work = 1504 struct drm_exynos_ipp_event_work *event_work =
1546 (struct drm_exynos_ipp_event_work *)work; 1505 container_of(work, struct drm_exynos_ipp_event_work, work);
1547 struct exynos_drm_ippdrv *ippdrv; 1506 struct exynos_drm_ippdrv *ippdrv;
1548 struct drm_exynos_ipp_cmd_node *c_node; 1507 struct drm_exynos_ipp_cmd_node *c_node;
1549 int ret; 1508 int ret;
@@ -1646,11 +1605,11 @@ err:
1646 1605
1647static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 1606static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1648{ 1607{
1649 struct exynos_drm_ippdrv *ippdrv; 1608 struct exynos_drm_ippdrv *ippdrv, *t;
1650 struct ipp_context *ctx = get_ipp_context(dev); 1609 struct ipp_context *ctx = get_ipp_context(dev);
1651 1610
1652 /* get ipp driver entry */ 1611 /* get ipp driver entry */
1653 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1612 list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
1654 if (is_drm_iommu_supported(drm_dev)) 1613 if (is_drm_iommu_supported(drm_dev))
1655 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1614 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1656 1615
@@ -1677,14 +1636,11 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1677static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, 1636static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1678 struct drm_file *file) 1637 struct drm_file *file)
1679{ 1638{
1680 struct drm_exynos_file_private *file_priv = file->driver_priv;
1681 struct exynos_drm_ippdrv *ippdrv = NULL; 1639 struct exynos_drm_ippdrv *ippdrv = NULL;
1682 struct ipp_context *ctx = get_ipp_context(dev); 1640 struct ipp_context *ctx = get_ipp_context(dev);
1683 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1641 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1684 int count = 0; 1642 int count = 0;
1685 1643
1686 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
1687
1688 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1644 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1689 mutex_lock(&ippdrv->cmd_lock); 1645 mutex_lock(&ippdrv->cmd_lock);
1690 list_for_each_entry_safe(c_node, tc_node, 1646 list_for_each_entry_safe(c_node, tc_node,
@@ -1692,7 +1648,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1692 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1648 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1693 count++, (int)ippdrv); 1649 count++, (int)ippdrv);
1694 1650
1695 if (c_node->dev == file_priv->ipp_dev) { 1651 if (c_node->filp == file) {
1696 /* 1652 /*
1697 * userland went into an abnormal state (e.g. the process 1653 * userland went into an abnormal state (e.g. the process
1698 * was killed) and closed the file. 1654 * was killed) and closed the file.
@@ -1808,63 +1764,12 @@ static int ipp_remove(struct platform_device *pdev)
1808 return 0; 1764 return 0;
1809} 1765}
1810 1766
1811static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1812{
1813 DRM_DEBUG_KMS("enable[%d]\n", enable);
1814
1815 return 0;
1816}
1817
1818#ifdef CONFIG_PM_SLEEP
1819static int ipp_suspend(struct device *dev)
1820{
1821 struct ipp_context *ctx = get_ipp_context(dev);
1822
1823 if (pm_runtime_suspended(dev))
1824 return 0;
1825
1826 return ipp_power_ctrl(ctx, false);
1827}
1828
1829static int ipp_resume(struct device *dev)
1830{
1831 struct ipp_context *ctx = get_ipp_context(dev);
1832
1833 if (!pm_runtime_suspended(dev))
1834 return ipp_power_ctrl(ctx, true);
1835
1836 return 0;
1837}
1838#endif
1839
1840#ifdef CONFIG_PM_RUNTIME
1841static int ipp_runtime_suspend(struct device *dev)
1842{
1843 struct ipp_context *ctx = get_ipp_context(dev);
1844
1845 return ipp_power_ctrl(ctx, false);
1846}
1847
1848static int ipp_runtime_resume(struct device *dev)
1849{
1850 struct ipp_context *ctx = get_ipp_context(dev);
1851
1852 return ipp_power_ctrl(ctx, true);
1853}
1854#endif
1855
1856static const struct dev_pm_ops ipp_pm_ops = {
1857 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
1858 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
1859};
1860
1861struct platform_driver ipp_driver = { 1767struct platform_driver ipp_driver = {
1862 .probe = ipp_probe, 1768 .probe = ipp_probe,
1863 .remove = ipp_remove, 1769 .remove = ipp_remove,
1864 .driver = { 1770 .driver = {
1865 .name = "exynos-drm-ipp", 1771 .name = "exynos-drm-ipp",
1866 .owner = THIS_MODULE, 1772 .owner = THIS_MODULE,
1867 .pm = &ipp_pm_ops,
1868 }, 1773 },
1869}; 1774};
1870 1775
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 6f48d62aeb30..2a61547a39d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,6 @@ struct drm_exynos_ipp_cmd_work {
48/* 48/*
49 * A structure of command node. 49 * A structure of command node.
50 * 50 *
51 * @dev: IPP device.
52 * @list: list head to command queue information. 51 * @list: list head to command queue information.
53 * @event_list: list head of event. 52 * @event_list: list head of event.
54 * @mem_list: list head to source,destination memory queue information. 53 * @mem_list: list head to source,destination memory queue information.
@@ -62,9 +61,9 @@ struct drm_exynos_ipp_cmd_work {
62 * @stop_work: stop command work structure. 61 * @stop_work: stop command work structure.
63 * @event_work: event work structure. 62 * @event_work: event work structure.
64 * @state: state of command node. 63 * @state: state of command node.
64 * @filp: associated file pointer.
65 */ 65 */
66struct drm_exynos_ipp_cmd_node { 66struct drm_exynos_ipp_cmd_node {
67 struct device *dev;
68 struct list_head list; 67 struct list_head list;
69 struct list_head event_list; 68 struct list_head event_list;
70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX]; 69 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
@@ -78,6 +77,7 @@ struct drm_exynos_ipp_cmd_node {
78 struct drm_exynos_ipp_cmd_work *stop_work; 77 struct drm_exynos_ipp_cmd_work *stop_work;
79 struct drm_exynos_ipp_event_work *event_work; 78 struct drm_exynos_ipp_event_work *event_work;
80 enum drm_exynos_ipp_state state; 79 enum drm_exynos_ipp_state state;
80 struct drm_file *filp;
81}; 81};
82 82
83/* 83/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8371cbd7631d..c7045a663763 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -139,6 +139,8 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
139 overlay->crtc_x, overlay->crtc_y, 139 overlay->crtc_x, overlay->crtc_y,
140 overlay->crtc_width, overlay->crtc_height); 140 overlay->crtc_width, overlay->crtc_height);
141 141
142 plane->crtc = crtc;
143
142 exynos_drm_crtc_plane_mode_set(crtc, overlay); 144 exynos_drm_crtc_plane_mode_set(crtc, overlay);
143 145
144 return 0; 146 return 0;
@@ -187,8 +189,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
187 if (ret < 0) 189 if (ret < 0)
188 return ret; 190 return ret;
189 191
190 plane->crtc = crtc;
191
192 exynos_plane_commit(plane); 192 exynos_plane_commit(plane);
193 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); 193 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
194 194
@@ -254,25 +254,26 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
254} 254}
255 255
256struct drm_plane *exynos_plane_init(struct drm_device *dev, 256struct drm_plane *exynos_plane_init(struct drm_device *dev,
257 unsigned long possible_crtcs, bool priv) 257 unsigned long possible_crtcs,
258 enum drm_plane_type type)
258{ 259{
259 struct exynos_plane *exynos_plane; 260 struct exynos_plane *exynos_plane;
260 int err; 261 int err;
261 262
262 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); 263 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
263 if (!exynos_plane) 264 if (!exynos_plane)
264 return NULL; 265 return ERR_PTR(-ENOMEM);
265 266
266 err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, 267 err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
267 &exynos_plane_funcs, formats, ARRAY_SIZE(formats), 268 &exynos_plane_funcs, formats,
268 priv); 269 ARRAY_SIZE(formats), type);
269 if (err) { 270 if (err) {
270 DRM_ERROR("failed to initialize plane\n"); 271 DRM_ERROR("failed to initialize plane\n");
271 kfree(exynos_plane); 272 kfree(exynos_plane);
272 return NULL; 273 return ERR_PTR(err);
273 } 274 }
274 275
275 if (priv) 276 if (type == DRM_PLANE_TYPE_PRIMARY)
276 exynos_plane->overlay.zpos = DEFAULT_ZPOS; 277 exynos_plane->overlay.zpos = DEFAULT_ZPOS;
277 else 278 else
278 exynos_plane_attach_zpos_property(&exynos_plane->base); 279 exynos_plane_attach_zpos_property(&exynos_plane->base);
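[Editor's aside, not part of the patch] Besides moving to drm_universal_plane_init() with an explicit plane type, exynos_plane_init() now encodes the errno in the returned pointer instead of returning NULL, so callers can distinguish -ENOMEM from an init failure. A userspace re-creation of the ERR_PTR()/IS_ERR()/PTR_ERR() convention it relies on (a sketch, not the kernel headers):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_plane;   /* stand-in for an allocated plane */

static void *plane_init(int fail_alloc)
{
        if (fail_alloc)
                return ERR_PTR(-ENOMEM);        /* was: return NULL */
        return &the_plane;
}

int main(void)
{
        void *p = plane_init(1);

        if (IS_ERR(p))
                printf("plane_init failed: %ld\n", PTR_ERR(p));
        return 0;
}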
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 84d464c90d3d..0d1986b115f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -17,4 +17,5 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
17void exynos_plane_commit(struct drm_plane *plane); 17void exynos_plane_commit(struct drm_plane *plane);
18void exynos_plane_dpms(struct drm_plane *plane, int mode); 18void exynos_plane_dpms(struct drm_plane *plane, int mode);
19struct drm_plane *exynos_plane_init(struct drm_device *dev, 19struct drm_plane *exynos_plane_init(struct drm_device *dev,
20 unsigned long possible_crtcs, bool priv); 20 unsigned long possible_crtcs,
21 enum drm_plane_type type);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 55af6b41c1df..b6a37d4f5b13 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -156,8 +156,7 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
156 event_work->ippdrv = ippdrv; 156 event_work->ippdrv = ippdrv;
157 event_work->buf_id[EXYNOS_DRM_OPS_DST] = 157 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; 158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
159 queue_work(ippdrv->event_workq, 159 queue_work(ippdrv->event_workq, &event_work->work);
160 (struct work_struct *)event_work);
161 } else { 160 } else {
162 DRM_ERROR("the SFR is set illegally\n"); 161 DRM_ERROR("the SFR is set illegally\n");
163 } 162 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 9528d81d8004..d565207040a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -303,23 +303,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
303 mgr->drm_dev = ctx->drm_dev = drm_dev; 303 mgr->drm_dev = ctx->drm_dev = drm_dev;
304 mgr->pipe = ctx->pipe = priv->pipe++; 304 mgr->pipe = ctx->pipe = priv->pipe++;
305 305
306 /*
307 * enable drm irq mode.
308 * - with irq_enabled = 1, we can use the vblank feature.
309 *
310 * P.S. note that we do not use the drm irq handler but
311 * the driver's own one instead, because the drm
312 * framework supports only one irq handler.
313 */
314 drm_dev->irq_enabled = 1;
315
316 /*
317 * with vblank_disable_allowed = 1, the vblank interrupt will be
318 * disabled by the drm timer once the current process gives up
319 * ownership of the vblank event (after drm_vblank_put() is called).
320 */
321 drm_dev->vblank_disable_allowed = 1;
322
323 return 0; 306 return 0;
324} 307}
325 308
@@ -648,7 +631,6 @@ static int vidi_remove(struct platform_device *pdev)
648 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); 631 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
649 struct vidi_context *ctx = mgr->ctx; 632 struct vidi_context *ctx = mgr->ctx;
650 struct drm_encoder *encoder = ctx->encoder; 633 struct drm_encoder *encoder = ctx->encoder;
651 struct drm_crtc *crtc = mgr->crtc;
652 634
653 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 635 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
654 kfree(ctx->raw_edid); 636 kfree(ctx->raw_edid);
@@ -657,7 +639,6 @@ static int vidi_remove(struct platform_device *pdev)
657 return -EINVAL; 639 return -EINVAL;
658 } 640 }
659 641
660 crtc->funcs->destroy(crtc);
661 encoder->funcs->destroy(encoder); 642 encoder->funcs->destroy(encoder);
662 drm_connector_cleanup(&ctx->connector); 643 drm_connector_cleanup(&ctx->connector);
663 644
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 562966db2aa1..7910fb37d9bb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1040,6 +1040,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
1040 1040
1041static void hdmi_connector_destroy(struct drm_connector *connector) 1041static void hdmi_connector_destroy(struct drm_connector *connector)
1042{ 1042{
1043 drm_connector_unregister(connector);
1044 drm_connector_cleanup(connector);
1043} 1045}
1044 1046
1045static struct drm_connector_funcs hdmi_connector_funcs = { 1047static struct drm_connector_funcs hdmi_connector_funcs = {
@@ -2314,8 +2316,8 @@ static void hdmi_unbind(struct device *dev, struct device *master, void *data)
2314 struct drm_encoder *encoder = display->encoder; 2316 struct drm_encoder *encoder = display->encoder;
2315 struct hdmi_context *hdata = display->ctx; 2317 struct hdmi_context *hdata = display->ctx;
2316 2318
2319 hdmi_connector_destroy(&hdata->connector);
2317 encoder->funcs->destroy(encoder); 2320 encoder->funcs->destroy(encoder);
2318 drm_connector_cleanup(&hdata->connector);
2319} 2321}
2320 2322
2321static const struct component_ops hdmi_component_ops = { 2323static const struct component_ops hdmi_component_ops = {
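[Editor's aside, not part of the patch] The two HDMI hunks above belong together: the destroy hook now unregisters the connector before cleaning it up, and unbind tears down the connector before its encoder. A toy sketch of that reverse-of-creation ordering (names are illustrative):

#include <stdio.h>

static void connector_unregister(void) { puts("connector: unregister"); }
static void connector_cleanup(void)    { puts("connector: cleanup"); }
static void encoder_destroy(void)      { puts("encoder: destroy"); }

static void connector_destroy(void)
{
        connector_unregister(); /* drop the userspace-visible entry first */
        connector_cleanup();    /* then free the DRM-side state */
}

static void hdmi_unbind(void)
{
        connector_destroy();    /* connector before the encoder it hangs off */
        encoder_destroy();
}

int main(void)
{
        hdmi_unbind();
        return 0;
}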
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e8b4ec84b312..a41c84ee3a2d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1302,15 +1302,12 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1302static void mixer_unbind(struct device *dev, struct device *master, void *data) 1302static void mixer_unbind(struct device *dev, struct device *master, void *data)
1303{ 1303{
1304 struct exynos_drm_manager *mgr = dev_get_drvdata(dev); 1304 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
1305 struct drm_crtc *crtc = mgr->crtc;
1306 1305
1307 dev_info(dev, "remove successful\n"); 1306 dev_info(dev, "remove successful\n");
1308 1307
1309 mixer_mgr_remove(mgr); 1308 mixer_mgr_remove(mgr);
1310 1309
1311 pm_runtime_disable(dev); 1310 pm_runtime_disable(dev);
1312
1313 crtc->funcs->destroy(crtc);
1314} 1311}
1315 1312
1316static const struct component_ops mixer_component_ops = { 1313static const struct component_ops mixer_component_ops = {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index a4cc0e60a1be..9f158eab517a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1089,7 +1089,7 @@ static char *link_train_names[] = {
1089}; 1089};
1090#endif 1090#endif
1091 1091
1092#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 1092#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
1093/* 1093/*
1094static uint8_t 1094static uint8_t
1095cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing) 1095cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@@ -1276,7 +1276,7 @@ cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level
1276 cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]); 1276 cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
1277 1277
1278 /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */ 1278 /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
1279 if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200) 1279 if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
1280 cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040); 1280 cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
1281 else 1281 else
1282 cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040); 1282 cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index d0dd3bea8aa5..ddd90ddbc200 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -540,7 +540,8 @@ static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
540static int psbfb_probe(struct drm_fb_helper *helper, 540static int psbfb_probe(struct drm_fb_helper *helper,
541 struct drm_fb_helper_surface_size *sizes) 541 struct drm_fb_helper_surface_size *sizes)
542{ 542{
543 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper; 543 struct psb_fbdev *psb_fbdev =
544 container_of(helper, struct psb_fbdev, psb_fb_helper);
544 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev; 545 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
545 struct drm_psb_private *dev_priv = dev->dev_private; 546 struct drm_psb_private *dev_priv = dev->dev_private;
546 int bytespp; 547 int bytespp;
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index f5860a739bd8..cdbb350c9d5d 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -21,6 +21,7 @@
21#define _PSB_GTT_H_ 21#define _PSB_GTT_H_
22 22
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24#include <drm/drm_gem.h>
24 25
25/* This wants cleaning up with respect to the psb_dev and un-needed stuff */ 26/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
26struct psb_gtt { 27struct psb_gtt {
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d3497348c4d5..63bde4e86c6a 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -116,30 +116,30 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
116 116
117 switch (edp_link_params->preemphasis) { 117 switch (edp_link_params->preemphasis) {
118 case 0: 118 case 0:
119 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 119 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
120 break; 120 break;
121 case 1: 121 case 1:
122 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 122 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
123 break; 123 break;
124 case 2: 124 case 2:
125 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 125 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
126 break; 126 break;
127 case 3: 127 case 3:
128 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 128 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
129 break; 129 break;
130 } 130 }
131 switch (edp_link_params->vswing) { 131 switch (edp_link_params->vswing) {
132 case 0: 132 case 0:
133 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; 133 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
134 break; 134 break;
135 case 1: 135 case 1:
136 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; 136 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
137 break; 137 break;
138 case 2: 138 case 2:
139 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; 139 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
140 break; 140 break;
141 case 3: 141 case 3:
142 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; 142 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
143 break; 143 break;
144 } 144 }
145 DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n", 145 DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index eec993f93b1a..6ec3a905fdd2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -476,6 +476,7 @@ static struct drm_driver driver = {
476 .unload = psb_driver_unload, 476 .unload = psb_driver_unload,
477 .lastclose = psb_driver_lastclose, 477 .lastclose = psb_driver_lastclose,
478 .preclose = psb_driver_preclose, 478 .preclose = psb_driver_preclose,
479 .set_busid = drm_pci_set_busid,
479 480
480 .num_ioctls = ARRAY_SIZE(psb_ioctls), 481 .num_ioctls = ARRAY_SIZE(psb_ioctls),
481 .device_is_agp = psb_driver_device_is_agp, 482 .device_is_agp = psb_driver_device_is_agp,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index bae897de9468..d91856779beb 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -213,7 +213,7 @@ static int i810_dma_cleanup(struct drm_device *dev)
213 (drm_i810_private_t *) dev->dev_private; 213 (drm_i810_private_t *) dev->dev_private;
214 214
215 if (dev_priv->ring.virtual_start) 215 if (dev_priv->ring.virtual_start)
216 drm_core_ioremapfree(&dev_priv->ring.map, dev); 216 drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
217 if (dev_priv->hw_status_page) { 217 if (dev_priv->hw_status_page) {
218 pci_free_consistent(dev->pdev, PAGE_SIZE, 218 pci_free_consistent(dev->pdev, PAGE_SIZE,
219 dev_priv->hw_status_page, 219 dev_priv->hw_status_page,
@@ -227,7 +227,7 @@ static int i810_dma_cleanup(struct drm_device *dev)
227 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 227 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
228 228
229 if (buf_priv->kernel_virtual && buf->total) 229 if (buf_priv->kernel_virtual && buf->total)
230 drm_core_ioremapfree(&buf_priv->map, dev); 230 drm_legacy_ioremapfree(&buf_priv->map, dev);
231 } 231 }
232 } 232 }
233 return 0; 233 return 0;
@@ -306,7 +306,7 @@ static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_pr
306 buf_priv->map.flags = 0; 306 buf_priv->map.flags = 0;
307 buf_priv->map.mtrr = 0; 307 buf_priv->map.mtrr = 0;
308 308
309 drm_core_ioremap(&buf_priv->map, dev); 309 drm_legacy_ioremap(&buf_priv->map, dev);
310 buf_priv->kernel_virtual = buf_priv->map.handle; 310 buf_priv->kernel_virtual = buf_priv->map.handle;
311 311
312 } 312 }
@@ -334,7 +334,7 @@ static int i810_dma_initialize(struct drm_device *dev,
334 DRM_ERROR("can not find sarea!\n"); 334 DRM_ERROR("can not find sarea!\n");
335 return -EINVAL; 335 return -EINVAL;
336 } 336 }
337 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); 337 dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset);
338 if (!dev_priv->mmio_map) { 338 if (!dev_priv->mmio_map) {
339 dev->dev_private = (void *)dev_priv; 339 dev->dev_private = (void *)dev_priv;
340 i810_dma_cleanup(dev); 340 i810_dma_cleanup(dev);
@@ -342,7 +342,7 @@ static int i810_dma_initialize(struct drm_device *dev,
342 return -EINVAL; 342 return -EINVAL;
343 } 343 }
344 dev->agp_buffer_token = init->buffers_offset; 344 dev->agp_buffer_token = init->buffers_offset;
345 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 345 dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
346 if (!dev->agp_buffer_map) { 346 if (!dev->agp_buffer_map) {
347 dev->dev_private = (void *)dev_priv; 347 dev->dev_private = (void *)dev_priv;
348 i810_dma_cleanup(dev); 348 i810_dma_cleanup(dev);
@@ -363,7 +363,7 @@ static int i810_dma_initialize(struct drm_device *dev,
363 dev_priv->ring.map.flags = 0; 363 dev_priv->ring.map.flags = 0;
364 dev_priv->ring.map.mtrr = 0; 364 dev_priv->ring.map.mtrr = 0;
365 365
366 drm_core_ioremap(&dev_priv->ring.map, dev); 366 drm_legacy_ioremap(&dev_priv->ring.map, dev);
367 367
368 if (dev_priv->ring.map.handle == NULL) { 368 if (dev_priv->ring.map.handle == NULL) {
369 dev->dev_private = (void *)dev_priv; 369 dev->dev_private = (void *)dev_priv;
@@ -1215,9 +1215,9 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1215 } 1215 }
1216 1216
1217 if (file_priv->master && file_priv->master->lock.hw_lock) { 1217 if (file_priv->master && file_priv->master->lock.hw_lock) {
1218 drm_idlelock_take(&file_priv->master->lock); 1218 drm_legacy_idlelock_take(&file_priv->master->lock);
1219 i810_driver_reclaim_buffers(dev, file_priv); 1219 i810_driver_reclaim_buffers(dev, file_priv);
1220 drm_idlelock_release(&file_priv->master->lock); 1220 drm_legacy_idlelock_release(&file_priv->master->lock);
1221 } else { 1221 } else {
1222 /* master disappeared, clean up stuff anyway and hope nothing 1222 /* master disappeared, clean up stuff anyway and hope nothing
1223 * goes wrong */ 1223 * goes wrong */
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 441ccf8f5bdc..44f4a131c8dd 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -47,7 +47,7 @@ static const struct file_operations i810_driver_fops = {
47 .open = drm_open, 47 .open = drm_open,
48 .release = drm_release, 48 .release = drm_release,
49 .unlocked_ioctl = drm_ioctl, 49 .unlocked_ioctl = drm_ioctl,
50 .mmap = drm_mmap, 50 .mmap = drm_legacy_mmap,
51 .poll = drm_poll, 51 .poll = drm_poll,
52#ifdef CONFIG_COMPAT 52#ifdef CONFIG_COMPAT
53 .compat_ioctl = drm_compat_ioctl, 53 .compat_ioctl = drm_compat_ioctl,
@@ -63,6 +63,7 @@ static struct drm_driver driver = {
63 .load = i810_driver_load, 63 .load = i810_driver_load,
64 .lastclose = i810_driver_lastclose, 64 .lastclose = i810_driver_lastclose,
65 .preclose = i810_driver_preclose, 65 .preclose = i810_driver_preclose,
66 .set_busid = drm_pci_set_busid,
66 .device_is_agp = i810_driver_device_is_agp, 67 .device_is_agp = i810_driver_device_is_agp,
67 .dma_quiescent = i810_driver_dma_quiescent, 68 .dma_quiescent = i810_driver_dma_quiescent,
68 .ioctls = i810_ioctls, 69 .ioctls = i810_ioctls,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index d4d16eddd651..93ec5dc4e7d3 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -32,6 +32,8 @@
32#ifndef _I810_DRV_H_ 32#ifndef _I810_DRV_H_
33#define _I810_DRV_H_ 33#define _I810_DRV_H_
34 34
35#include <drm/drm_legacy.h>
36
35/* General customization: 37/* General customization:
36 */ 38 */
37 39
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91bd167e1cb7..c1dd485aeb6c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
31 i915_gpu_error.o \ 31 i915_gpu_error.o \
32 i915_irq.o \ 32 i915_irq.o \
33 i915_trace_points.o \ 33 i915_trace_points.o \
34 intel_lrc.o \
34 intel_ringbuffer.o \ 35 intel_ringbuffer.o \
35 intel_uncore.o 36 intel_uncore.o
36 37
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 74f2af7c2d3e..441630434d34 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -60,16 +60,297 @@
60 60
61#define NS2501_REGC 0x0c 61#define NS2501_REGC 0x0c
62 62
63enum {
64 MODE_640x480,
65 MODE_800x600,
66 MODE_1024x768,
67};
68
69struct ns2501_reg {
70 uint8_t offset;
71 uint8_t value;
72};
73
74/*
75 * Magic values based on what the BIOS on
76 * Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
77 */
78static const struct ns2501_reg regs_1024x768[][86] = {
79 [MODE_640x480] = {
80 [0] = { .offset = 0x0a, .value = 0x81, },
81 [1] = { .offset = 0x18, .value = 0x07, },
82 [2] = { .offset = 0x19, .value = 0x00, },
83 [3] = { .offset = 0x1a, .value = 0x00, },
84 [4] = { .offset = 0x1b, .value = 0x11, },
85 [5] = { .offset = 0x1c, .value = 0x54, },
86 [6] = { .offset = 0x1d, .value = 0x03, },
87 [7] = { .offset = 0x1e, .value = 0x02, },
88 [8] = { .offset = 0xf3, .value = 0x90, },
89 [9] = { .offset = 0xf9, .value = 0x00, },
90 [10] = { .offset = 0xc1, .value = 0x90, },
91 [11] = { .offset = 0xc2, .value = 0x00, },
92 [12] = { .offset = 0xc3, .value = 0x0f, },
93 [13] = { .offset = 0xc4, .value = 0x03, },
94 [14] = { .offset = 0xc5, .value = 0x16, },
95 [15] = { .offset = 0xc6, .value = 0x00, },
96 [16] = { .offset = 0xc7, .value = 0x02, },
97 [17] = { .offset = 0xc8, .value = 0x02, },
98 [18] = { .offset = 0xf4, .value = 0x00, },
99 [19] = { .offset = 0x80, .value = 0xff, },
100 [20] = { .offset = 0x81, .value = 0x07, },
101 [21] = { .offset = 0x82, .value = 0x3d, },
102 [22] = { .offset = 0x83, .value = 0x05, },
103 [23] = { .offset = 0x94, .value = 0x00, },
104 [24] = { .offset = 0x95, .value = 0x00, },
105 [25] = { .offset = 0x96, .value = 0x05, },
106 [26] = { .offset = 0x97, .value = 0x00, },
107 [27] = { .offset = 0x9a, .value = 0x88, },
108 [28] = { .offset = 0x9b, .value = 0x00, },
109 [29] = { .offset = 0x98, .value = 0x00, },
110 [30] = { .offset = 0x99, .value = 0x00, },
111 [31] = { .offset = 0xf7, .value = 0x88, },
112 [32] = { .offset = 0xf8, .value = 0x0a, },
113 [33] = { .offset = 0x9c, .value = 0x24, },
114 [34] = { .offset = 0x9d, .value = 0x00, },
115 [35] = { .offset = 0x9e, .value = 0x25, },
116 [36] = { .offset = 0x9f, .value = 0x03, },
117 [37] = { .offset = 0xa0, .value = 0x28, },
118 [38] = { .offset = 0xa1, .value = 0x01, },
119 [39] = { .offset = 0xa2, .value = 0x28, },
120 [40] = { .offset = 0xa3, .value = 0x05, },
121 [41] = { .offset = 0xb6, .value = 0x09, },
122 [42] = { .offset = 0xb8, .value = 0x00, },
123 [43] = { .offset = 0xb9, .value = 0xa0, },
124 [44] = { .offset = 0xba, .value = 0x00, },
125 [45] = { .offset = 0xbb, .value = 0x20, },
126 [46] = { .offset = 0x10, .value = 0x00, },
127 [47] = { .offset = 0x11, .value = 0xa0, },
128 [48] = { .offset = 0x12, .value = 0x02, },
129 [49] = { .offset = 0x20, .value = 0x00, },
130 [50] = { .offset = 0x22, .value = 0x00, },
131 [51] = { .offset = 0x23, .value = 0x00, },
132 [52] = { .offset = 0x24, .value = 0x00, },
133 [53] = { .offset = 0x25, .value = 0x00, },
134 [54] = { .offset = 0x8c, .value = 0x10, },
135 [55] = { .offset = 0x8d, .value = 0x02, },
136 [56] = { .offset = 0x8e, .value = 0x10, },
137 [57] = { .offset = 0x8f, .value = 0x00, },
138 [58] = { .offset = 0x90, .value = 0xff, },
139 [59] = { .offset = 0x91, .value = 0x07, },
140 [60] = { .offset = 0x92, .value = 0xa0, },
141 [61] = { .offset = 0x93, .value = 0x02, },
142 [62] = { .offset = 0xa5, .value = 0x00, },
143 [63] = { .offset = 0xa6, .value = 0x00, },
144 [64] = { .offset = 0xa7, .value = 0x00, },
145 [65] = { .offset = 0xa8, .value = 0x00, },
146 [66] = { .offset = 0xa9, .value = 0x04, },
147 [67] = { .offset = 0xaa, .value = 0x70, },
148 [68] = { .offset = 0xab, .value = 0x4f, },
149 [69] = { .offset = 0xac, .value = 0x00, },
150 [70] = { .offset = 0xa4, .value = 0x84, },
151 [71] = { .offset = 0x7e, .value = 0x18, },
152 [72] = { .offset = 0x84, .value = 0x00, },
153 [73] = { .offset = 0x85, .value = 0x00, },
154 [74] = { .offset = 0x86, .value = 0x00, },
155 [75] = { .offset = 0x87, .value = 0x00, },
156 [76] = { .offset = 0x88, .value = 0x00, },
157 [77] = { .offset = 0x89, .value = 0x00, },
158 [78] = { .offset = 0x8a, .value = 0x00, },
159 [79] = { .offset = 0x8b, .value = 0x00, },
160 [80] = { .offset = 0x26, .value = 0x00, },
161 [81] = { .offset = 0x27, .value = 0x00, },
162 [82] = { .offset = 0xad, .value = 0x00, },
163 [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
164 [84] = { .offset = 0x41, .value = 0x00, },
165 [85] = { .offset = 0xc0, .value = 0x05, },
166 },
167 [MODE_800x600] = {
168 [0] = { .offset = 0x0a, .value = 0x81, },
169 [1] = { .offset = 0x18, .value = 0x07, },
170 [2] = { .offset = 0x19, .value = 0x00, },
171 [3] = { .offset = 0x1a, .value = 0x00, },
172 [4] = { .offset = 0x1b, .value = 0x19, },
173 [5] = { .offset = 0x1c, .value = 0x64, },
174 [6] = { .offset = 0x1d, .value = 0x02, },
175 [7] = { .offset = 0x1e, .value = 0x02, },
176 [8] = { .offset = 0xf3, .value = 0x90, },
177 [9] = { .offset = 0xf9, .value = 0x00, },
178 [10] = { .offset = 0xc1, .value = 0xd7, },
179 [11] = { .offset = 0xc2, .value = 0x00, },
180 [12] = { .offset = 0xc3, .value = 0xf8, },
181 [13] = { .offset = 0xc4, .value = 0x03, },
182 [14] = { .offset = 0xc5, .value = 0x1a, },
183 [15] = { .offset = 0xc6, .value = 0x00, },
184 [16] = { .offset = 0xc7, .value = 0x73, },
185 [17] = { .offset = 0xc8, .value = 0x02, },
186 [18] = { .offset = 0xf4, .value = 0x00, },
187 [19] = { .offset = 0x80, .value = 0x27, },
188 [20] = { .offset = 0x81, .value = 0x03, },
189 [21] = { .offset = 0x82, .value = 0x41, },
190 [22] = { .offset = 0x83, .value = 0x05, },
191 [23] = { .offset = 0x94, .value = 0x00, },
192 [24] = { .offset = 0x95, .value = 0x00, },
193 [25] = { .offset = 0x96, .value = 0x05, },
194 [26] = { .offset = 0x97, .value = 0x00, },
195 [27] = { .offset = 0x9a, .value = 0x88, },
196 [28] = { .offset = 0x9b, .value = 0x00, },
197 [29] = { .offset = 0x98, .value = 0x00, },
198 [30] = { .offset = 0x99, .value = 0x00, },
199 [31] = { .offset = 0xf7, .value = 0x88, },
200 [32] = { .offset = 0xf8, .value = 0x06, },
201 [33] = { .offset = 0x9c, .value = 0x23, },
202 [34] = { .offset = 0x9d, .value = 0x00, },
203 [35] = { .offset = 0x9e, .value = 0x25, },
204 [36] = { .offset = 0x9f, .value = 0x03, },
205 [37] = { .offset = 0xa0, .value = 0x28, },
206 [38] = { .offset = 0xa1, .value = 0x01, },
207 [39] = { .offset = 0xa2, .value = 0x28, },
208 [40] = { .offset = 0xa3, .value = 0x05, },
209 [41] = { .offset = 0xb6, .value = 0x09, },
210 [42] = { .offset = 0xb8, .value = 0x30, },
211 [43] = { .offset = 0xb9, .value = 0xc8, },
212 [44] = { .offset = 0xba, .value = 0x00, },
213 [45] = { .offset = 0xbb, .value = 0x20, },
214 [46] = { .offset = 0x10, .value = 0x20, },
215 [47] = { .offset = 0x11, .value = 0xc8, },
216 [48] = { .offset = 0x12, .value = 0x02, },
217 [49] = { .offset = 0x20, .value = 0x00, },
218 [50] = { .offset = 0x22, .value = 0x00, },
219 [51] = { .offset = 0x23, .value = 0x00, },
220 [52] = { .offset = 0x24, .value = 0x00, },
221 [53] = { .offset = 0x25, .value = 0x00, },
222 [54] = { .offset = 0x8c, .value = 0x10, },
223 [55] = { .offset = 0x8d, .value = 0x02, },
224 [56] = { .offset = 0x8e, .value = 0x04, },
225 [57] = { .offset = 0x8f, .value = 0x00, },
226 [58] = { .offset = 0x90, .value = 0xff, },
227 [59] = { .offset = 0x91, .value = 0x07, },
228 [60] = { .offset = 0x92, .value = 0xa0, },
229 [61] = { .offset = 0x93, .value = 0x02, },
230 [62] = { .offset = 0xa5, .value = 0x00, },
231 [63] = { .offset = 0xa6, .value = 0x00, },
232 [64] = { .offset = 0xa7, .value = 0x00, },
233 [65] = { .offset = 0xa8, .value = 0x00, },
234 [66] = { .offset = 0xa9, .value = 0x83, },
235 [67] = { .offset = 0xaa, .value = 0x40, },
236 [68] = { .offset = 0xab, .value = 0x32, },
237 [69] = { .offset = 0xac, .value = 0x00, },
238 [70] = { .offset = 0xa4, .value = 0x80, },
239 [71] = { .offset = 0x7e, .value = 0x18, },
240 [72] = { .offset = 0x84, .value = 0x00, },
241 [73] = { .offset = 0x85, .value = 0x00, },
242 [74] = { .offset = 0x86, .value = 0x00, },
243 [75] = { .offset = 0x87, .value = 0x00, },
244 [76] = { .offset = 0x88, .value = 0x00, },
245 [77] = { .offset = 0x89, .value = 0x00, },
246 [78] = { .offset = 0x8a, .value = 0x00, },
247 [79] = { .offset = 0x8b, .value = 0x00, },
248 [80] = { .offset = 0x26, .value = 0x00, },
249 [81] = { .offset = 0x27, .value = 0x00, },
250 [82] = { .offset = 0xad, .value = 0x00, },
251 [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
252 [84] = { .offset = 0x41, .value = 0x00, },
253 [85] = { .offset = 0xc0, .value = 0x07, },
254 },
255 [MODE_1024x768] = {
256 [0] = { .offset = 0x0a, .value = 0x81, },
257 [1] = { .offset = 0x18, .value = 0x07, },
258 [2] = { .offset = 0x19, .value = 0x00, },
259 [3] = { .offset = 0x1a, .value = 0x00, },
260 [4] = { .offset = 0x1b, .value = 0x11, },
261 [5] = { .offset = 0x1c, .value = 0x54, },
262 [6] = { .offset = 0x1d, .value = 0x03, },
263 [7] = { .offset = 0x1e, .value = 0x02, },
264 [8] = { .offset = 0xf3, .value = 0x90, },
265 [9] = { .offset = 0xf9, .value = 0x00, },
266 [10] = { .offset = 0xc1, .value = 0x90, },
267 [11] = { .offset = 0xc2, .value = 0x00, },
268 [12] = { .offset = 0xc3, .value = 0x0f, },
269 [13] = { .offset = 0xc4, .value = 0x03, },
270 [14] = { .offset = 0xc5, .value = 0x16, },
271 [15] = { .offset = 0xc6, .value = 0x00, },
272 [16] = { .offset = 0xc7, .value = 0x02, },
273 [17] = { .offset = 0xc8, .value = 0x02, },
274 [18] = { .offset = 0xf4, .value = 0x00, },
275 [19] = { .offset = 0x80, .value = 0xff, },
276 [20] = { .offset = 0x81, .value = 0x07, },
277 [21] = { .offset = 0x82, .value = 0x3d, },
278 [22] = { .offset = 0x83, .value = 0x05, },
279 [23] = { .offset = 0x94, .value = 0x00, },
280 [24] = { .offset = 0x95, .value = 0x00, },
281 [25] = { .offset = 0x96, .value = 0x05, },
282 [26] = { .offset = 0x97, .value = 0x00, },
283 [27] = { .offset = 0x9a, .value = 0x88, },
284 [28] = { .offset = 0x9b, .value = 0x00, },
285 [29] = { .offset = 0x98, .value = 0x00, },
286 [30] = { .offset = 0x99, .value = 0x00, },
287 [31] = { .offset = 0xf7, .value = 0x88, },
288 [32] = { .offset = 0xf8, .value = 0x0a, },
289 [33] = { .offset = 0x9c, .value = 0x24, },
290 [34] = { .offset = 0x9d, .value = 0x00, },
291 [35] = { .offset = 0x9e, .value = 0x25, },
292 [36] = { .offset = 0x9f, .value = 0x03, },
293 [37] = { .offset = 0xa0, .value = 0x28, },
294 [38] = { .offset = 0xa1, .value = 0x01, },
295 [39] = { .offset = 0xa2, .value = 0x28, },
296 [40] = { .offset = 0xa3, .value = 0x05, },
297 [41] = { .offset = 0xb6, .value = 0x09, },
298 [42] = { .offset = 0xb8, .value = 0x00, },
299 [43] = { .offset = 0xb9, .value = 0xa0, },
300 [44] = { .offset = 0xba, .value = 0x00, },
301 [45] = { .offset = 0xbb, .value = 0x20, },
302 [46] = { .offset = 0x10, .value = 0x00, },
303 [47] = { .offset = 0x11, .value = 0xa0, },
304 [48] = { .offset = 0x12, .value = 0x02, },
305 [49] = { .offset = 0x20, .value = 0x00, },
306 [50] = { .offset = 0x22, .value = 0x00, },
307 [51] = { .offset = 0x23, .value = 0x00, },
308 [52] = { .offset = 0x24, .value = 0x00, },
309 [53] = { .offset = 0x25, .value = 0x00, },
310 [54] = { .offset = 0x8c, .value = 0x10, },
311 [55] = { .offset = 0x8d, .value = 0x02, },
312 [56] = { .offset = 0x8e, .value = 0x10, },
313 [57] = { .offset = 0x8f, .value = 0x00, },
314 [58] = { .offset = 0x90, .value = 0xff, },
315 [59] = { .offset = 0x91, .value = 0x07, },
316 [60] = { .offset = 0x92, .value = 0xa0, },
317 [61] = { .offset = 0x93, .value = 0x02, },
318 [62] = { .offset = 0xa5, .value = 0x00, },
319 [63] = { .offset = 0xa6, .value = 0x00, },
320 [64] = { .offset = 0xa7, .value = 0x00, },
321 [65] = { .offset = 0xa8, .value = 0x00, },
322 [66] = { .offset = 0xa9, .value = 0x04, },
323 [67] = { .offset = 0xaa, .value = 0x70, },
324 [68] = { .offset = 0xab, .value = 0x4f, },
325 [69] = { .offset = 0xac, .value = 0x00, },
326 [70] = { .offset = 0xa4, .value = 0x84, },
327 [71] = { .offset = 0x7e, .value = 0x18, },
328 [72] = { .offset = 0x84, .value = 0x00, },
329 [73] = { .offset = 0x85, .value = 0x00, },
330 [74] = { .offset = 0x86, .value = 0x00, },
331 [75] = { .offset = 0x87, .value = 0x00, },
332 [76] = { .offset = 0x88, .value = 0x00, },
333 [77] = { .offset = 0x89, .value = 0x00, },
334 [78] = { .offset = 0x8a, .value = 0x00, },
335 [79] = { .offset = 0x8b, .value = 0x00, },
336 [80] = { .offset = 0x26, .value = 0x00, },
337 [81] = { .offset = 0x27, .value = 0x00, },
338 [82] = { .offset = 0xad, .value = 0x00, },
339 [83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
340 [84] = { .offset = 0x41, .value = 0x00, },
341 [85] = { .offset = 0xc0, .value = 0x01, },
342 },
343};
344
345static const struct ns2501_reg regs_init[] = {
346 [0] = { .offset = 0x35, .value = 0xff, },
347 [1] = { .offset = 0x34, .value = 0x00, },
348 [2] = { .offset = 0x08, .value = 0x30, },
349};
350
63struct ns2501_priv { 351struct ns2501_priv {
64 //I2CDevRec d;
65 bool quiet; 352 bool quiet;
66 int reg_8_shadow; 353 const struct ns2501_reg *regs;
67 int reg_8_set;
68 // Shadow registers for i915
69 int dvoc;
70 int pll_a;
71 int srcdim;
72 int fw_blc;
73}; 354};
74 355
75#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) 356#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
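The rewrite above replaces the old ad-hoc shadow-register fields with fixed per-mode register tables. A minimal sketch of the resulting programming pattern, assuming ns2501_writeb() and struct ns2501_reg as defined in this patch (the ns2501_write_regs() helper name is hypothetical, not part of the patch):

    static void ns2501_write_regs(struct intel_dvo_device *dvo,
                                  const struct ns2501_reg *regs, int n)
    {
            int i;

            /* Each table entry is one (offset, value) register write. */
            for (i = 0; i < n; i++)
                    ns2501_writeb(dvo, regs[i].offset, regs[i].value);
    }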
@@ -205,11 +486,9 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
205 goto out; 486 goto out;
206 } 487 }
207 ns->quiet = false; 488 ns->quiet = false;
208 ns->reg_8_set = 0;
209 ns->reg_8_shadow =
210 NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
211 489
212 DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); 490 DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
491
213 return true; 492 return true;
214 493
215out: 494out:
@@ -242,9 +521,9 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
242 * of the panel in here so we could always accept it 521 * of the panel in here so we could always accept it
243 * by disabling the scaler. 522 * by disabling the scaler.
244 */ 523 */
245 if ((mode->hdisplay == 800 && mode->vdisplay == 600) || 524 if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
246 (mode->hdisplay == 640 && mode->vdisplay == 480) || 525 (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
247 (mode->hdisplay == 1024 && mode->vdisplay == 768)) { 526 (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
248 return MODE_OK; 527 return MODE_OK;
249 } else { 528 } else {
250 return MODE_ONE_SIZE; /* Is this a reasonable error? */ 529 return MODE_ONE_SIZE; /* Is this a reasonable error? */
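The added clock comparisons pin each resolution to its canonical VESA 60 Hz pixel clock, so only the exact timings the register tables above were captured for will validate:

    /* mode->clock is in kHz: 640x480@60Hz = 25175, 800x600@60Hz = 40000,
     * 1024x768@60Hz = 65000; anything else falls through to MODE_ONE_SIZE. */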
@@ -255,180 +534,30 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
255 struct drm_display_mode *mode, 534 struct drm_display_mode *mode,
256 struct drm_display_mode *adjusted_mode) 535 struct drm_display_mode *adjusted_mode)
257{ 536{
258 bool ok;
259 int retries = 10;
260 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 537 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
538 int mode_idx, i;
261 539
262 DRM_DEBUG_KMS 540 DRM_DEBUG_KMS
263 ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", 541 ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
264 mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); 542 mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
265 543
266 /* 544 if (mode->hdisplay == 640 && mode->vdisplay == 480)
267 * Where do I find the native resolution for which scaling is not required??? 545 mode_idx = MODE_640x480;
268 * 546 else if (mode->hdisplay == 800 && mode->vdisplay == 600)
269 * First trigger the DVO on as otherwise the chip does not appear on the i2c 547 mode_idx = MODE_800x600;
270 * bus. 548 else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
271 */ 549 mode_idx = MODE_1024x768;
272 do { 550 else
273 ok = true; 551 return;
274
275 if (mode->hdisplay == 800 && mode->vdisplay == 600) {
276 /* mode 277 */
277 ns->reg_8_shadow &= ~NS2501_8_BPAS;
278 DRM_DEBUG_KMS("switching to 800x600\n");
279
280 /*
281 * No, I do not know where this data comes from.
282 * It is just what the video bios left in the DVO, so
283 * I'm just copying it here over.
284 * This also means that I cannot support any other modes
285 * except the ones supported by the bios.
286 */
287 ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
288 ok &= ns2501_writeb(dvo, 0x1b, 0x19);
289 ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
290 ok &= ns2501_writeb(dvo, 0x1d, 0x02);
291
292 ok &= ns2501_writeb(dvo, 0x34, 0x03);
293 ok &= ns2501_writeb(dvo, 0x35, 0xff);
294 552
295 ok &= ns2501_writeb(dvo, 0x80, 0x27); 553 /* Hopefully doing it every time won't hurt... */
296 ok &= ns2501_writeb(dvo, 0x81, 0x03); 554 for (i = 0; i < ARRAY_SIZE(regs_init); i++)
297 ok &= ns2501_writeb(dvo, 0x82, 0x41); 555 ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
298 ok &= ns2501_writeb(dvo, 0x83, 0x05);
299 556
300 ok &= ns2501_writeb(dvo, 0x8d, 0x02); 557 ns->regs = regs_1024x768[mode_idx];
301 ok &= ns2501_writeb(dvo, 0x8e, 0x04);
302 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
303 558
304 ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */ 559 for (i = 0; i < 84; i++)
305 ok &= ns2501_writeb(dvo, 0x91, 0x07); 560 ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
306 ok &= ns2501_writeb(dvo, 0x94, 0x00);
307 ok &= ns2501_writeb(dvo, 0x95, 0x00);
308
309 ok &= ns2501_writeb(dvo, 0x96, 0x00);
310
311 ok &= ns2501_writeb(dvo, 0x99, 0x00);
312 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
313
314 ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
315 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
316 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
317 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
318
319 ok &= ns2501_writeb(dvo, 0xa4, 0x80);
320
321 ok &= ns2501_writeb(dvo, 0xb6, 0x00);
322
323 ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
324 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
325
326 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
327 ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
328
329 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
330 ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
331
332 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
333 ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
334
335 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
336 ok &= ns2501_writeb(dvo, 0xc7, 0x73);
337 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
338
339 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
340 /* mode 274 */
341 DRM_DEBUG_KMS("switching to 640x480\n");
342 /*
343 * No, I do not know where this data comes from.
344 * It is just what the video bios left in the DVO, so
345 * I'm just copying it here over.
346 * This also means that I cannot support any other modes
347 * except the ones supported by the bios.
348 */
349 ns->reg_8_shadow &= ~NS2501_8_BPAS;
350
351 ok &= ns2501_writeb(dvo, 0x11, 0xa0);
352 ok &= ns2501_writeb(dvo, 0x1b, 0x11);
353 ok &= ns2501_writeb(dvo, 0x1c, 0x54);
354 ok &= ns2501_writeb(dvo, 0x1d, 0x03);
355
356 ok &= ns2501_writeb(dvo, 0x34, 0x03);
357 ok &= ns2501_writeb(dvo, 0x35, 0xff);
358
359 ok &= ns2501_writeb(dvo, 0x80, 0xff);
360 ok &= ns2501_writeb(dvo, 0x81, 0x07);
361 ok &= ns2501_writeb(dvo, 0x82, 0x3d);
362 ok &= ns2501_writeb(dvo, 0x83, 0x05);
363
364 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
365 ok &= ns2501_writeb(dvo, 0x8e, 0x10);
366 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
367
368 ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
369 ok &= ns2501_writeb(dvo, 0x91, 0x07);
370 ok &= ns2501_writeb(dvo, 0x94, 0x00);
371 ok &= ns2501_writeb(dvo, 0x95, 0x00);
372
373 ok &= ns2501_writeb(dvo, 0x96, 0x05);
374
375 ok &= ns2501_writeb(dvo, 0x99, 0x00);
376 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
377
378 ok &= ns2501_writeb(dvo, 0x9c, 0x24);
379 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
380 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
381 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
382
383 ok &= ns2501_writeb(dvo, 0xa4, 0x84);
384
385 ok &= ns2501_writeb(dvo, 0xb6, 0x09);
386
387 ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
388 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
389
390 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
391 ok &= ns2501_writeb(dvo, 0xc1, 0x90);
392
393 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
394 ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
395
396 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
397 ok &= ns2501_writeb(dvo, 0xc5, 0x16);
398
399 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
400 ok &= ns2501_writeb(dvo, 0xc7, 0x02);
401 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
402
403 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
404 /* mode 280 */
405 DRM_DEBUG_KMS("switching to 1024x768\n");
406 /*
407 * This might or might not work, actually. I'm silently
408 * assuming here that the native panel resolution is
409 * 1024x768. If not, then this leaves the scaler disabled
410 * generating a picture that is likely not the expected.
411 *
412 * Problem is that I do not know where to take the panel
413 * dimensions from.
414 *
415 * Enable the bypass, scaling not required.
416 *
417 * The scaler registers are irrelevant here....
418 *
419 */
420 ns->reg_8_shadow |= NS2501_8_BPAS;
421 ok &= ns2501_writeb(dvo, 0x37, 0x44);
422 } else {
423 /*
424 * Data not known. Bummer!
425 * Hopefully, the code should not go here
426 * as mode_OK delivered no other modes.
427 */
428 ns->reg_8_shadow |= NS2501_8_BPAS;
429 }
430 ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
431 } while (!ok && retries--);
432} 561}
433 562
434/* set the NS2501 power state */ 563/* set the NS2501 power state */
@@ -439,60 +568,46 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
439 if (!ns2501_readb(dvo, NS2501_REG8, &ch)) 568 if (!ns2501_readb(dvo, NS2501_REG8, &ch))
440 return false; 569 return false;
441 570
442 if (ch & NS2501_8_PD) 571 return ch & NS2501_8_PD;
443 return true;
444 else
445 return false;
446} 572}
447 573
448/* set the NS2501 power state */ 574/* set the NS2501 power state */
449static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) 575static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
450{ 576{
451 bool ok;
452 int retries = 10;
453 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 577 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
454 unsigned char ch;
455 578
 456 DRM_DEBUG_KMS("Trying to set the dpms of the DVO to %i\n", enable); 579 DRM_DEBUG_KMS("Trying to set the dpms of the DVO to %i\n", enable);
457 580
458 ch = ns->reg_8_shadow; 581 if (enable) {
582 if (WARN_ON(ns->regs[83].offset != 0x08 ||
583 ns->regs[84].offset != 0x41 ||
584 ns->regs[85].offset != 0xc0))
585 return;
459 586
460 if (enable) 587 ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
461 ch |= NS2501_8_PD;
462 else
463 ch &= ~NS2501_8_PD;
464
465 if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
466 ns->reg_8_set = 1;
467 ns->reg_8_shadow = ch;
468
469 do {
470 ok = true;
471 ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
472 ok &=
473 ns2501_writeb(dvo, 0x34,
474 enable ? 0x03 : 0x00);
475 ok &=
476 ns2501_writeb(dvo, 0x35,
477 enable ? 0xff : 0x00);
478 } while (!ok && retries--);
479 }
480}
481 588
482static void ns2501_dump_regs(struct intel_dvo_device *dvo) 589 ns2501_writeb(dvo, 0x41, ns->regs[84].value);
483{ 590
484 uint8_t val; 591 ns2501_writeb(dvo, 0x34, 0x01);
485 592 msleep(15);
486 ns2501_readb(dvo, NS2501_FREQ_LO, &val); 593
487 DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val); 594 ns2501_writeb(dvo, 0x08, 0x35);
488 ns2501_readb(dvo, NS2501_FREQ_HI, &val); 595 if (!(ns->regs[83].value & NS2501_8_BPAS))
489 DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val); 596 ns2501_writeb(dvo, 0x08, 0x31);
490 ns2501_readb(dvo, NS2501_REG8, &val); 597 msleep(200);
491 DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val); 598
492 ns2501_readb(dvo, NS2501_REG9, &val); 599 ns2501_writeb(dvo, 0x34, 0x03);
493 DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val); 600
494 ns2501_readb(dvo, NS2501_REGC, &val); 601 ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
495 DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val); 602 } else {
603 ns2501_writeb(dvo, 0x34, 0x01);
604 msleep(200);
605
606 ns2501_writeb(dvo, 0x08, 0x34);
607 msleep(15);
608
609 ns2501_writeb(dvo, 0x34, 0x00);
610 }
496} 611}
497 612
498static void ns2501_destroy(struct intel_dvo_device *dvo) 613static void ns2501_destroy(struct intel_dvo_device *dvo)
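The new dpms path replaces the single shadowed NS2501_REG8 write with a fixed power sequence. Annotated from the code above (register semantics are not documented in the patch):

    /*
     * enable:  0xc0 = table value | 0x08
     *          0x41 = table value
     *          0x34 = 0x01, wait 15 ms
     *          0x08 = 0x35, then 0x31 when the scaler is active (no BPAS),
     *          wait 200 ms
     *          0x34 = 0x03, 0xc0 = table value
     * disable: 0x34 = 0x01, wait 200 ms
     *          0x08 = 0x34, wait 15 ms
     *          0x34 = 0x00
     */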
@@ -512,6 +627,5 @@ struct intel_dvo_dev_ops ns2501_ops = {
512 .mode_set = ns2501_mode_set, 627 .mode_set = ns2501_mode_set,
513 .dpms = ns2501_dpms, 628 .dpms = ns2501_dpms,
514 .get_hw_state = ns2501_get_hw_state, 629 .get_hw_state = ns2501_get_hw_state,
515 .dump_regs = ns2501_dump_regs,
516 .destroy = ns2501_destroy, 630 .destroy = ns2501_destroy,
517}; 631};
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 4b7ed5289217..593b657d3e59 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -844,8 +844,6 @@ finish:
844 */ 844 */
845bool i915_needs_cmd_parser(struct intel_engine_cs *ring) 845bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
846{ 846{
847 struct drm_i915_private *dev_priv = ring->dev->dev_private;
848
849 if (!ring->needs_cmd_parser) 847 if (!ring->needs_cmd_parser)
850 return false; 848 return false;
851 849
@@ -854,7 +852,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
854 * disabled. That will cause all of the parser's PPGTT checks to 852 * disabled. That will cause all of the parser's PPGTT checks to
855 * fail. For now, disable parsing when PPGTT is off. 853 * fail. For now, disable parsing when PPGTT is off.
856 */ 854 */
 857 if (!dev_priv->mm.aliasing_ppgtt) 855 if (!USES_PPGTT(ring->dev))
858 return false; 856 return false;
859 857
860 return (i915.enable_cmd_parser == 1); 858 return (i915.enable_cmd_parser == 1);
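Note the sense of the check must stay inverted to match the comment ("disable parsing when PPGTT is off"). USES_PPGTT() keys off the module parameter rather than the live aliasing_ppgtt pointer, which is what allows the dev_priv local to be dropped above; a sketch of the assumed definition (the module-parameter form is an inference, not quoted from this patch):

    /* i915.enable_ppgtt: 0 = disabled, 1 = aliasing ppgtt, 2 = full ppgtt */
    #define USES_PPGTT(dev)        (i915.enable_ppgtt)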
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9e737b771c40..063b44817e08 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -136,7 +136,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 obj->last_read_seqno, 136 obj->last_read_seqno,
137 obj->last_write_seqno, 137 obj->last_write_seqno,
138 obj->last_fenced_seqno, 138 obj->last_fenced_seqno,
139 i915_cache_level_str(obj->cache_level), 139 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
140 obj->dirty ? " dirty" : "", 140 obj->dirty ? " dirty" : "",
141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
142 if (obj->base.name) 142 if (obj->base.name)
@@ -333,7 +333,7 @@ static int per_file_stats(int id, void *ptr, void *data)
333 } 333 }
334 334
335 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 335 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
336 if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv) 336 if (ppgtt->file_priv != stats->file_priv)
337 continue; 337 continue;
338 338
339 if (obj->ring) /* XXX per-vma statistic */ 339 if (obj->ring) /* XXX per-vma statistic */
@@ -515,6 +515,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
515{ 515{
516 struct drm_info_node *node = m->private; 516 struct drm_info_node *node = m->private;
517 struct drm_device *dev = node->minor->dev; 517 struct drm_device *dev = node->minor->dev;
518 struct drm_i915_private *dev_priv = dev->dev_private;
518 unsigned long flags; 519 unsigned long flags;
519 struct intel_crtc *crtc; 520 struct intel_crtc *crtc;
520 int ret; 521 int ret;
@@ -534,6 +535,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
534 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 535 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
535 pipe, plane); 536 pipe, plane);
536 } else { 537 } else {
538 u32 addr;
539
537 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 540 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
538 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 541 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
539 pipe, plane); 542 pipe, plane);
@@ -541,23 +544,35 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
541 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 544 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
542 pipe, plane); 545 pipe, plane);
543 } 546 }
547 if (work->flip_queued_ring) {
548 seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
549 work->flip_queued_ring->name,
550 work->flip_queued_seqno,
551 dev_priv->next_seqno,
552 work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
553 i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
554 work->flip_queued_seqno));
555 } else
556 seq_printf(m, "Flip not associated with any ring\n");
557 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
558 work->flip_queued_vblank,
559 work->flip_ready_vblank,
560 drm_vblank_count(dev, crtc->pipe));
544 if (work->enable_stall_check) 561 if (work->enable_stall_check)
545 seq_puts(m, "Stall check enabled, "); 562 seq_puts(m, "Stall check enabled, ");
546 else 563 else
547 seq_puts(m, "Stall check waiting for page flip ioctl, "); 564 seq_puts(m, "Stall check waiting for page flip ioctl, ");
548 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 565 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
549 566
550 if (work->old_fb_obj) { 567 if (INTEL_INFO(dev)->gen >= 4)
551 struct drm_i915_gem_object *obj = work->old_fb_obj; 568 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
552 if (obj) 569 else
553 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", 570 addr = I915_READ(DSPADDR(crtc->plane));
554 i915_gem_obj_ggtt_offset(obj)); 571 seq_printf(m, "Current scanout address 0x%08x\n", addr);
555 } 572
556 if (work->pending_flip_obj) { 573 if (work->pending_flip_obj) {
557 struct drm_i915_gem_object *obj = work->pending_flip_obj; 574 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
558 if (obj) 575 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
559 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
560 i915_gem_obj_ggtt_offset(obj));
561 } 576 }
562 } 577 }
563 spin_unlock_irqrestore(&dev->event_lock, flags); 578 spin_unlock_irqrestore(&dev->event_lock, flags);
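The new pageflip output compares the ring's current breadcrumb with the flip's queued seqno. The comparison helper is presumably the usual wrap-safe signed subtraction (a sketch, not part of this hunk):

    static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            /* Signed distance keeps the comparison correct across
             * 32-bit seqno wraparound. */
            return (int32_t)(seq1 - seq2) >= 0;
    }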
@@ -650,7 +665,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
650 intel_runtime_pm_get(dev_priv); 665 intel_runtime_pm_get(dev_priv);
651 666
652 if (IS_CHERRYVIEW(dev)) { 667 if (IS_CHERRYVIEW(dev)) {
653 int i;
654 seq_printf(m, "Master Interrupt Control:\t%08x\n", 668 seq_printf(m, "Master Interrupt Control:\t%08x\n",
655 I915_READ(GEN8_MASTER_IRQ)); 669 I915_READ(GEN8_MASTER_IRQ));
656 670
@@ -662,7 +676,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
662 I915_READ(VLV_IIR_RW)); 676 I915_READ(VLV_IIR_RW));
663 seq_printf(m, "Display IMR:\t%08x\n", 677 seq_printf(m, "Display IMR:\t%08x\n",
664 I915_READ(VLV_IMR)); 678 I915_READ(VLV_IMR));
665 for_each_pipe(pipe) 679 for_each_pipe(dev_priv, pipe)
666 seq_printf(m, "Pipe %c stat:\t%08x\n", 680 seq_printf(m, "Pipe %c stat:\t%08x\n",
667 pipe_name(pipe), 681 pipe_name(pipe),
668 I915_READ(PIPESTAT(pipe))); 682 I915_READ(PIPESTAT(pipe)));
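for_each_pipe() now takes dev_priv, presumably so the iterator can bound itself by the device's real pipe count rather than a global maximum; a sketch of the assumed shape:

    #define for_each_pipe(__dev_priv, __p) \
            for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)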
@@ -702,7 +716,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
702 i, I915_READ(GEN8_GT_IER(i))); 716 i, I915_READ(GEN8_GT_IER(i)));
703 } 717 }
704 718
705 for_each_pipe(pipe) { 719 for_each_pipe(dev_priv, pipe) {
720 if (!intel_display_power_enabled(dev_priv,
721 POWER_DOMAIN_PIPE(pipe))) {
722 seq_printf(m, "Pipe %c power disabled\n",
723 pipe_name(pipe));
724 continue;
725 }
706 seq_printf(m, "Pipe %c IMR:\t%08x\n", 726 seq_printf(m, "Pipe %c IMR:\t%08x\n",
707 pipe_name(pipe), 727 pipe_name(pipe),
708 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 728 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
@@ -743,7 +763,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
743 I915_READ(VLV_IIR_RW)); 763 I915_READ(VLV_IIR_RW));
744 seq_printf(m, "Display IMR:\t%08x\n", 764 seq_printf(m, "Display IMR:\t%08x\n",
745 I915_READ(VLV_IMR)); 765 I915_READ(VLV_IMR));
746 for_each_pipe(pipe) 766 for_each_pipe(dev_priv, pipe)
747 seq_printf(m, "Pipe %c stat:\t%08x\n", 767 seq_printf(m, "Pipe %c stat:\t%08x\n",
748 pipe_name(pipe), 768 pipe_name(pipe),
749 I915_READ(PIPESTAT(pipe))); 769 I915_READ(PIPESTAT(pipe)));
@@ -779,7 +799,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
779 I915_READ(IIR)); 799 I915_READ(IIR));
780 seq_printf(m, "Interrupt mask: %08x\n", 800 seq_printf(m, "Interrupt mask: %08x\n",
781 I915_READ(IMR)); 801 I915_READ(IMR));
782 for_each_pipe(pipe) 802 for_each_pipe(dev_priv, pipe)
783 seq_printf(m, "Pipe %c stat: %08x\n", 803 seq_printf(m, "Pipe %c stat: %08x\n",
784 pipe_name(pipe), 804 pipe_name(pipe),
785 I915_READ(PIPESTAT(pipe))); 805 I915_READ(PIPESTAT(pipe)));
@@ -927,7 +947,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
927 ssize_t ret_count = 0; 947 ssize_t ret_count = 0;
928 int ret; 948 int ret;
929 949
930 ret = i915_error_state_buf_init(&error_str, count, *pos); 950 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
931 if (ret) 951 if (ret)
932 return ret; 952 return ret;
933 953
@@ -1024,6 +1044,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1024 u32 rpstat, cagf, reqf; 1044 u32 rpstat, cagf, reqf;
1025 u32 rpupei, rpcurup, rpprevup; 1045 u32 rpupei, rpcurup, rpprevup;
1026 u32 rpdownei, rpcurdown, rpprevdown; 1046 u32 rpdownei, rpcurdown, rpprevdown;
1047 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1027 int max_freq; 1048 int max_freq;
1028 1049
1029 /* RPSTAT1 is in the GT power well */ 1050 /* RPSTAT1 is in the GT power well */
@@ -1061,12 +1082,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1061 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 1082 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
1062 mutex_unlock(&dev->struct_mutex); 1083 mutex_unlock(&dev->struct_mutex);
1063 1084
1085 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1086 pm_ier = I915_READ(GEN6_PMIER);
1087 pm_imr = I915_READ(GEN6_PMIMR);
1088 pm_isr = I915_READ(GEN6_PMISR);
1089 pm_iir = I915_READ(GEN6_PMIIR);
1090 pm_mask = I915_READ(GEN6_PMINTRMSK);
1091 } else {
1092 pm_ier = I915_READ(GEN8_GT_IER(2));
1093 pm_imr = I915_READ(GEN8_GT_IMR(2));
1094 pm_isr = I915_READ(GEN8_GT_ISR(2));
1095 pm_iir = I915_READ(GEN8_GT_IIR(2));
1096 pm_mask = I915_READ(GEN6_PMINTRMSK);
1097 }
1064 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1098 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1065 I915_READ(GEN6_PMIER), 1099 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1066 I915_READ(GEN6_PMIMR),
1067 I915_READ(GEN6_PMISR),
1068 I915_READ(GEN6_PMIIR),
1069 I915_READ(GEN6_PMINTRMSK));
1070 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1100 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1071 seq_printf(m, "Render p-state ratio: %d\n", 1101 seq_printf(m, "Render p-state ratio: %d\n",
1072 (gt_perf_status & 0xff00) >> 8); 1102 (gt_perf_status & 0xff00) >> 8);
@@ -1365,7 +1395,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
1365 1395
1366 if (IS_VALLEYVIEW(dev)) 1396 if (IS_VALLEYVIEW(dev))
1367 return vlv_drpc_info(m); 1397 return vlv_drpc_info(m);
1368 else if (IS_GEN6(dev) || IS_GEN7(dev)) 1398 else if (INTEL_INFO(dev)->gen >= 6)
1369 return gen6_drpc_info(m); 1399 return gen6_drpc_info(m);
1370 else 1400 else
1371 return ironlake_drpc_info(m); 1401 return ironlake_drpc_info(m);
@@ -1433,6 +1463,47 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1433 return 0; 1463 return 0;
1434} 1464}
1435 1465
1466static int i915_fbc_fc_get(void *data, u64 *val)
1467{
1468 struct drm_device *dev = data;
1469 struct drm_i915_private *dev_priv = dev->dev_private;
1470
1471 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1472 return -ENODEV;
1473
1474 drm_modeset_lock_all(dev);
1475 *val = dev_priv->fbc.false_color;
1476 drm_modeset_unlock_all(dev);
1477
1478 return 0;
1479}
1480
1481static int i915_fbc_fc_set(void *data, u64 val)
1482{
1483 struct drm_device *dev = data;
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485 u32 reg;
1486
1487 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1488 return -ENODEV;
1489
1490 drm_modeset_lock_all(dev);
1491
1492 reg = I915_READ(ILK_DPFC_CONTROL);
1493 dev_priv->fbc.false_color = val;
1494
1495 I915_WRITE(ILK_DPFC_CONTROL, val ?
1496 (reg | FBC_CTL_FALSE_COLOR) :
1497 (reg & ~FBC_CTL_FALSE_COLOR));
1498
1499 drm_modeset_unlock_all(dev);
1500 return 0;
1501}
1502
1503DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1504 i915_fbc_fc_get, i915_fbc_fc_set,
1505 "%llu\n");
1506
1436static int i915_ips_status(struct seq_file *m, void *unused) 1507static int i915_ips_status(struct seq_file *m, void *unused)
1437{ 1508{
1438 struct drm_info_node *node = m->private; 1509 struct drm_info_node *node = m->private;
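DEFINE_SIMPLE_ATTRIBUTE() above generates the file_operations for the u64 get/set pair; the node itself is wired up through the i915_debugfs_files table later in this patch. A registration sketch (path and mode assumed):

    /* Creates <debugfs>/dri/<minor>/i915_fbc_false_color backed by the
     * i915_fbc_fc_get()/i915_fbc_fc_set() pair above. */
    debugfs_create_file("i915_fbc_false_color", S_IRUSR | S_IWUSR,
                        minor->debugfs_root, dev, &i915_fbc_fc_fops);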
@@ -1630,6 +1701,14 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1630 return 0; 1701 return 0;
1631} 1702}
1632 1703
1704static void describe_ctx_ringbuf(struct seq_file *m,
1705 struct intel_ringbuffer *ringbuf)
1706{
1707 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1708 ringbuf->space, ringbuf->head, ringbuf->tail,
1709 ringbuf->last_retired_head);
1710}
1711
1633static int i915_context_status(struct seq_file *m, void *unused) 1712static int i915_context_status(struct seq_file *m, void *unused)
1634{ 1713{
1635 struct drm_info_node *node = m->private; 1714 struct drm_info_node *node = m->private;
@@ -1656,16 +1735,168 @@ static int i915_context_status(struct seq_file *m, void *unused)
1656 } 1735 }
1657 1736
1658 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1737 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1659 if (ctx->legacy_hw_ctx.rcs_state == NULL) 1738 if (!i915.enable_execlists &&
1739 ctx->legacy_hw_ctx.rcs_state == NULL)
1660 continue; 1740 continue;
1661 1741
1662 seq_puts(m, "HW context "); 1742 seq_puts(m, "HW context ");
1663 describe_ctx(m, ctx); 1743 describe_ctx(m, ctx);
1664 for_each_ring(ring, dev_priv, i) 1744 for_each_ring(ring, dev_priv, i) {
1745 if (ring->default_context == ctx)
1746 seq_printf(m, "(default context %s) ",
1747 ring->name);
1748 }
1749
1750 if (i915.enable_execlists) {
1751 seq_putc(m, '\n');
1752 for_each_ring(ring, dev_priv, i) {
1753 struct drm_i915_gem_object *ctx_obj =
1754 ctx->engine[i].state;
1755 struct intel_ringbuffer *ringbuf =
1756 ctx->engine[i].ringbuf;
1757
1758 seq_printf(m, "%s: ", ring->name);
1759 if (ctx_obj)
1760 describe_obj(m, ctx_obj);
1761 if (ringbuf)
1762 describe_ctx_ringbuf(m, ringbuf);
1763 seq_putc(m, '\n');
1764 }
1765 } else {
1766 describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1767 }
1768
1769 seq_putc(m, '\n');
1770 }
1771
1772 mutex_unlock(&dev->struct_mutex);
1773
1774 return 0;
1775}
1776
1777static int i915_dump_lrc(struct seq_file *m, void *unused)
1778{
1779 struct drm_info_node *node = (struct drm_info_node *) m->private;
1780 struct drm_device *dev = node->minor->dev;
1781 struct drm_i915_private *dev_priv = dev->dev_private;
1782 struct intel_engine_cs *ring;
1783 struct intel_context *ctx;
1784 int ret, i;
1785
1786 if (!i915.enable_execlists) {
1787 seq_printf(m, "Logical Ring Contexts are disabled\n");
1788 return 0;
1789 }
1790
1791 ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 if (ret)
1793 return ret;
1794
1795 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1796 for_each_ring(ring, dev_priv, i) {
1797 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1798
1665 if (ring->default_context == ctx) 1799 if (ring->default_context == ctx)
1666 seq_printf(m, "(default context %s) ", ring->name); 1800 continue;
1801
1802 if (ctx_obj) {
1803 struct page *page = i915_gem_object_get_page(ctx_obj, 1);
1804 uint32_t *reg_state = kmap_atomic(page);
1805 int j;
1806
1807 seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1808 intel_execlists_ctx_id(ctx_obj));
1809
1810 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1811 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1812 i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
1813 reg_state[j], reg_state[j + 1],
1814 reg_state[j + 2], reg_state[j + 3]);
1815 }
1816 kunmap_atomic(reg_state);
1817
1818 seq_putc(m, '\n');
1819 }
1820 }
1821 }
1822
1823 mutex_unlock(&dev->struct_mutex);
1824
1825 return 0;
1826}
1827
1828static int i915_execlists(struct seq_file *m, void *data)
1829{
1830 struct drm_info_node *node = (struct drm_info_node *)m->private;
1831 struct drm_device *dev = node->minor->dev;
1832 struct drm_i915_private *dev_priv = dev->dev_private;
1833 struct intel_engine_cs *ring;
1834 u32 status_pointer;
1835 u8 read_pointer;
1836 u8 write_pointer;
1837 u32 status;
1838 u32 ctx_id;
1839 struct list_head *cursor;
1840 int ring_id, i;
1841 int ret;
1842
1843 if (!i915.enable_execlists) {
1844 seq_puts(m, "Logical Ring Contexts are disabled\n");
1845 return 0;
1846 }
1847
1848 ret = mutex_lock_interruptible(&dev->struct_mutex);
1849 if (ret)
1850 return ret;
1851
1852 for_each_ring(ring, dev_priv, ring_id) {
1853 struct intel_ctx_submit_request *head_req = NULL;
1854 int count = 0;
1855 unsigned long flags;
1856
1857 seq_printf(m, "%s\n", ring->name);
1858
1859 status = I915_READ(RING_EXECLIST_STATUS(ring));
1860 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
1861 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
1862 status, ctx_id);
1863
1864 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
1865 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
1866
1867 read_pointer = ring->next_context_status_buffer;
1868 write_pointer = status_pointer & 0x07;
1869 if (read_pointer > write_pointer)
1870 write_pointer += 6;
1871 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
1872 read_pointer, write_pointer);
1873
1874 for (i = 0; i < 6; i++) {
1875 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
1876 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
1877
1878 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
1879 i, status, ctx_id);
1880 }
1881
1882 spin_lock_irqsave(&ring->execlist_lock, flags);
1883 list_for_each(cursor, &ring->execlist_queue)
1884 count++;
1885 head_req = list_first_entry_or_null(&ring->execlist_queue,
1886 struct intel_ctx_submit_request, execlist_link);
1887 spin_unlock_irqrestore(&ring->execlist_lock, flags);
1888
1889 seq_printf(m, "\t%d requests in queue\n", count);
1890 if (head_req) {
1891 struct drm_i915_gem_object *ctx_obj;
1892
1893 ctx_obj = head_req->ctx->engine[ring_id].state;
1894 seq_printf(m, "\tHead request id: %u\n",
1895 intel_execlists_ctx_id(ctx_obj));
1896 seq_printf(m, "\tHead request tail: %u\n",
1897 head_req->tail);
1898 }
1667 1899
1668 describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1669 seq_putc(m, '\n'); 1900 seq_putc(m, '\n');
1670 } 1901 }
1671 1902
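The read/write pointer bias in i915_execlists() is easiest to see with a worked example over the six context-status-buffer slots:

    /*
     * read_pointer == 5, raw write_pointer == 2: the writer has wrapped,
     * so biasing it to 2 + 6 == 8 makes the pending-entry count come out
     * as 8 - 5 == 3 without any modular arithmetic.
     */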
@@ -1774,7 +2005,13 @@ static int per_file_ctx(int id, void *ptr, void *data)
1774{ 2005{
1775 struct intel_context *ctx = ptr; 2006 struct intel_context *ctx = ptr;
1776 struct seq_file *m = data; 2007 struct seq_file *m = data;
1777 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx); 2008 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2009
2010 if (!ppgtt) {
2011 seq_printf(m, " no ppgtt for context %d\n",
2012 ctx->user_handle);
2013 return 0;
2014 }
1778 2015
1779 if (i915_gem_context_is_default(ctx)) 2016 if (i915_gem_context_is_default(ctx))
1780 seq_puts(m, " default context:\n"); 2017 seq_puts(m, " default context:\n");
@@ -1834,8 +2071,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1834 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 2071 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1835 2072
1836 ppgtt->debug_dump(ppgtt, m); 2073 ppgtt->debug_dump(ppgtt, m);
1837 } else 2074 }
1838 return;
1839 2075
1840 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2076 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
1841 struct drm_i915_file_private *file_priv = file->driver_priv; 2077 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2406,6 +2642,40 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2406 return 0; 2642 return 0;
2407} 2643}
2408 2644
2645static int i915_wa_registers(struct seq_file *m, void *unused)
2646{
2647 int i;
2648 int ret;
2649 struct drm_info_node *node = (struct drm_info_node *) m->private;
2650 struct drm_device *dev = node->minor->dev;
2651 struct drm_i915_private *dev_priv = dev->dev_private;
2652
2653 ret = mutex_lock_interruptible(&dev->struct_mutex);
2654 if (ret)
2655 return ret;
2656
2657 intel_runtime_pm_get(dev_priv);
2658
2659 seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
2660 for (i = 0; i < dev_priv->num_wa_regs; ++i) {
2661 u32 addr, mask;
2662
2663 addr = dev_priv->intel_wa_regs[i].addr;
2664 mask = dev_priv->intel_wa_regs[i].mask;
2665 dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
2666 if (dev_priv->intel_wa_regs[i].addr)
2667 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2668 dev_priv->intel_wa_regs[i].addr,
2669 dev_priv->intel_wa_regs[i].value,
2670 dev_priv->intel_wa_regs[i].mask);
2671 }
2672
2673 intel_runtime_pm_put(dev_priv);
2674 mutex_unlock(&dev->struct_mutex);
2675
2676 return 0;
2677}
2678
2409struct pipe_crc_info { 2679struct pipe_crc_info {
2410 const char *name; 2680 const char *name;
2411 struct drm_device *dev; 2681 struct drm_device *dev;
@@ -2667,8 +2937,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2667 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2937 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2668 2938
2669 drm_modeset_lock_all(dev); 2939 drm_modeset_lock_all(dev);
2670 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2940 for_each_intel_encoder(dev, encoder) {
2671 base.head) {
2672 if (!encoder->base.crtc) 2941 if (!encoder->base.crtc)
2673 continue; 2942 continue;
2674 2943
@@ -3557,9 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
3557{ 3826{
3558 struct drm_device *dev = data; 3827 struct drm_device *dev = data;
3559 struct drm_i915_private *dev_priv = dev->dev_private; 3828 struct drm_i915_private *dev_priv = dev->dev_private;
3560 struct drm_i915_gem_object *obj, *next;
3561 struct i915_address_space *vm;
3562 struct i915_vma *vma, *x;
3563 int ret; 3829 int ret;
3564 3830
3565 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3831 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3579,29 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
3579 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3845 if (val & (DROP_RETIRE | DROP_ACTIVE))
3580 i915_gem_retire_requests(dev); 3846 i915_gem_retire_requests(dev);
3581 3847
3582 if (val & DROP_BOUND) { 3848 if (val & DROP_BOUND)
3583 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3849 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
3584 list_for_each_entry_safe(vma, x, &vm->inactive_list,
3585 mm_list) {
3586 if (vma->pin_count)
3587 continue;
3588 3850
3589 ret = i915_vma_unbind(vma); 3851 if (val & DROP_UNBOUND)
3590 if (ret) 3852 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
3591 goto unlock;
3592 }
3593 }
3594 }
3595
3596 if (val & DROP_UNBOUND) {
3597 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
3598 global_list)
3599 if (obj->pages_pin_count == 0) {
3600 ret = i915_gem_object_put_pages(obj);
3601 if (ret)
3602 goto unlock;
3603 }
3604 }
3605 3853
3606unlock: 3854unlock:
3607 mutex_unlock(&dev->struct_mutex); 3855 mutex_unlock(&dev->struct_mutex);
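The open-coded VMA and unbound-list walks collapse into two calls to the new shrinker entry point. Its contract is inferred here, not spelled out in this hunk:

    /* i915_gem_shrink(dev_priv, target, flags): release up to target pages
     * from GTT-bound objects (I915_SHRINK_BOUND) and/or objects without a
     * VMA (I915_SHRINK_UNBOUND), skipping pinned ones; LONG_MAX means
     * "reclaim everything possible". */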
@@ -3923,6 +4171,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
3923 {"i915_opregion", i915_opregion, 0}, 4171 {"i915_opregion", i915_opregion, 0},
3924 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4172 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
3925 {"i915_context_status", i915_context_status, 0}, 4173 {"i915_context_status", i915_context_status, 0},
4174 {"i915_dump_lrc", i915_dump_lrc, 0},
4175 {"i915_execlists", i915_execlists, 0},
3926 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 4176 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
3927 {"i915_swizzle_info", i915_swizzle_info, 0}, 4177 {"i915_swizzle_info", i915_swizzle_info, 0},
3928 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4178 {"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -3936,6 +4186,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
3936 {"i915_semaphore_status", i915_semaphore_status, 0}, 4186 {"i915_semaphore_status", i915_semaphore_status, 0},
3937 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4187 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
3938 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4188 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4189 {"i915_wa_registers", i915_wa_registers, 0},
3939}; 4190};
3940#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4191#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
3941 4192
@@ -3957,6 +4208,7 @@ static const struct i915_debugfs_files {
3957 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4208 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
3958 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4209 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
3959 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4210 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4211 {"i915_fbc_false_color", &i915_fbc_fc_fops},
3960}; 4212};
3961 4213
3962void intel_display_crc_init(struct drm_device *dev) 4214void intel_display_crc_init(struct drm_device *dev)
@@ -3964,7 +4216,7 @@ void intel_display_crc_init(struct drm_device *dev)
3964 struct drm_i915_private *dev_priv = dev->dev_private; 4216 struct drm_i915_private *dev_priv = dev->dev_private;
3965 enum pipe pipe; 4217 enum pipe pipe;
3966 4218
3967 for_each_pipe(pipe) { 4219 for_each_pipe(dev_priv, pipe) {
3968 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 4220 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3969 4221
3970 pipe_crc->opened = false; 4222 pipe_crc->opened = false;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9933c26017ed..1403b01e8216 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -28,9 +28,11 @@
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 30
31#include <linux/async.h>
31#include <drm/drmP.h> 32#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h> 33#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h> 34#include <drm/drm_fb_helper.h>
35#include <drm/drm_legacy.h>
34#include "intel_drv.h" 36#include "intel_drv.h"
35#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
36#include "i915_drv.h" 38#include "i915_drv.h"
@@ -196,7 +198,7 @@ static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
196 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 198 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
197 int ret; 199 int ret;
198 200
199 master_priv->sarea = drm_getsarea(dev); 201 master_priv->sarea = drm_legacy_getsarea(dev);
200 if (master_priv->sarea) { 202 if (master_priv->sarea) {
201 master_priv->sarea_priv = (drm_i915_sarea_t *) 203 master_priv->sarea_priv = (drm_i915_sarea_t *)
202 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 204 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
@@ -999,7 +1001,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
999 value = HAS_WT(dev); 1001 value = HAS_WT(dev);
1000 break; 1002 break;
1001 case I915_PARAM_HAS_ALIASING_PPGTT: 1003 case I915_PARAM_HAS_ALIASING_PPGTT:
1002 value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev); 1004 value = USES_PPGTT(dev);
1003 break; 1005 break;
1004 case I915_PARAM_HAS_WAIT_TIMEOUT: 1006 case I915_PARAM_HAS_WAIT_TIMEOUT:
1005 value = 1; 1007 value = 1;
@@ -1355,8 +1357,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1355 if (ret) 1357 if (ret)
1356 goto cleanup_irq; 1358 goto cleanup_irq;
1357 1359
1358 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1359
1360 intel_modeset_gem_init(dev); 1360 intel_modeset_gem_init(dev);
1361 1361
1362 /* Always safe in the mode setting case. */ 1362 /* Always safe in the mode setting case. */
@@ -1382,7 +1382,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1382 * scanning against hotplug events. Hence do this first and ignore the 1382 * scanning against hotplug events. Hence do this first and ignore the
 1383 * tiny window where we will lose hotplug notifications. */ 1383 * tiny window where we will lose hotplug notifications. */
1384 */ 1384 */
1385 intel_fbdev_initial_config(dev); 1385 async_schedule(intel_fbdev_initial_config, dev_priv);
1386 1386
1387 drm_kms_helper_poll_init(dev); 1387 drm_kms_helper_poll_init(dev);
1388 1388
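async_schedule() takes an async_func_t (void (*)(void *, async_cookie_t), from <linux/async.h>), so intel_fbdev_initial_config() presumably changes signature to match; a sketch under that assumption:

    void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
    {
            struct drm_i915_private *dev_priv = data;

            /* ...probe connectors and set up the initial fbdev config... */
    }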
@@ -1393,7 +1393,6 @@ cleanup_gem:
1393 i915_gem_cleanup_ringbuffer(dev); 1393 i915_gem_cleanup_ringbuffer(dev);
1394 i915_gem_context_fini(dev); 1394 i915_gem_context_fini(dev);
1395 mutex_unlock(&dev->struct_mutex); 1395 mutex_unlock(&dev->struct_mutex);
1396 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1397cleanup_irq: 1396cleanup_irq:
1398 drm_irq_uninstall(dev); 1397 drm_irq_uninstall(dev);
1399cleanup_gem_stolen: 1398cleanup_gem_stolen:
@@ -1536,10 +1535,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
1536 info = (struct intel_device_info *)&dev_priv->info; 1535 info = (struct intel_device_info *)&dev_priv->info;
1537 1536
1538 if (IS_VALLEYVIEW(dev)) 1537 if (IS_VALLEYVIEW(dev))
1539 for_each_pipe(pipe) 1538 for_each_pipe(dev_priv, pipe)
1540 info->num_sprites[pipe] = 2; 1539 info->num_sprites[pipe] = 2;
1541 else 1540 else
1542 for_each_pipe(pipe) 1541 for_each_pipe(dev_priv, pipe)
1543 info->num_sprites[pipe] = 1; 1542 info->num_sprites[pipe] = 1;
1544 1543
1545 if (i915.disable_display) { 1544 if (i915.disable_display) {
@@ -1608,9 +1607,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1608 dev->dev_private = dev_priv; 1607 dev->dev_private = dev_priv;
1609 dev_priv->dev = dev; 1608 dev_priv->dev = dev;
1610 1609
1611 /* copy initial configuration to dev_priv->info */ 1610 /* Setup the write-once "constant" device info */
1612 device_info = (struct intel_device_info *)&dev_priv->info; 1611 device_info = (struct intel_device_info *)&dev_priv->info;
1613 *device_info = *info; 1612 memcpy(device_info, info, sizeof(dev_priv->info));
1613 device_info->device_id = dev->pdev->device;
1614 1614
1615 spin_lock_init(&dev_priv->irq_lock); 1615 spin_lock_init(&dev_priv->irq_lock);
1616 spin_lock_init(&dev_priv->gpu_error.lock); 1616 spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1822,7 +1822,7 @@ out_mtrrfree:
1822 arch_phys_wc_del(dev_priv->gtt.mtrr); 1822 arch_phys_wc_del(dev_priv->gtt.mtrr);
1823 io_mapping_free(dev_priv->gtt.mappable); 1823 io_mapping_free(dev_priv->gtt.mappable);
1824out_gtt: 1824out_gtt:
1825 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1825 i915_global_gtt_cleanup(dev);
1826out_regs: 1826out_regs:
1827 intel_uncore_fini(dev); 1827 intel_uncore_fini(dev);
1828 pci_iounmap(dev->pdev, dev_priv->regs); 1828 pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1869,7 +1869,6 @@ int i915_driver_unload(struct drm_device *dev)
1869 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1869 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1870 intel_fbdev_fini(dev); 1870 intel_fbdev_fini(dev);
1871 intel_modeset_cleanup(dev); 1871 intel_modeset_cleanup(dev);
1872 cancel_work_sync(&dev_priv->console_resume_work);
1873 1872
1874 /* 1873 /*
1875 * free the memory space allocated for the child device 1874 * free the memory space allocated for the child device
@@ -1902,7 +1901,6 @@ int i915_driver_unload(struct drm_device *dev)
1902 mutex_lock(&dev->struct_mutex); 1901 mutex_lock(&dev->struct_mutex);
1903 i915_gem_cleanup_ringbuffer(dev); 1902 i915_gem_cleanup_ringbuffer(dev);
1904 i915_gem_context_fini(dev); 1903 i915_gem_context_fini(dev);
1905 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1906 mutex_unlock(&dev->struct_mutex); 1904 mutex_unlock(&dev->struct_mutex);
1907 i915_gem_cleanup_stolen(dev); 1905 i915_gem_cleanup_stolen(dev);
1908 1906
@@ -1910,8 +1908,6 @@ int i915_driver_unload(struct drm_device *dev)
1910 i915_free_hws(dev); 1908 i915_free_hws(dev);
1911 } 1909 }
1912 1910
1913 WARN_ON(!list_empty(&dev_priv->vm_list));
1914
1915 drm_vblank_cleanup(dev); 1911 drm_vblank_cleanup(dev);
1916 1912
1917 intel_teardown_gmbus(dev); 1913 intel_teardown_gmbus(dev);
@@ -1921,7 +1917,7 @@ int i915_driver_unload(struct drm_device *dev)
1921 destroy_workqueue(dev_priv->wq); 1917 destroy_workqueue(dev_priv->wq);
1922 pm_qos_remove_request(&dev_priv->pm_qos); 1918 pm_qos_remove_request(&dev_priv->pm_qos);
1923 1919
1924 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1920 i915_global_gtt_cleanup(dev);
1925 1921
1926 intel_uncore_fini(dev); 1922 intel_uncore_fini(dev);
1927 if (dev_priv->regs != NULL) 1923 if (dev_priv->regs != NULL)
@@ -1986,6 +1982,9 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1986 i915_gem_context_close(dev, file); 1982 i915_gem_context_close(dev, file);
1987 i915_gem_release(dev, file); 1983 i915_gem_release(dev, file);
1988 mutex_unlock(&dev->struct_mutex); 1984 mutex_unlock(&dev->struct_mutex);
1985
1986 if (drm_core_check_feature(dev, DRIVER_MODESET))
1987 intel_modeset_preclose(dev, file);
1989} 1988}
1990 1989
1991void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1990void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e27cdbe9d524..055d5e7fbf12 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -481,6 +481,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
481 if (i915.semaphores >= 0) 481 if (i915.semaphores >= 0)
482 return i915.semaphores; 482 return i915.semaphores;
483 483
484 /* TODO: make semaphores and Execlists play nicely together */
485 if (i915.enable_execlists)
486 return false;
487
484 /* Until we get further testing... */ 488 /* Until we get further testing... */
485 if (IS_GEN8(dev)) 489 if (IS_GEN8(dev))
486 return false; 490 return false;
@@ -524,6 +528,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
524 drm_modeset_unlock_all(dev); 528 drm_modeset_unlock_all(dev);
525} 529}
526 530
531static int intel_suspend_complete(struct drm_i915_private *dev_priv);
532static int intel_resume_prepare(struct drm_i915_private *dev_priv,
533 bool rpm_resume);
534
527static int i915_drm_freeze(struct drm_device *dev) 535static int i915_drm_freeze(struct drm_device *dev)
528{ 536{
529 struct drm_i915_private *dev_priv = dev->dev_private; 537 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -591,9 +599,7 @@ static int i915_drm_freeze(struct drm_device *dev)
591 intel_uncore_forcewake_reset(dev, false); 599 intel_uncore_forcewake_reset(dev, false);
592 intel_opregion_fini(dev); 600 intel_opregion_fini(dev);
593 601
594 console_lock(); 602 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
595 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
596 console_unlock();
597 603
598 dev_priv->suspend_count++; 604 dev_priv->suspend_count++;
599 605
@@ -632,30 +638,20 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
632 return 0; 638 return 0;
633} 639}
634 640
635void intel_console_resume(struct work_struct *work)
636{
637 struct drm_i915_private *dev_priv =
638 container_of(work, struct drm_i915_private,
639 console_resume_work);
640 struct drm_device *dev = dev_priv->dev;
641
642 console_lock();
643 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
644 console_unlock();
645}
646
647static int i915_drm_thaw_early(struct drm_device *dev) 641static int i915_drm_thaw_early(struct drm_device *dev)
648{ 642{
649 struct drm_i915_private *dev_priv = dev->dev_private; 643 struct drm_i915_private *dev_priv = dev->dev_private;
644 int ret;
650 645
651 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 646 ret = intel_resume_prepare(dev_priv, false);
652 hsw_disable_pc8(dev_priv); 647 if (ret)
648 DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);
653 649
654 intel_uncore_early_sanitize(dev, true); 650 intel_uncore_early_sanitize(dev, true);
655 intel_uncore_sanitize(dev); 651 intel_uncore_sanitize(dev);
656 intel_power_domains_init_hw(dev_priv); 652 intel_power_domains_init_hw(dev_priv);
657 653
658 return 0; 654 return ret;
659} 655}
660 656
661static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) 657static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
@@ -714,17 +710,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
714 710
715 intel_opregion_init(dev); 711 intel_opregion_init(dev);
716 712
717 /* 713 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
718 * The console lock can be pretty contended on resume due
719 * to all the printk activity. Try to keep it out of the hot
720 * path of resume if possible.
721 */
722 if (console_trylock()) {
723 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
724 console_unlock();
725 } else {
726 schedule_work(&dev_priv->console_resume_work);
727 }
728 714
729 mutex_lock(&dev_priv->modeset_restore_lock); 715 mutex_lock(&dev_priv->modeset_restore_lock);
730 dev_priv->modeset_restore = MODESET_DONE; 716 dev_priv->modeset_restore = MODESET_DONE;
@@ -858,7 +844,13 @@ int i915_reset(struct drm_device *dev)
858 !dev_priv->ums.mm_suspended) { 844 !dev_priv->ums.mm_suspended) {
859 dev_priv->ums.mm_suspended = 0; 845 dev_priv->ums.mm_suspended = 0;
860 846
847 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
848 dev_priv->gpu_error.reload_in_reset = true;
849
861 ret = i915_gem_init_hw(dev); 850 ret = i915_gem_init_hw(dev);
851
852 dev_priv->gpu_error.reload_in_reset = false;
853
862 mutex_unlock(&dev->struct_mutex); 854 mutex_unlock(&dev->struct_mutex);
863 if (ret) { 855 if (ret) {
864 DRM_ERROR("Failed hw init on reset %d\n", ret); 856 DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -879,8 +871,6 @@ int i915_reset(struct drm_device *dev)
879 */ 871 */
880 if (INTEL_INFO(dev)->gen > 5) 872 if (INTEL_INFO(dev)->gen > 5)
881 intel_reset_gt_powersave(dev); 873 intel_reset_gt_powersave(dev);
882
883 intel_hpd_init(dev);
884 } else { 874 } else {
885 mutex_unlock(&dev->struct_mutex); 875 mutex_unlock(&dev->struct_mutex);
886 } 876 }
@@ -941,6 +931,7 @@ static int i915_pm_suspend_late(struct device *dev)
941 struct pci_dev *pdev = to_pci_dev(dev); 931 struct pci_dev *pdev = to_pci_dev(dev);
942 struct drm_device *drm_dev = pci_get_drvdata(pdev); 932 struct drm_device *drm_dev = pci_get_drvdata(pdev);
943 struct drm_i915_private *dev_priv = drm_dev->dev_private; 933 struct drm_i915_private *dev_priv = drm_dev->dev_private;
934 int ret;
944 935
945 /* 936 /*
946 * We have a suspend ordering issue with the snd-hda driver also 937
@@ -954,13 +945,16 @@ static int i915_pm_suspend_late(struct device *dev)
954 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 945 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
955 return 0; 946 return 0;
956 947
957 if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) 948 ret = intel_suspend_complete(dev_priv);
958 hsw_enable_pc8(dev_priv);
959 949
960 pci_disable_device(pdev); 950 if (ret)
961 pci_set_power_state(pdev, PCI_D3hot); 951 DRM_ERROR("Suspend complete failed: %d\n", ret);
952 else {
953 pci_disable_device(pdev);
954 pci_set_power_state(pdev, PCI_D3hot);
955 }
962 956
963 return 0; 957 return ret;
964} 958}
965 959
966static int i915_pm_resume_early(struct device *dev) 960static int i915_pm_resume_early(struct device *dev)
@@ -1016,23 +1010,26 @@ static int i915_pm_poweroff(struct device *dev)
1016 return i915_drm_freeze(drm_dev); 1010 return i915_drm_freeze(drm_dev);
1017} 1011}
1018 1012
1019static int hsw_runtime_suspend(struct drm_i915_private *dev_priv) 1013static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1020{ 1014{
1021 hsw_enable_pc8(dev_priv); 1015 hsw_enable_pc8(dev_priv);
1022 1016
1023 return 0; 1017 return 0;
1024} 1018}
1025 1019
1026static int snb_runtime_resume(struct drm_i915_private *dev_priv) 1020static int snb_resume_prepare(struct drm_i915_private *dev_priv,
1021 bool rpm_resume)
1027{ 1022{
1028 struct drm_device *dev = dev_priv->dev; 1023 struct drm_device *dev = dev_priv->dev;
1029 1024
1030 intel_init_pch_refclk(dev); 1025 if (rpm_resume)
1026 intel_init_pch_refclk(dev);
1031 1027
1032 return 0; 1028 return 0;
1033} 1029}
1034 1030
1035static int hsw_runtime_resume(struct drm_i915_private *dev_priv) 1031static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
1032 bool rpm_resume)
1036{ 1033{
1037 hsw_disable_pc8(dev_priv); 1034 hsw_disable_pc8(dev_priv);
1038 1035
@@ -1328,7 +1325,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1328 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 1325 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1329} 1326}
1330 1327
1331static int vlv_runtime_suspend(struct drm_i915_private *dev_priv) 1328static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
1332{ 1329{
1333 u32 mask; 1330 u32 mask;
1334 int err; 1331 int err;
@@ -1368,7 +1365,8 @@ err1:
1368 return err; 1365 return err;
1369} 1366}
1370 1367
1371static int vlv_runtime_resume(struct drm_i915_private *dev_priv) 1368static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1369 bool rpm_resume)
1372{ 1370{
1373 struct drm_device *dev = dev_priv->dev; 1371 struct drm_device *dev = dev_priv->dev;
1374 int err; 1372 int err;
@@ -1393,8 +1391,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
1393 1391
1394 vlv_check_no_gt_access(dev_priv); 1392 vlv_check_no_gt_access(dev_priv);
1395 1393
1396 intel_init_clock_gating(dev); 1394 if (rpm_resume) {
1397 i915_gem_restore_fences(dev); 1395 intel_init_clock_gating(dev);
1396 i915_gem_restore_fences(dev);
1397 }
1398 1398
1399 return ret; 1399 return ret;
1400} 1400}
@@ -1409,7 +1409,9 @@ static int intel_runtime_suspend(struct device *device)
1409 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 1409 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1410 return -ENODEV; 1410 return -ENODEV;
1411 1411
1412 WARN_ON(!HAS_RUNTIME_PM(dev)); 1412 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1413 return -ENODEV;
1414
1413 assert_force_wake_inactive(dev_priv); 1415 assert_force_wake_inactive(dev_priv);
1414 1416
1415 DRM_DEBUG_KMS("Suspending device\n"); 1417 DRM_DEBUG_KMS("Suspending device\n");
@@ -1446,17 +1448,7 @@ static int intel_runtime_suspend(struct device *device)
1446 cancel_work_sync(&dev_priv->rps.work); 1448 cancel_work_sync(&dev_priv->rps.work);
1447 intel_runtime_pm_disable_interrupts(dev); 1449 intel_runtime_pm_disable_interrupts(dev);
1448 1450
1449 if (IS_GEN6(dev)) { 1451 ret = intel_suspend_complete(dev_priv);
1450 ret = 0;
1451 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1452 ret = hsw_runtime_suspend(dev_priv);
1453 } else if (IS_VALLEYVIEW(dev)) {
1454 ret = vlv_runtime_suspend(dev_priv);
1455 } else {
1456 ret = -ENODEV;
1457 WARN_ON(1);
1458 }
1459
1460 if (ret) { 1452 if (ret) {
1461 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 1453 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1462 intel_runtime_pm_restore_interrupts(dev); 1454 intel_runtime_pm_restore_interrupts(dev);
@@ -1468,13 +1460,29 @@ static int intel_runtime_suspend(struct device *device)
1468 dev_priv->pm.suspended = true; 1460 dev_priv->pm.suspended = true;
1469 1461
1470 /* 1462 /*
1471 * current versions of firmware which depend on this opregion 1463 * FIXME: We really should find a document that references the arguments
1472 * notification have repurposed the D1 definition to mean 1464 * used below!
1473 * "runtime suspended" vs. what you would normally expect (D3)
1474 * to distinguish it from notifications that might be sent
1475 * via the suspend path.
1476 */ 1465 */
1477 intel_opregion_notify_adapter(dev, PCI_D1); 1466 if (IS_HASWELL(dev)) {
1467 /*
1468 * current versions of firmware which depend on this opregion
1469 * notification have repurposed the D1 definition to mean
1470 * "runtime suspended" vs. what you would normally expect (D3)
1471 * to distinguish it from notifications that might be sent via
1472 * the suspend path.
1473 */
1474 intel_opregion_notify_adapter(dev, PCI_D1);
1475 } else {
1476 /*
1477 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1478 * being detected, and the call we do at intel_runtime_resume()
1479 * won't be able to restore them. Since PCI_D3hot matches the
1480 * actual specification and appears to be working, use it. Let's
1481 * assume the other non-Haswell platforms will stay the same as
1482 * Broadwell.
1483 */
1484 intel_opregion_notify_adapter(dev, PCI_D3hot);
1485 }
1478 1486
1479 DRM_DEBUG_KMS("Device suspended\n"); 1487 DRM_DEBUG_KMS("Device suspended\n");
1480 return 0; 1488 return 0;
@@ -1487,24 +1495,15 @@ static int intel_runtime_resume(struct device *device)
1487 struct drm_i915_private *dev_priv = dev->dev_private; 1495 struct drm_i915_private *dev_priv = dev->dev_private;
1488 int ret; 1496 int ret;
1489 1497
1490 WARN_ON(!HAS_RUNTIME_PM(dev)); 1498 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1499 return -ENODEV;
1491 1500
1492 DRM_DEBUG_KMS("Resuming device\n"); 1501 DRM_DEBUG_KMS("Resuming device\n");
1493 1502
1494 intel_opregion_notify_adapter(dev, PCI_D0); 1503 intel_opregion_notify_adapter(dev, PCI_D0);
1495 dev_priv->pm.suspended = false; 1504 dev_priv->pm.suspended = false;
1496 1505
1497 if (IS_GEN6(dev)) { 1506 ret = intel_resume_prepare(dev_priv, true);
1498 ret = snb_runtime_resume(dev_priv);
1499 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1500 ret = hsw_runtime_resume(dev_priv);
1501 } else if (IS_VALLEYVIEW(dev)) {
1502 ret = vlv_runtime_resume(dev_priv);
1503 } else {
1504 WARN_ON(1);
1505 ret = -ENODEV;
1506 }
1507
1508 /* 1507 /*
1509 * No point of rolling back things in case of an error, as the best 1508 * No point of rolling back things in case of an error, as the best
1510 * we can do is to hope that things will still work (and disable RPM). 1509 * we can do is to hope that things will still work (and disable RPM).
@@ -1523,6 +1522,48 @@ static int intel_runtime_resume(struct device *device)
1523 return ret; 1522 return ret;
1524} 1523}
1525 1524
1525/*
1526 * This function implements functionality common to the runtime and
1527 * system suspend sequences.
1528 */
1529static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1530{
1531 struct drm_device *dev = dev_priv->dev;
1532 int ret;
1533
1534 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1535 ret = hsw_suspend_complete(dev_priv);
1536 else if (IS_VALLEYVIEW(dev))
1537 ret = vlv_suspend_complete(dev_priv);
1538 else
1539 ret = 0;
1540
1541 return ret;
1542}
1543
1544/*
1545 * This function implements functionality common to the runtime and
1546 * system resume sequences; the rpm_resume flag selects the steps
1547 * that only apply on the runtime resume path.
1548 */
1549static int intel_resume_prepare(struct drm_i915_private *dev_priv,
1550 bool rpm_resume)
1551{
1552 struct drm_device *dev = dev_priv->dev;
1553 int ret;
1554
1555 if (IS_GEN6(dev))
1556 ret = snb_resume_prepare(dev_priv, rpm_resume);
1557 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1558 ret = hsw_resume_prepare(dev_priv, rpm_resume);
1559 else if (IS_VALLEYVIEW(dev))
1560 ret = vlv_resume_prepare(dev_priv, rpm_resume);
1561 else
1562 ret = 0;
1563
1564 return ret;
1565}
1566
1526static const struct dev_pm_ops i915_pm_ops = { 1567static const struct dev_pm_ops i915_pm_ops = {
1527 .suspend = i915_pm_suspend, 1568 .suspend = i915_pm_suspend,
1528 .suspend_late = i915_pm_suspend_late, 1569 .suspend_late = i915_pm_suspend_late,
@@ -1572,6 +1613,7 @@ static struct drm_driver driver = {
1572 .lastclose = i915_driver_lastclose, 1613 .lastclose = i915_driver_lastclose,
1573 .preclose = i915_driver_preclose, 1614 .preclose = i915_driver_preclose,
1574 .postclose = i915_driver_postclose, 1615 .postclose = i915_driver_postclose,
1616 .set_busid = drm_pci_set_busid,
1575 1617
1576 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 1618 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1577 .suspend = i915_suspend, 1619 .suspend = i915_suspend,
@@ -1663,6 +1705,8 @@ static void __exit i915_exit(void)
1663module_init(i915_init); 1705module_init(i915_init);
1664module_exit(i915_exit); 1706module_exit(i915_exit);
1665 1707
1666MODULE_AUTHOR(DRIVER_AUTHOR); 1708MODULE_AUTHOR("Tungsten Graphics, Inc.");
1709MODULE_AUTHOR("Intel Corporation");
1710
1667MODULE_DESCRIPTION(DRIVER_DESC); 1711MODULE_DESCRIPTION(DRIVER_DESC);
1668MODULE_LICENSE("GPL and additional rights"); 1712MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3524306d8cfb..16a6f6d187a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,11 +35,15 @@
35#include "i915_reg.h" 35#include "i915_reg.h"
36#include "intel_bios.h" 36#include "intel_bios.h"
37#include "intel_ringbuffer.h" 37#include "intel_ringbuffer.h"
38#include "intel_lrc.h"
38#include "i915_gem_gtt.h" 39#include "i915_gem_gtt.h"
40#include "i915_gem_render_state.h"
39#include <linux/io-mapping.h> 41#include <linux/io-mapping.h>
40#include <linux/i2c.h> 42#include <linux/i2c.h>
41#include <linux/i2c-algo-bit.h> 43#include <linux/i2c-algo-bit.h>
42#include <drm/intel-gtt.h> 44#include <drm/intel-gtt.h>
45#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
46#include <drm/drm_gem.h>
43#include <linux/backlight.h> 47#include <linux/backlight.h>
44#include <linux/hashtable.h> 48#include <linux/hashtable.h>
45#include <linux/intel-iommu.h> 49#include <linux/intel-iommu.h>
@@ -49,11 +53,9 @@
49/* General customization: 53/* General customization:
50 */ 54 */
51 55
52#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
53
54#define DRIVER_NAME "i915" 56#define DRIVER_NAME "i915"
55#define DRIVER_DESC "Intel Graphics" 57#define DRIVER_DESC "Intel Graphics"
56#define DRIVER_DATE "20140725" 58#define DRIVER_DATE "20140905"
57 59
58enum pipe { 60enum pipe {
59 INVALID_PIPE = -1, 61 INVALID_PIPE = -1,
@@ -162,7 +164,10 @@ enum hpd_pin {
162 I915_GEM_DOMAIN_INSTRUCTION | \ 164 I915_GEM_DOMAIN_INSTRUCTION | \
163 I915_GEM_DOMAIN_VERTEX) 165 I915_GEM_DOMAIN_VERTEX)
164 166
165#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) 167#define for_each_pipe(__dev_priv, __p) \
168 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
169#define for_each_plane(pipe, p) \
170 for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
166#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) 171#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
167 172
168#define for_each_crtc(dev, crtc) \ 173#define for_each_crtc(dev, crtc) \
@@ -171,6 +176,11 @@ enum hpd_pin {
171#define for_each_intel_crtc(dev, intel_crtc) \ 176#define for_each_intel_crtc(dev, intel_crtc) \
172 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 177 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
173 178
179#define for_each_intel_encoder(dev, intel_encoder) \
180 list_for_each_entry(intel_encoder, \
181 &(dev)->mode_config.encoder_list, \
182 base.head)
183
174#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 184#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
175 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 185 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
176 if ((intel_encoder)->base.crtc == (__crtc)) 186 if ((intel_encoder)->base.crtc == (__crtc))
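For reference, the reworked iterators above are used like this; a minimal sketch assuming dev_priv, dev and the usual i915 helpers (pipe_name(), DRM_DEBUG_KMS()) are in scope:

	enum pipe pipe;
	int plane;
	struct intel_encoder *encoder;

	/* for_each_pipe() now takes dev_priv explicitly instead of
	 * relying on a 'dev' variable in the enclosing scope (which
	 * for_each_plane() still does). */
	for_each_pipe(dev_priv, pipe)
		for_each_plane(pipe, plane)	/* primary plane + sprites */
			DRM_DEBUG_KMS("pipe %c plane %d\n",
				      pipe_name(pipe), plane);

	/* for_each_intel_encoder() replaces open-coded
	 * list_for_each_entry() walks over mode_config.encoder_list. */
	for_each_intel_encoder(dev, encoder)
		DRM_DEBUG_KMS("encoder type %d\n", encoder->type);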
@@ -198,10 +208,13 @@ enum intel_dpll_id {
198#define I915_NUM_PLLS 2 208#define I915_NUM_PLLS 2
199 209
200struct intel_dpll_hw_state { 210struct intel_dpll_hw_state {
211 /* i9xx, pch plls */
201 uint32_t dpll; 212 uint32_t dpll;
202 uint32_t dpll_md; 213 uint32_t dpll_md;
203 uint32_t fp0; 214 uint32_t fp0;
204 uint32_t fp1; 215 uint32_t fp1;
216
217 /* hsw, bdw */
205 uint32_t wrpll; 218 uint32_t wrpll;
206}; 219};
207 220
@@ -277,8 +290,10 @@ struct intel_opregion {
277struct intel_overlay; 290struct intel_overlay;
278struct intel_overlay_error_state; 291struct intel_overlay_error_state;
279 292
293struct drm_local_map;
294
280struct drm_i915_master_private { 295struct drm_i915_master_private {
281 drm_local_map_t *sarea; 296 struct drm_local_map *sarea;
282 struct _drm_i915_sarea *sarea_priv; 297 struct _drm_i915_sarea *sarea_priv;
283}; 298};
284#define I915_FENCE_REG_NONE -1 299#define I915_FENCE_REG_NONE -1
@@ -388,6 +403,7 @@ struct drm_i915_error_state {
388 pid_t pid; 403 pid_t pid;
389 char comm[TASK_COMM_LEN]; 404 char comm[TASK_COMM_LEN];
390 } ring[I915_NUM_RINGS]; 405 } ring[I915_NUM_RINGS];
406
391 struct drm_i915_error_buffer { 407 struct drm_i915_error_buffer {
392 u32 size; 408 u32 size;
393 u32 name; 409 u32 name;
@@ -406,6 +422,7 @@ struct drm_i915_error_state {
406 } **active_bo, **pinned_bo; 422 } **active_bo, **pinned_bo;
407 423
408 u32 *active_bo_count, *pinned_bo_count; 424 u32 *active_bo_count, *pinned_bo_count;
425 u32 vm_count;
409}; 426};
410 427
411struct intel_connector; 428struct intel_connector;
@@ -551,6 +568,7 @@ struct intel_uncore {
551 568
552struct intel_device_info { 569struct intel_device_info {
553 u32 display_mmio_offset; 570 u32 display_mmio_offset;
571 u16 device_id;
554 u8 num_pipes:3; 572 u8 num_pipes:3;
555 u8 num_sprites[I915_MAX_PIPES]; 573 u8 num_sprites[I915_MAX_PIPES];
556 u8 gen; 574 u8 gen;
@@ -615,13 +633,21 @@ struct intel_context {
615 uint8_t remap_slice; 633 uint8_t remap_slice;
616 struct drm_i915_file_private *file_priv; 634 struct drm_i915_file_private *file_priv;
617 struct i915_ctx_hang_stats hang_stats; 635 struct i915_ctx_hang_stats hang_stats;
618 struct i915_address_space *vm; 636 struct i915_hw_ppgtt *ppgtt;
619 637
638 /* Legacy ring buffer submission */
620 struct { 639 struct {
621 struct drm_i915_gem_object *rcs_state; 640 struct drm_i915_gem_object *rcs_state;
622 bool initialized; 641 bool initialized;
623 } legacy_hw_ctx; 642 } legacy_hw_ctx;
624 643
644 /* Execlists */
645 bool rcs_initialized;
646 struct {
647 struct drm_i915_gem_object *state;
648 struct intel_ringbuffer *ringbuf;
649 } engine[I915_NUM_RINGS];
650
625 struct list_head link; 651 struct list_head link;
626}; 652};
627 653
@@ -635,6 +661,8 @@ struct i915_fbc {
635 struct drm_mm_node compressed_fb; 661 struct drm_mm_node compressed_fb;
636 struct drm_mm_node *compressed_llb; 662 struct drm_mm_node *compressed_llb;
637 663
664 bool false_color;
665
638 struct intel_fbc_work { 666 struct intel_fbc_work {
639 struct delayed_work work; 667 struct delayed_work work;
640 struct drm_crtc *crtc; 668 struct drm_crtc *crtc;
@@ -688,6 +716,7 @@ enum intel_sbi_destination {
688#define QUIRK_LVDS_SSC_DISABLE (1<<1) 716#define QUIRK_LVDS_SSC_DISABLE (1<<1)
689#define QUIRK_INVERT_BRIGHTNESS (1<<2) 717#define QUIRK_INVERT_BRIGHTNESS (1<<2)
690#define QUIRK_BACKLIGHT_PRESENT (1<<3) 718#define QUIRK_BACKLIGHT_PRESENT (1<<3)
719#define QUIRK_PIPEB_FORCE (1<<4)
691 720
692struct intel_fbdev; 721struct intel_fbdev;
693struct intel_fbc_work; 722struct intel_fbc_work;
@@ -1147,6 +1176,7 @@ struct i915_gem_mm {
1147}; 1176};
1148 1177
1149struct drm_i915_error_state_buf { 1178struct drm_i915_error_state_buf {
1179 struct drm_i915_private *i915;
1150 unsigned bytes; 1180 unsigned bytes;
1151 unsigned size; 1181 unsigned size;
1152 int err; 1182 int err;
@@ -1219,6 +1249,9 @@ struct i915_gpu_error {
1219 1249
1220 /* For missed irq/seqno simulation. */ 1250 /* For missed irq/seqno simulation. */
1221 unsigned int test_irq_rings; 1251 unsigned int test_irq_rings;
1252
1253 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
1254 bool reload_in_reset;
1222}; 1255};
1223 1256
1224enum modeset_restore { 1257enum modeset_restore {
@@ -1228,6 +1261,12 @@ enum modeset_restore {
1228}; 1261};
1229 1262
1230struct ddi_vbt_port_info { 1263struct ddi_vbt_port_info {
1264 /*
1265 * This is an index in the HDMI/DVI DDI buffer translation table.
1266 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1267 * populate this field.
1268 */
1269#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
1231 uint8_t hdmi_level_shift; 1270 uint8_t hdmi_level_shift;
1232 1271
1233 uint8_t supports_dvi:1; 1272 uint8_t supports_dvi:1;
@@ -1421,7 +1460,7 @@ struct drm_i915_private {
1421 struct drm_i915_gem_object *semaphore_obj; 1460 struct drm_i915_gem_object *semaphore_obj;
1422 uint32_t last_seqno, next_seqno; 1461 uint32_t last_seqno, next_seqno;
1423 1462
1424 drm_dma_handle_t *status_page_dmah; 1463 struct drm_dma_handle *status_page_dmah;
1425 struct resource mch_res; 1464 struct resource mch_res;
1426 1465
1427 /* protects the irq masks */ 1466 /* protects the irq masks */
@@ -1475,6 +1514,9 @@ struct drm_i915_private {
1475 /* LVDS info */ 1514 /* LVDS info */
1476 bool no_aux_handshake; 1515 bool no_aux_handshake;
1477 1516
1517 /* protects panel power sequencer state */
1518 struct mutex pps_mutex;
1519
1478 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1520 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1479 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 1521 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1480 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1522 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -1526,6 +1568,20 @@ struct drm_i915_private {
1526 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1568 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1527 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1569 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1528 1570
1571 /*
1572 * Workarounds are currently applied in several places and are being
1573 * consolidated, so the exact count is not known yet; use a maximum
1574 * value for now.
1575 */
1576#define I915_MAX_WA_REGS 16
1577 struct {
1578 u32 addr;
1579 u32 value;
1580 /* bitmask representing WA bits */
1581 u32 mask;
1582 } intel_wa_regs[I915_MAX_WA_REGS];
1583 u32 num_wa_regs;
1584
1529 /* Reclocking support */ 1585 /* Reclocking support */
1530 bool render_reclock_avail; 1586 bool render_reclock_avail;
1531 bool lvds_downclock_avail; 1587 bool lvds_downclock_avail;
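The new intel_wa_regs[] table only records register writes for later inspection; a hypothetical helper (not part of this patch) illustrating the intended bookkeeping, with addr/mask/value mirroring the struct fields:

static void record_wa_reg(struct drm_i915_private *dev_priv,
			  u32 addr, u32 mask, u32 value)
{
	const u32 idx = dev_priv->num_wa_regs;

	/* The table is bounded by a guessed maximum (see the comment
	 * above); warn about and drop anything past it. */
	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return;

	dev_priv->intel_wa_regs[idx].addr = addr;
	dev_priv->intel_wa_regs[idx].mask = mask;
	dev_priv->intel_wa_regs[idx].value = value;
	dev_priv->num_wa_regs = idx + 1;
}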
@@ -1561,14 +1617,9 @@ struct drm_i915_private {
1561#ifdef CONFIG_DRM_I915_FBDEV 1617#ifdef CONFIG_DRM_I915_FBDEV
1562 /* list of fbdev register on this device */ 1618 /* list of fbdev register on this device */
1563 struct intel_fbdev *fbdev; 1619 struct intel_fbdev *fbdev;
1620 struct work_struct fbdev_suspend_work;
1564#endif 1621#endif
1565 1622
1566 /*
1567 * The console may be contended at resume, but we don't
1568 * want it to block on it.
1569 */
1570 struct work_struct console_resume_work;
1571
1572 struct drm_property *broadcast_rgb_property; 1623 struct drm_property *broadcast_rgb_property;
1573 struct drm_property *force_audio_property; 1624 struct drm_property *force_audio_property;
1574 1625
@@ -1614,12 +1665,28 @@ struct drm_i915_private {
1614 */ 1665 */
1615 struct workqueue_struct *dp_wq; 1666 struct workqueue_struct *dp_wq;
1616 1667
1668 uint32_t bios_vgacntr;
1669
1617 /* Old dri1 support infrastructure, beware the dragons ya fools entering 1670 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1618 * here! */ 1671 * here! */
1619 struct i915_dri1_state dri1; 1672 struct i915_dri1_state dri1;
1620 /* Old ums support infrastructure, same warning applies. */ 1673 /* Old ums support infrastructure, same warning applies. */
1621 struct i915_ums_state ums; 1674 struct i915_ums_state ums;
1622 1675
1676 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1677 struct {
1678 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
1679 struct intel_engine_cs *ring,
1680 struct intel_context *ctx,
1681 struct drm_i915_gem_execbuffer2 *args,
1682 struct list_head *vmas,
1683 struct drm_i915_gem_object *batch_obj,
1684 u64 exec_start, u32 flags);
1685 int (*init_rings)(struct drm_device *dev);
1686 void (*cleanup_ring)(struct intel_engine_cs *ring);
1687 void (*stop_ring)(struct intel_engine_cs *ring);
1688 } gt;
1689
1623 /* 1690 /*
1624 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1691 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1625 * will be rejected. Instead look for a better place. 1692 * will be rejected. Instead look for a better place.
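The gt vtable keeps the rest of the driver agnostic about the submission backend. A sketch of how it is plausibly populated at init time, keyed off i915.enable_execlists; only i915_gem_ringbuffer_submission() and i915_gem_init_rings() appear in this patch, the execlists-side names are assumptions:

	if (!i915.enable_execlists) {
		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_rings = i915_gem_init_rings;
		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
	} else {
		/* Assumed names for the execlists counterparts. */
		dev_priv->gt.do_execbuf = intel_execlists_submission;
		dev_priv->gt.init_rings = intel_logical_rings_init;
		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
		dev_priv->gt.stop_ring = intel_logical_ring_stop;
	}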
@@ -1761,13 +1828,6 @@ struct drm_i915_gem_object {
1761 * Only honoured if hardware has relevant pte bit 1828 * Only honoured if hardware has relevant pte bit
1762 */ 1829 */
1763 unsigned long gt_ro:1; 1830 unsigned long gt_ro:1;
1764
1765 /*
1766 * Is the GPU currently using a fence to access this buffer,
1767 */
1768 unsigned int pending_fenced_gpu_access:1;
1769 unsigned int fenced_gpu_access:1;
1770
1771 unsigned int cache_level:3; 1831 unsigned int cache_level:3;
1772 1832
1773 unsigned int has_aliasing_ppgtt_mapping:1; 1833 unsigned int has_aliasing_ppgtt_mapping:1;
@@ -1805,7 +1865,7 @@ struct drm_i915_gem_object {
1805 struct drm_file *pin_filp; 1865 struct drm_file *pin_filp;
1806 1866
1807 /** for phy allocated objects */ 1867 /** for phy allocated objects */
1808 drm_dma_handle_t *phys_handle; 1868 struct drm_dma_handle *phys_handle;
1809 1869
1810 union { 1870 union {
1811 struct i915_gem_userptr { 1871 struct i915_gem_userptr {
@@ -1971,51 +2031,63 @@ struct drm_i915_cmd_table {
1971 int count; 2031 int count;
1972}; 2032};
1973 2033
1974#define INTEL_INFO(dev) (&to_i915(dev)->info) 2034/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
1975 2035#define __I915__(p) ({ \
1976#define IS_I830(dev) ((dev)->pdev->device == 0x3577) 2036 struct drm_i915_private *__p; \
1977#define IS_845G(dev) ((dev)->pdev->device == 0x2562) 2037 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
2038 __p = (struct drm_i915_private *)p; \
2039 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
2040 __p = to_i915((struct drm_device *)p); \
2041 else \
2042 BUILD_BUG(); \
2043 __p; \
2044})
2045#define INTEL_INFO(p) (&__I915__(p)->info)
2046#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2047
2048#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
2049#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
1978#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2050#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1979#define IS_I865G(dev) ((dev)->pdev->device == 0x2572) 2051#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
1980#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2052#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1981#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592) 2053#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
1982#define IS_I945G(dev) ((dev)->pdev->device == 0x2772) 2054#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
1983#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2055#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1984#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2056#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1985#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2057#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1986#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42) 2058#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
1987#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2059#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1988#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001) 2060#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
1989#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011) 2061#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
1990#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2062#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1991#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2063#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1992#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046) 2064#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
1993#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2065#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1994#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \ 2066#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
1995 (dev)->pdev->device == 0x0152 || \ 2067 INTEL_DEVID(dev) == 0x0152 || \
1996 (dev)->pdev->device == 0x015a) 2068 INTEL_DEVID(dev) == 0x015a)
1997#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \ 2069#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
1998 (dev)->pdev->device == 0x0106 || \ 2070 INTEL_DEVID(dev) == 0x0106 || \
1999 (dev)->pdev->device == 0x010A) 2071 INTEL_DEVID(dev) == 0x010A)
2000#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2072#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2001#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2073#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2002#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2074#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2003#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2075#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2004#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2076#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
2005#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2077#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
2006 ((dev)->pdev->device & 0xFF00) == 0x0C00) 2078 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
2007#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2079#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
2008 (((dev)->pdev->device & 0xf) == 0x2 || \ 2080 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
2009 ((dev)->pdev->device & 0xf) == 0x6 || \ 2081 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
2010 ((dev)->pdev->device & 0xf) == 0xe)) 2082 (INTEL_DEVID(dev) & 0xf) == 0xe))
2011#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2083#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
2012 ((dev)->pdev->device & 0xFF00) == 0x0A00) 2084 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
2013#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 2085#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
2014#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2086#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
2015 ((dev)->pdev->device & 0x00F0) == 0x0020) 2087 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2016/* ULX machines are also considered ULT. */ 2088/* ULX machines are also considered ULT. */
2017#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ 2089#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
2018 (dev)->pdev->device == 0x0A1E) 2090 INTEL_DEVID(dev) == 0x0A1E)
2019#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2091#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2020 2092
2021/* 2093/*
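The __I915__() macro above dispatches on the static type of its argument: every branch must compile for every input type (hence the plain casts), and the dead branch is discarded at compile time. A self-contained userspace sketch of the same pattern, with illustrative names and NULL standing in for the kernel's BUILD_BUG():

#include <stdio.h>

struct priv { int id; };
struct dev { struct priv *dev_private; };

#define TO_PRIV(p) (__extension__ ({				\
	struct priv *__p;					\
	if (__builtin_types_compatible_p(typeof(*(p)),		\
					 struct priv))		\
		__p = (struct priv *)(p);			\
	else if (__builtin_types_compatible_p(typeof(*(p)),	\
					      struct dev))	\
		__p = ((struct dev *)(p))->dev_private;		\
	else							\
		__p = NULL;					\
	__p;							\
}))

int main(void)
{
	struct priv p = { .id = 42 };
	struct dev d = { .dev_private = &p };

	/* Both calls resolve to the same struct priv. */
	printf("%d %d\n", TO_PRIV(&p)->id, TO_PRIV(&d)->id);
	return 0;
}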
@@ -2047,10 +2119,11 @@ struct drm_i915_cmd_table {
2047#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2119#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
2048 2120
2049#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2121#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
2122#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
2050#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) 2123#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
2051#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) 2124#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
2052#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) 2125#define USES_PPGTT(dev) (i915.enable_ppgtt)
2053#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) 2126#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
2054 2127
2055#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2128#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
2056#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2129#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -2134,6 +2207,7 @@ struct i915_params {
2134 int enable_rc6; 2207 int enable_rc6;
2135 int enable_fbc; 2208 int enable_fbc;
2136 int enable_ppgtt; 2209 int enable_ppgtt;
2210 int enable_execlists;
2137 int enable_psr; 2211 int enable_psr;
2138 unsigned int preliminary_hw_support; 2212 unsigned int preliminary_hw_support;
2139 int disable_power_well; 2213 int disable_power_well;
@@ -2180,8 +2254,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2180int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2254int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2181void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2255void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2182 2256
2183extern void intel_console_resume(struct work_struct *work);
2184
2185/* i915_irq.c */ 2257/* i915_irq.c */
2186void i915_queue_hangcheck(struct drm_device *dev); 2258void i915_queue_hangcheck(struct drm_device *dev);
2187__printf(3, 4) 2259__printf(3, 4)
@@ -2229,6 +2301,20 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2229 struct drm_file *file_priv); 2301 struct drm_file *file_priv);
2230int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2302int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
2231 struct drm_file *file_priv); 2303 struct drm_file *file_priv);
2304void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
2305 struct intel_engine_cs *ring);
2306void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
2307 struct drm_file *file,
2308 struct intel_engine_cs *ring,
2309 struct drm_i915_gem_object *obj);
2310int i915_gem_ringbuffer_submission(struct drm_device *dev,
2311 struct drm_file *file,
2312 struct intel_engine_cs *ring,
2313 struct intel_context *ctx,
2314 struct drm_i915_gem_execbuffer2 *args,
2315 struct list_head *vmas,
2316 struct drm_i915_gem_object *batch_obj,
2317 u64 exec_start, u32 flags);
2232int i915_gem_execbuffer(struct drm_device *dev, void *data, 2318int i915_gem_execbuffer(struct drm_device *dev, void *data,
2233 struct drm_file *file_priv); 2319 struct drm_file *file_priv);
2234int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2320int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2263,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2263int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2349int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2264 struct drm_file *file_priv); 2350 struct drm_file *file_priv);
2265void i915_gem_load(struct drm_device *dev); 2351void i915_gem_load(struct drm_device *dev);
2352unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
2353 long target,
2354 unsigned flags);
2355#define I915_SHRINK_PURGEABLE 0x1
2356#define I915_SHRINK_UNBOUND 0x2
2357#define I915_SHRINK_BOUND 0x4
2266void *i915_gem_object_alloc(struct drm_device *dev); 2358void *i915_gem_object_alloc(struct drm_device *dev);
2267void i915_gem_object_free(struct drm_i915_gem_object *obj); 2359void i915_gem_object_free(struct drm_i915_gem_object *obj);
2268void i915_gem_object_init(struct drm_i915_gem_object *obj, 2360void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2381,6 +2473,7 @@ void i915_gem_reset(struct drm_device *dev);
2381bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 2473bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2382int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 2474int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2383int __must_check i915_gem_init(struct drm_device *dev); 2475int __must_check i915_gem_init(struct drm_device *dev);
2476int i915_gem_init_rings(struct drm_device *dev);
2384int __must_check i915_gem_init_hw(struct drm_device *dev); 2477int __must_check i915_gem_init_hw(struct drm_device *dev);
2385int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); 2478int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
2386void i915_gem_init_swizzling(struct drm_device *dev); 2479void i915_gem_init_swizzling(struct drm_device *dev);
@@ -2451,7 +2544,7 @@ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
2451} 2544}
2452 2545
2453/* Some GGTT VM helpers */ 2546/* Some GGTT VM helpers */
2454#define obj_to_ggtt(obj) \ 2547#define i915_obj_to_ggtt(obj) \
2455 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 2548 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2456static inline bool i915_is_ggtt(struct i915_address_space *vm) 2549static inline bool i915_is_ggtt(struct i915_address_space *vm)
2457{ 2550{
@@ -2460,21 +2553,30 @@ static inline bool i915_is_ggtt(struct i915_address_space *vm)
2460 return vm == ggtt; 2553 return vm == ggtt;
2461} 2554}
2462 2555
2556static inline struct i915_hw_ppgtt *
2557i915_vm_to_ppgtt(struct i915_address_space *vm)
2558{
2559 WARN_ON(i915_is_ggtt(vm));
2560
2561 return container_of(vm, struct i915_hw_ppgtt, base);
2562}
2563
2564
2463static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 2565static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2464{ 2566{
2465 return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); 2567 return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
2466} 2568}
2467 2569
2468static inline unsigned long 2570static inline unsigned long
2469i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) 2571i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2470{ 2572{
2471 return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); 2573 return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
2472} 2574}
2473 2575
2474static inline unsigned long 2576static inline unsigned long
2475i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 2577i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2476{ 2578{
2477 return i915_gem_obj_size(obj, obj_to_ggtt(obj)); 2579 return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
2478} 2580}
2479 2581
2480static inline int __must_check 2582static inline int __must_check
@@ -2482,7 +2584,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2482 uint32_t alignment, 2584 uint32_t alignment,
2483 unsigned flags) 2585 unsigned flags)
2484{ 2586{
2485 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); 2587 return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
2588 alignment, flags | PIN_GLOBAL);
2486} 2589}
2487 2590
2488static inline int 2591static inline int
@@ -2494,7 +2597,6 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2494void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); 2597void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
2495 2598
2496/* i915_gem_context.c */ 2599/* i915_gem_context.c */
2497#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
2498int __must_check i915_gem_context_init(struct drm_device *dev); 2600int __must_check i915_gem_context_init(struct drm_device *dev);
2499void i915_gem_context_fini(struct drm_device *dev); 2601void i915_gem_context_fini(struct drm_device *dev);
2500void i915_gem_context_reset(struct drm_device *dev); 2602void i915_gem_context_reset(struct drm_device *dev);
@@ -2506,6 +2608,8 @@ int i915_switch_context(struct intel_engine_cs *ring,
2506struct intel_context * 2608struct intel_context *
2507i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 2609i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2508void i915_gem_context_free(struct kref *ctx_ref); 2610void i915_gem_context_free(struct kref *ctx_ref);
2611struct drm_i915_gem_object *
2612i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
2509static inline void i915_gem_context_reference(struct intel_context *ctx) 2613static inline void i915_gem_context_reference(struct intel_context *ctx)
2510{ 2614{
2511 kref_get(&ctx->ref); 2615 kref_get(&ctx->ref);
@@ -2526,8 +2630,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2526int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 2630int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2527 struct drm_file *file); 2631 struct drm_file *file);
2528 2632
2529/* i915_gem_render_state.c */
2530int i915_gem_render_state_init(struct intel_engine_cs *ring);
2531/* i915_gem_evict.c */ 2633/* i915_gem_evict.c */
2532int __must_check i915_gem_evict_something(struct drm_device *dev, 2634int __must_check i915_gem_evict_something(struct drm_device *dev,
2533 struct i915_address_space *vm, 2635 struct i915_address_space *vm,
@@ -2595,6 +2697,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
2595int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 2697int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2596 const struct i915_error_state_file_priv *error); 2698 const struct i915_error_state_file_priv *error);
2597int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 2699int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2700 struct drm_i915_private *i915,
2598 size_t count, loff_t pos); 2701 size_t count, loff_t pos);
2599static inline void i915_error_state_buf_release( 2702static inline void i915_error_state_buf_release(
2600 struct drm_i915_error_state_buf *eb) 2703 struct drm_i915_error_state_buf *eb)
@@ -2609,7 +2712,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2609void i915_destroy_error_state(struct drm_device *dev); 2712void i915_destroy_error_state(struct drm_device *dev);
2610 2713
2611void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 2714void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2612const char *i915_cache_level_str(int type); 2715const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
2613 2716
2614/* i915_cmd_parser.c */ 2717/* i915_cmd_parser.c */
2615int i915_cmd_parser_get_version(void); 2718int i915_cmd_parser_get_version(void);
@@ -2701,6 +2804,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2701extern void i915_redisable_vga(struct drm_device *dev); 2804extern void i915_redisable_vga(struct drm_device *dev);
2702extern void i915_redisable_vga_power_on(struct drm_device *dev); 2805extern void i915_redisable_vga_power_on(struct drm_device *dev);
2703extern bool intel_fbc_enabled(struct drm_device *dev); 2806extern bool intel_fbc_enabled(struct drm_device *dev);
2807extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
2704extern void intel_disable_fbc(struct drm_device *dev); 2808extern void intel_disable_fbc(struct drm_device *dev);
2705extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 2809extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2706extern void intel_init_pch_refclk(struct drm_device *dev); 2810extern void intel_init_pch_refclk(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad55b06a3cb1..28f91df2604d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
60static int i915_gem_shrinker_oom(struct notifier_block *nb, 60static int i915_gem_shrinker_oom(struct notifier_block *nb,
61 unsigned long event, 61 unsigned long event,
62 void *ptr); 62 void *ptr);
63static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
64static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 63static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
65 64
66static bool cpu_cache_is_coherent(struct drm_device *dev, 65static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1085,7 +1084,13 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
1085 if (i915_terminally_wedged(error)) 1084 if (i915_terminally_wedged(error))
1086 return -EIO; 1085 return -EIO;
1087 1086
1088 return -EAGAIN; 1087 /*
1088 * Check if GPU Reset is in progress - we need intel_ring_begin
1089 * to work properly to reinit the hw state while the gpu is
1090 * still marked as reset-in-progress. Handle this with a flag.
1091 */
1092 if (!error->reload_in_reset)
1093 return -EAGAIN;
1089 } 1094 }
1090 1095
1091 return 0; 1096 return 0;
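Together with the set-side in i915_reset() earlier in this diff, reload_in_reset implements a small "reinit window": ordinary waiters still back off with -EAGAIN while a reset is pending, but the reset path itself may touch the rings. A condensed sketch of the combined control flow (simplified; i915_reset_in_progress() stands for the surrounding check, error handling elided):

	/* i915_reset(): open the window, reinit, close it again. */
	dev_priv->gpu_error.reload_in_reset = true;
	ret = i915_gem_init_hw(dev);	/* may reach i915_gem_check_wedge() */
	dev_priv->gpu_error.reload_in_reset = false;

	/* i915_gem_check_wedge(): */
	if (i915_reset_in_progress(error)) {
		if (i915_terminally_wedged(error))
			return -EIO;
		if (!error->reload_in_reset)
			return -EAGAIN;	/* ordinary waiter: back off */
		/* reset path: fall through and allow the access */
	}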
@@ -1735,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1735 * offsets on purgeable objects by truncating it and marking it purged, 1740 * offsets on purgeable objects by truncating it and marking it purged,
1736 * which prevents userspace from ever using that object again. 1741 * which prevents userspace from ever using that object again.
1737 */ 1742 */
1738 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); 1743 i915_gem_shrink(dev_priv,
1744 obj->base.size >> PAGE_SHIFT,
1745 I915_SHRINK_BOUND |
1746 I915_SHRINK_UNBOUND |
1747 I915_SHRINK_PURGEABLE);
1739 ret = drm_gem_create_mmap_offset(&obj->base); 1748 ret = drm_gem_create_mmap_offset(&obj->base);
1740 if (ret != -ENOSPC) 1749 if (ret != -ENOSPC)
1741 goto out; 1750 goto out;
@@ -1932,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1932 return 0; 1941 return 0;
1933} 1942}
1934 1943
1935static unsigned long 1944unsigned long
1936__i915_gem_shrink(struct drm_i915_private *dev_priv, long target, 1945i915_gem_shrink(struct drm_i915_private *dev_priv,
1937 bool purgeable_only) 1946 long target, unsigned flags)
1938{ 1947{
1939 struct list_head still_in_list; 1948 const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
1940 struct drm_i915_gem_object *obj;
1941 unsigned long count = 0; 1949 unsigned long count = 0;
1942 1950
1943 /* 1951 /*
@@ -1959,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1959 * dev->struct_mutex and so we won't ever be able to observe an 1967 * dev->struct_mutex and so we won't ever be able to observe an
1960 * object on the bound_list with a reference count equals 0. 1968 * object on the bound_list with a reference count equals 0.
1961 */ 1969 */
1962 INIT_LIST_HEAD(&still_in_list); 1970 if (flags & I915_SHRINK_UNBOUND) {
1963 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) { 1971 struct list_head still_in_list;
1964 obj = list_first_entry(&dev_priv->mm.unbound_list,
1965 typeof(*obj), global_list);
1966 list_move_tail(&obj->global_list, &still_in_list);
1967 1972
1968 if (!i915_gem_object_is_purgeable(obj) && purgeable_only) 1973 INIT_LIST_HEAD(&still_in_list);
1969 continue; 1974 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1975 struct drm_i915_gem_object *obj;
1970 1976
1971 drm_gem_object_reference(&obj->base); 1977 obj = list_first_entry(&dev_priv->mm.unbound_list,
1978 typeof(*obj), global_list);
1979 list_move_tail(&obj->global_list, &still_in_list);
1972 1980
1973 if (i915_gem_object_put_pages(obj) == 0) 1981 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1974 count += obj->base.size >> PAGE_SHIFT; 1982 continue;
1975 1983
1976 drm_gem_object_unreference(&obj->base); 1984 drm_gem_object_reference(&obj->base);
1985
1986 if (i915_gem_object_put_pages(obj) == 0)
1987 count += obj->base.size >> PAGE_SHIFT;
1988
1989 drm_gem_object_unreference(&obj->base);
1990 }
1991 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1977 } 1992 }
1978 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1979 1993
1980 INIT_LIST_HEAD(&still_in_list); 1994 if (flags & I915_SHRINK_BOUND) {
1981 while (count < target && !list_empty(&dev_priv->mm.bound_list)) { 1995 struct list_head still_in_list;
1982 struct i915_vma *vma, *v;
1983 1996
1984 obj = list_first_entry(&dev_priv->mm.bound_list, 1997 INIT_LIST_HEAD(&still_in_list);
1985 typeof(*obj), global_list); 1998 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1986 list_move_tail(&obj->global_list, &still_in_list); 1999 struct drm_i915_gem_object *obj;
2000 struct i915_vma *vma, *v;
1987 2001
1988 if (!i915_gem_object_is_purgeable(obj) && purgeable_only) 2002 obj = list_first_entry(&dev_priv->mm.bound_list,
1989 continue; 2003 typeof(*obj), global_list);
2004 list_move_tail(&obj->global_list, &still_in_list);
1990 2005
1991 drm_gem_object_reference(&obj->base); 2006 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
2007 continue;
1992 2008
1993 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) 2009 drm_gem_object_reference(&obj->base);
1994 if (i915_vma_unbind(vma))
1995 break;
1996 2010
1997 if (i915_gem_object_put_pages(obj) == 0) 2011 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1998 count += obj->base.size >> PAGE_SHIFT; 2012 if (i915_vma_unbind(vma))
2013 break;
1999 2014
2000 drm_gem_object_unreference(&obj->base); 2015 if (i915_gem_object_put_pages(obj) == 0)
2016 count += obj->base.size >> PAGE_SHIFT;
2017
2018 drm_gem_object_unreference(&obj->base);
2019 }
2020 list_splice(&still_in_list, &dev_priv->mm.bound_list);
2001 } 2021 }
2002 list_splice(&still_in_list, &dev_priv->mm.bound_list);
2003 2022
2004 return count; 2023 return count;
2005} 2024}
2006 2025
2007static unsigned long 2026static unsigned long
2008i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2009{
2010 return __i915_gem_shrink(dev_priv, target, true);
2011}
2012
2013static unsigned long
2014i915_gem_shrink_all(struct drm_i915_private *dev_priv) 2027i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2015{ 2028{
2016 i915_gem_evict_everything(dev_priv->dev); 2029 i915_gem_evict_everything(dev_priv->dev);
2017 return __i915_gem_shrink(dev_priv, LONG_MAX, false); 2030 return i915_gem_shrink(dev_priv, LONG_MAX,
2031 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
2018} 2032}
2019 2033
2020static int 2034static int
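With purging folded into the general shrinker, callers compose the I915_SHRINK_* flags; a sketch of the typical two-pass use, mirroring the calls visible elsewhere in this patch (target is in pages):

	unsigned long freed;

	/* Pass 1: reclaim only purgeable objects, bound or unbound. */
	freed = i915_gem_shrink(dev_priv, target,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);

	/* Pass 2: if that was not enough, shrink everything else too. */
	if (freed < target)
		freed += i915_gem_shrink(dev_priv, target - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);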
@@ -2061,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2061 for (i = 0; i < page_count; i++) { 2075 for (i = 0; i < page_count; i++) {
2062 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2076 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2063 if (IS_ERR(page)) { 2077 if (IS_ERR(page)) {
2064 i915_gem_purge(dev_priv, page_count); 2078 i915_gem_shrink(dev_priv,
2079 page_count,
2080 I915_SHRINK_BOUND |
2081 I915_SHRINK_UNBOUND |
2082 I915_SHRINK_PURGEABLE);
2065 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2083 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2066 } 2084 }
2067 if (IS_ERR(page)) { 2085 if (IS_ERR(page)) {
@@ -2163,8 +2181,6 @@ static void
2163i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 2181i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2164 struct intel_engine_cs *ring) 2182 struct intel_engine_cs *ring)
2165{ 2183{
2166 struct drm_device *dev = obj->base.dev;
2167 struct drm_i915_private *dev_priv = dev->dev_private;
2168 u32 seqno = intel_ring_get_seqno(ring); 2184 u32 seqno = intel_ring_get_seqno(ring);
2169 2185
2170 BUG_ON(ring == NULL); 2186 BUG_ON(ring == NULL);
@@ -2183,19 +2199,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2183 list_move_tail(&obj->ring_list, &ring->active_list); 2199 list_move_tail(&obj->ring_list, &ring->active_list);
2184 2200
2185 obj->last_read_seqno = seqno; 2201 obj->last_read_seqno = seqno;
2186
2187 if (obj->fenced_gpu_access) {
2188 obj->last_fenced_seqno = seqno;
2189
2190 /* Bump MRU to take account of the delayed flush */
2191 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2192 struct drm_i915_fence_reg *reg;
2193
2194 reg = &dev_priv->fence_regs[obj->fence_reg];
2195 list_move_tail(&reg->lru_list,
2196 &dev_priv->mm.fence_list);
2197 }
2198 }
2199} 2202}
2200 2203
2201void i915_vma_move_to_active(struct i915_vma *vma, 2204void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2231,7 +2234,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2231 obj->base.write_domain = 0; 2234 obj->base.write_domain = 0;
2232 2235
2233 obj->last_fenced_seqno = 0; 2236 obj->last_fenced_seqno = 0;
2234 obj->fenced_gpu_access = false;
2235 2237
2236 obj->active = 0; 2238 obj->active = 0;
2237 drm_gem_object_unreference(&obj->base); 2239 drm_gem_object_unreference(&obj->base);
@@ -2329,10 +2331,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
2329{ 2331{
2330 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2332 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2331 struct drm_i915_gem_request *request; 2333 struct drm_i915_gem_request *request;
2334 struct intel_ringbuffer *ringbuf;
2332 u32 request_ring_position, request_start; 2335 u32 request_ring_position, request_start;
2333 int ret; 2336 int ret;
2334 2337
2335 request_start = intel_ring_get_tail(ring->buffer); 2338 request = ring->preallocated_lazy_request;
2339 if (WARN_ON(request == NULL))
2340 return -ENOMEM;
2341
2342 if (i915.enable_execlists) {
2343 struct intel_context *ctx = request->ctx;
2344 ringbuf = ctx->engine[ring->id].ringbuf;
2345 } else
2346 ringbuf = ring->buffer;
2347
2348 request_start = intel_ring_get_tail(ringbuf);
2336 /* 2349 /*
2337 * Emit any outstanding flushes - execbuf can fail to emit the flush 2350 * Emit any outstanding flushes - execbuf can fail to emit the flush
2338 * after having emitted the batchbuffer command. Hence we need to fix 2351 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2340,24 +2353,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
2340 * is that the flush _must_ happen before the next request, no matter 2353 * is that the flush _must_ happen before the next request, no matter
2341 * what. 2354 * what.
2342 */ 2355 */
2343 ret = intel_ring_flush_all_caches(ring); 2356 if (i915.enable_execlists) {
2344 if (ret) 2357 ret = logical_ring_flush_all_caches(ringbuf);
2345 return ret; 2358 if (ret)
2346 2359 return ret;
2347 request = ring->preallocated_lazy_request; 2360 } else {
2348 if (WARN_ON(request == NULL)) 2361 ret = intel_ring_flush_all_caches(ring);
2349 return -ENOMEM; 2362 if (ret)
2363 return ret;
2364 }
2350 2365
2351 /* Record the position of the start of the request so that 2366 /* Record the position of the start of the request so that
2352 * should we detect the updated seqno part-way through the 2367 * should we detect the updated seqno part-way through the
2353 * GPU processing the request, we never over-estimate the 2368 * GPU processing the request, we never over-estimate the
2354 * position of the head. 2369 * position of the head.
2355 */ 2370 */
2356 request_ring_position = intel_ring_get_tail(ring->buffer); 2371 request_ring_position = intel_ring_get_tail(ringbuf);
2357 2372
2358 ret = ring->add_request(ring); 2373 if (i915.enable_execlists) {
2359 if (ret) 2374 ret = ring->emit_request(ringbuf);
2360 return ret; 2375 if (ret)
2376 return ret;
2377 } else {
2378 ret = ring->add_request(ring);
2379 if (ret)
2380 return ret;
2381 }
2361 2382
2362 request->seqno = intel_ring_get_seqno(ring); 2383 request->seqno = intel_ring_get_seqno(ring);
2363 request->ring = ring; 2384 request->ring = ring;
@@ -2372,12 +2393,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
2372 */ 2393 */
2373 request->batch_obj = obj; 2394 request->batch_obj = obj;
2374 2395
2375 /* Hold a reference to the current context so that we can inspect 2396 if (!i915.enable_execlists) {
2376 * it later in case a hangcheck error event fires. 2397 /* Hold a reference to the current context so that we can inspect
2377 */ 2398 * it later in case a hangcheck error event fires.
2378 request->ctx = ring->last_context; 2399 */
2379 if (request->ctx) 2400 request->ctx = ring->last_context;
2380 i915_gem_context_reference(request->ctx); 2401 if (request->ctx)
2402 i915_gem_context_reference(request->ctx);
2403 }
2381 2404
2382 request->emitted_jiffies = jiffies; 2405 request->emitted_jiffies = jiffies;
2383 list_add_tail(&request->list, &ring->request_list); 2406 list_add_tail(&request->list, &ring->request_list);
@@ -2548,6 +2571,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2548 i915_gem_free_request(request); 2571 i915_gem_free_request(request);
2549 } 2572 }
2550 2573
2574 while (!list_empty(&ring->execlist_queue)) {
2575 struct intel_ctx_submit_request *submit_req;
2576
2577 submit_req = list_first_entry(&ring->execlist_queue,
2578 struct intel_ctx_submit_request,
2579 execlist_link);
2580 list_del(&submit_req->execlist_link);
2581 intel_runtime_pm_put(dev_priv);
2582 i915_gem_context_unreference(submit_req->ctx);
2583 kfree(submit_req);
2584 }
2585
 2551 /* These may not have been flushed before the reset, do so now */ 2586 /* These may not have been flushed before the reset, do so now */
2552 kfree(ring->preallocated_lazy_request); 2587 kfree(ring->preallocated_lazy_request);
2553 ring->preallocated_lazy_request = NULL; 2588 ring->preallocated_lazy_request = NULL;
@@ -2632,6 +2667,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2632 2667
2633 while (!list_empty(&ring->request_list)) { 2668 while (!list_empty(&ring->request_list)) {
2634 struct drm_i915_gem_request *request; 2669 struct drm_i915_gem_request *request;
2670 struct intel_ringbuffer *ringbuf;
2635 2671
2636 request = list_first_entry(&ring->request_list, 2672 request = list_first_entry(&ring->request_list,
2637 struct drm_i915_gem_request, 2673 struct drm_i915_gem_request,
@@ -2641,12 +2677,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2641 break; 2677 break;
2642 2678
2643 trace_i915_gem_request_retire(ring, request->seqno); 2679 trace_i915_gem_request_retire(ring, request->seqno);
2680
2681 /* This is one of the few common intersection points
2682 * between legacy ringbuffer submission and execlists:
2683 * we need to tell them apart in order to find the correct
 2684 * ringbuffer to which the request belongs.
2685 */
2686 if (i915.enable_execlists) {
2687 struct intel_context *ctx = request->ctx;
2688 ringbuf = ctx->engine[ring->id].ringbuf;
2689 } else
2690 ringbuf = ring->buffer;
2691
2644 /* We know the GPU must have read the request to have 2692 /* We know the GPU must have read the request to have
2645 * sent us the seqno + interrupt, so use the position 2693 * sent us the seqno + interrupt, so use the position
2646 * of tail of the request to update the last known position 2694 * of tail of the request to update the last known position
2647 * of the GPU head. 2695 * of the GPU head.
2648 */ 2696 */
2649 ring->buffer->last_retired_head = request->tail; 2697 ringbuf->last_retired_head = request->tail;
2650 2698
2651 i915_gem_free_request(request); 2699 i915_gem_free_request(request);
2652 } 2700 }
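
The execlists/legacy selection above also appears in __i915_add_request(); a minimal helper capturing the repeated pattern might look as follows (hypothetical — the patch open-codes it at each site, and this assumes request->ctx is always non-NULL in execlists mode):

	static struct intel_ringbuffer *
	request_to_ringbuf(struct intel_engine_cs *ring,
			   struct drm_i915_gem_request *request)
	{
		/* Execlists requests carry their context, and each context
		 * owns one ringbuffer per engine; legacy requests use the
		 * engine's single global ringbuffer. */
		if (i915.enable_execlists)
			return request->ctx->engine[ring->id].ringbuf;

		return ring->buffer;
	}
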
@@ -2908,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
2908 * cause memory corruption through use-after-free. 2956 * cause memory corruption through use-after-free.
2909 */ 2957 */
2910 2958
2959 /* Throw away the active reference before moving to the unbound list */
2960 i915_gem_object_retire(obj);
2961
2911 if (i915_is_ggtt(vma->vm)) { 2962 if (i915_is_ggtt(vma->vm)) {
2912 i915_gem_object_finish_gtt(obj); 2963 i915_gem_object_finish_gtt(obj);
2913 2964
@@ -2922,9 +2973,8 @@ int i915_vma_unbind(struct i915_vma *vma)
2922 vma->unbind_vma(vma); 2973 vma->unbind_vma(vma);
2923 2974
2924 list_del_init(&vma->mm_list); 2975 list_del_init(&vma->mm_list);
2925 /* Avoid an unnecessary call to unbind on rebind. */
2926 if (i915_is_ggtt(vma->vm)) 2976 if (i915_is_ggtt(vma->vm))
2927 obj->map_and_fenceable = true; 2977 obj->map_and_fenceable = false;
2928 2978
2929 drm_mm_remove_node(&vma->node); 2979 drm_mm_remove_node(&vma->node);
2930 i915_gem_vma_destroy(vma); 2980 i915_gem_vma_destroy(vma);
@@ -2953,9 +3003,11 @@ int i915_gpu_idle(struct drm_device *dev)
2953 3003
2954 /* Flush everything onto the inactive list. */ 3004 /* Flush everything onto the inactive list. */
2955 for_each_ring(ring, dev_priv, i) { 3005 for_each_ring(ring, dev_priv, i) {
2956 ret = i915_switch_context(ring, ring->default_context); 3006 if (!i915.enable_execlists) {
2957 if (ret) 3007 ret = i915_switch_context(ring, ring->default_context);
2958 return ret; 3008 if (ret)
3009 return ret;
3010 }
2959 3011
2960 ret = intel_ring_idle(ring); 3012 ret = intel_ring_idle(ring);
2961 if (ret) 3013 if (ret)
@@ -3169,7 +3221,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3169 obj->last_fenced_seqno = 0; 3221 obj->last_fenced_seqno = 0;
3170 } 3222 }
3171 3223
3172 obj->fenced_gpu_access = false;
3173 return 0; 3224 return 0;
3174} 3225}
3175 3226
@@ -3276,6 +3327,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3276 return 0; 3327 return 0;
3277 } 3328 }
3278 } else if (enable) { 3329 } else if (enable) {
3330 if (WARN_ON(!obj->map_and_fenceable))
3331 return -EINVAL;
3332
3279 reg = i915_find_fence_reg(dev); 3333 reg = i915_find_fence_reg(dev);
3280 if (IS_ERR(reg)) 3334 if (IS_ERR(reg))
3281 return PTR_ERR(reg); 3335 return PTR_ERR(reg);
@@ -3297,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3297 return 0; 3351 return 0;
3298} 3352}
3299 3353
3300static bool i915_gem_valid_gtt_space(struct drm_device *dev, 3354static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3301 struct drm_mm_node *gtt_space,
3302 unsigned long cache_level) 3355 unsigned long cache_level)
3303{ 3356{
3357 struct drm_mm_node *gtt_space = &vma->node;
3304 struct drm_mm_node *other; 3358 struct drm_mm_node *other;
3305 3359
3306 /* On non-LLC machines we have to be careful when putting differing 3360 /*
3307 * types of snoopable memory together to avoid the prefetcher 3361 * On some machines we have to be careful when putting differing types
3308 * crossing memory domains and dying. 3362 * of snoopable memory together to avoid the prefetcher crossing memory
3363 * domains and dying. During vm initialisation, we decide whether or not
3364 * these constraints apply and set the drm_mm.color_adjust
3365 * appropriately.
3309 */ 3366 */
3310 if (HAS_LLC(dev)) 3367 if (vma->vm->mm.color_adjust == NULL)
3311 return true; 3368 return true;
3312 3369
3313 if (!drm_mm_node_allocated(gtt_space)) 3370 if (!drm_mm_node_allocated(gtt_space))
@@ -3445,8 +3502,7 @@ search_free:
3445 3502
3446 goto err_free_vma; 3503 goto err_free_vma;
3447 } 3504 }
3448 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, 3505 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3449 obj->cache_level))) {
3450 ret = -EINVAL; 3506 ret = -EINVAL;
3451 goto err_remove_node; 3507 goto err_remove_node;
3452 } 3508 }
@@ -3586,11 +3642,12 @@ int
3586i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3642i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3587{ 3643{
3588 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3644 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3645 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3589 uint32_t old_write_domain, old_read_domains; 3646 uint32_t old_write_domain, old_read_domains;
3590 int ret; 3647 int ret;
3591 3648
3592 /* Not valid to be called on unbound objects. */ 3649 /* Not valid to be called on unbound objects. */
3593 if (!i915_gem_obj_bound_any(obj)) 3650 if (vma == NULL)
3594 return -EINVAL; 3651 return -EINVAL;
3595 3652
3596 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3653 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3632,13 +3689,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3632 old_write_domain); 3689 old_write_domain);
3633 3690
3634 /* And bump the LRU for this access */ 3691 /* And bump the LRU for this access */
3635 if (i915_gem_object_is_inactive(obj)) { 3692 if (i915_gem_object_is_inactive(obj))
3636 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); 3693 list_move_tail(&vma->mm_list,
3637 if (vma) 3694 &dev_priv->gtt.base.inactive_list);
3638 list_move_tail(&vma->mm_list,
3639 &dev_priv->gtt.base.inactive_list);
3640
3641 }
3642 3695
3643 return 0; 3696 return 0;
3644} 3697}
@@ -3659,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3659 } 3712 }
3660 3713
3661 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 3714 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3662 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { 3715 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3663 ret = i915_vma_unbind(vma); 3716 ret = i915_vma_unbind(vma);
3664 if (ret) 3717 if (ret)
3665 return ret; 3718 return ret;
@@ -3802,9 +3855,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
3802{ 3855{
3803 struct i915_vma *vma; 3856 struct i915_vma *vma;
3804 3857
3805 if (list_empty(&obj->vma_list))
3806 return false;
3807
3808 vma = i915_gem_obj_to_ggtt(obj); 3858 vma = i915_gem_obj_to_ggtt(obj);
3809 if (!vma) 3859 if (!vma)
3810 return false; 3860 return false;
@@ -4331,8 +4381,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4331 4381
4332 obj->fence_reg = I915_FENCE_REG_NONE; 4382 obj->fence_reg = I915_FENCE_REG_NONE;
4333 obj->madv = I915_MADV_WILLNEED; 4383 obj->madv = I915_MADV_WILLNEED;
4334 /* Avoid an unnecessary call to unbind on the first bind. */
4335 obj->map_and_fenceable = true;
4336 4384
4337 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); 4385 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4338} 4386}
@@ -4493,12 +4541,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4493 4541
4494void i915_gem_vma_destroy(struct i915_vma *vma) 4542void i915_gem_vma_destroy(struct i915_vma *vma)
4495{ 4543{
4544 struct i915_address_space *vm = NULL;
4496 WARN_ON(vma->node.allocated); 4545 WARN_ON(vma->node.allocated);
4497 4546
4498 /* Keep the vma as a placeholder in the execbuffer reservation lists */ 4547 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4499 if (!list_empty(&vma->exec_list)) 4548 if (!list_empty(&vma->exec_list))
4500 return; 4549 return;
4501 4550
4551 vm = vma->vm;
4552
4553 if (!i915_is_ggtt(vm))
4554 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4555
4502 list_del(&vma->vma_link); 4556 list_del(&vma->vma_link);
4503 4557
4504 kfree(vma); 4558 kfree(vma);
@@ -4512,7 +4566,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
4512 int i; 4566 int i;
4513 4567
4514 for_each_ring(ring, dev_priv, i) 4568 for_each_ring(ring, dev_priv, i)
4515 intel_stop_ring_buffer(ring); 4569 dev_priv->gt.stop_ring(ring);
4516} 4570}
4517 4571
4518int 4572int
@@ -4629,11 +4683,46 @@ intel_enable_blt(struct drm_device *dev)
4629 return true; 4683 return true;
4630} 4684}
4631 4685
4632static int i915_gem_init_rings(struct drm_device *dev) 4686static void init_unused_ring(struct drm_device *dev, u32 base)
4687{
4688 struct drm_i915_private *dev_priv = dev->dev_private;
4689
4690 I915_WRITE(RING_CTL(base), 0);
4691 I915_WRITE(RING_HEAD(base), 0);
4692 I915_WRITE(RING_TAIL(base), 0);
4693 I915_WRITE(RING_START(base), 0);
4694}
4695
4696static void init_unused_rings(struct drm_device *dev)
4697{
4698 if (IS_I830(dev)) {
4699 init_unused_ring(dev, PRB1_BASE);
4700 init_unused_ring(dev, SRB0_BASE);
4701 init_unused_ring(dev, SRB1_BASE);
4702 init_unused_ring(dev, SRB2_BASE);
4703 init_unused_ring(dev, SRB3_BASE);
4704 } else if (IS_GEN2(dev)) {
4705 init_unused_ring(dev, SRB0_BASE);
4706 init_unused_ring(dev, SRB1_BASE);
4707 } else if (IS_GEN3(dev)) {
4708 init_unused_ring(dev, PRB1_BASE);
4709 init_unused_ring(dev, PRB2_BASE);
4710 }
4711}
4712
4713int i915_gem_init_rings(struct drm_device *dev)
4633{ 4714{
4634 struct drm_i915_private *dev_priv = dev->dev_private; 4715 struct drm_i915_private *dev_priv = dev->dev_private;
4635 int ret; 4716 int ret;
4636 4717
4718 /*
4719 * At least 830 can leave some of the unused rings
4720 * "active" (ie. head != tail) after resume which
4721 * will prevent c3 entry. Makes sure all unused rings
4722 * are totally idle.
4723 */
4724 init_unused_rings(dev);
4725
4637 ret = intel_init_render_ring_buffer(dev); 4726 ret = intel_init_render_ring_buffer(dev);
4638 if (ret) 4727 if (ret)
4639 return ret; 4728 return ret;
@@ -4712,7 +4801,7 @@ i915_gem_init_hw(struct drm_device *dev)
4712 4801
4713 i915_gem_init_swizzling(dev); 4802 i915_gem_init_swizzling(dev);
4714 4803
4715 ret = i915_gem_init_rings(dev); 4804 ret = dev_priv->gt.init_rings(dev);
4716 if (ret) 4805 if (ret)
4717 return ret; 4806 return ret;
4718 4807
@@ -4730,6 +4819,14 @@ i915_gem_init_hw(struct drm_device *dev)
4730 if (ret && ret != -EIO) { 4819 if (ret && ret != -EIO) {
4731 DRM_ERROR("Context enable failed %d\n", ret); 4820 DRM_ERROR("Context enable failed %d\n", ret);
4732 i915_gem_cleanup_ringbuffer(dev); 4821 i915_gem_cleanup_ringbuffer(dev);
4822
4823 return ret;
4824 }
4825
4826 ret = i915_ppgtt_init_hw(dev);
4827 if (ret && ret != -EIO) {
4828 DRM_ERROR("PPGTT enable failed %d\n", ret);
4829 i915_gem_cleanup_ringbuffer(dev);
4733 } 4830 }
4734 4831
4735 return ret; 4832 return ret;
@@ -4740,6 +4837,9 @@ int i915_gem_init(struct drm_device *dev)
4740 struct drm_i915_private *dev_priv = dev->dev_private; 4837 struct drm_i915_private *dev_priv = dev->dev_private;
4741 int ret; 4838 int ret;
4742 4839
4840 i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4841 i915.enable_execlists);
4842
4743 mutex_lock(&dev->struct_mutex); 4843 mutex_lock(&dev->struct_mutex);
4744 4844
4745 if (IS_VALLEYVIEW(dev)) { 4845 if (IS_VALLEYVIEW(dev)) {
@@ -4750,7 +4850,24 @@ int i915_gem_init(struct drm_device *dev)
4750 DRM_DEBUG_DRIVER("allow wake ack timed out\n"); 4850 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4751 } 4851 }
4752 4852
4753 i915_gem_init_userptr(dev); 4853 if (!i915.enable_execlists) {
4854 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4855 dev_priv->gt.init_rings = i915_gem_init_rings;
4856 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4857 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4858 } else {
4859 dev_priv->gt.do_execbuf = intel_execlists_submission;
4860 dev_priv->gt.init_rings = intel_logical_rings_init;
4861 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4862 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4863 }
4864
4865 ret = i915_gem_init_userptr(dev);
4866 if (ret) {
4867 mutex_unlock(&dev->struct_mutex);
4868 return ret;
4869 }
4870
4754 i915_gem_init_global_gtt(dev); 4871 i915_gem_init_global_gtt(dev);
4755 4872
4756 ret = i915_gem_context_init(dev); 4873 ret = i915_gem_context_init(dev);
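
The dev_priv->gt callbacks assigned in the hunk above form a small dispatch table so that common GEM code stays agnostic of the submission backend; i915_gem_stop_ringbuffers(), i915_gem_init_hw(), i915_gem_cleanup_ringbuffer() and i915_gem_do_execbuffer() all dispatch through it. A sketch of the table's shape, inferred from the assignments (the real definition lives in i915_drv.h, which this diff does not include, so the struct name here is hypothetical):

	struct i915_gem_gt_funcs {
		/* execbuf submission, matching i915_gem_ringbuffer_submission() */
		int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct intel_context *ctx,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct list_head *vmas,
				  struct drm_i915_gem_object *batch_obj,
				  u64 exec_start, u32 flags);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	};
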
@@ -4785,7 +4902,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4785 int i; 4902 int i;
4786 4903
4787 for_each_ring(ring, dev_priv, i) 4904 for_each_ring(ring, dev_priv, i)
4788 intel_cleanup_ring_buffer(ring); 4905 dev_priv->gt.cleanup_ring(ring);
4789} 4906}
4790 4907
4791int 4908int
@@ -5097,9 +5214,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5097 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5214 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5098 struct i915_vma *vma; 5215 struct i915_vma *vma;
5099 5216
5100 if (!dev_priv->mm.aliasing_ppgtt || 5217 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5101 vm == &dev_priv->mm.aliasing_ppgtt->base)
5102 vm = &dev_priv->gtt.base;
5103 5218
5104 list_for_each_entry(vma, &o->vma_list, vma_link) { 5219 list_for_each_entry(vma, &o->vma_list, vma_link) {
5105 if (vma->vm == vm) 5220 if (vma->vm == vm)
@@ -5140,9 +5255,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5140 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5255 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5141 struct i915_vma *vma; 5256 struct i915_vma *vma;
5142 5257
5143 if (!dev_priv->mm.aliasing_ppgtt || 5258 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5144 vm == &dev_priv->mm.aliasing_ppgtt->base)
5145 vm = &dev_priv->gtt.base;
5146 5259
5147 BUG_ON(list_empty(&o->vma_list)); 5260 BUG_ON(list_empty(&o->vma_list));
5148 5261
@@ -5165,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5165 if (!i915_gem_shrinker_lock(dev, &unlock)) 5278 if (!i915_gem_shrinker_lock(dev, &unlock))
5166 return SHRINK_STOP; 5279 return SHRINK_STOP;
5167 5280
5168 freed = i915_gem_purge(dev_priv, sc->nr_to_scan); 5281 freed = i915_gem_shrink(dev_priv,
5282 sc->nr_to_scan,
5283 I915_SHRINK_BOUND |
5284 I915_SHRINK_UNBOUND |
5285 I915_SHRINK_PURGEABLE);
5169 if (freed < sc->nr_to_scan) 5286 if (freed < sc->nr_to_scan)
5170 freed += __i915_gem_shrink(dev_priv, 5287 freed += i915_gem_shrink(dev_priv,
5171 sc->nr_to_scan - freed, 5288 sc->nr_to_scan - freed,
5172 false); 5289 I915_SHRINK_BOUND |
5290 I915_SHRINK_UNBOUND);
5173 if (unlock) 5291 if (unlock)
5174 mutex_unlock(&dev->struct_mutex); 5292 mutex_unlock(&dev->struct_mutex);
5175 5293
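
The shrinker scan above is a two-pass policy: reclaim only purgeable objects first, and fall back to unbinding everything else only if the target was not met. Restated as a standalone helper (a sketch only — the driver open-codes this in i915_gem_shrinker_scan()):

	static unsigned long
	i915_shrink_two_pass(struct drm_i915_private *dev_priv,
			     unsigned long target)
	{
		unsigned long freed;

		/* Pass 1: only objects userspace marked purgeable
		 * (I915_MADV_DONTNEED), whether bound or unbound. */
		freed = i915_gem_shrink(dev_priv, target,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);

		/* Pass 2: everything else, if we are still short. */
		if (freed < target)
			freed += i915_gem_shrink(dev_priv, target - freed,
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND);

		return freed;
	}
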
@@ -5247,14 +5365,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5247{ 5365{
5248 struct i915_vma *vma; 5366 struct i915_vma *vma;
5249 5367
5250 /* This WARN has probably outlived its usefulness (callers already
5251 * WARN if they don't find the GGTT vma they expect). When removing,
5252 * remember to remove the pre-check in is_pin_display() as well */
5253 if (WARN_ON(list_empty(&obj->vma_list)))
5254 return NULL;
5255
5256 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link); 5368 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5257 if (vma->vm != obj_to_ggtt(obj)) 5369 if (vma->vm != i915_obj_to_ggtt(obj))
5258 return NULL; 5370 return NULL;
5259 5371
5260 return vma; 5372 return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3b99390e467a..a5221d8f1580 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,50 +96,6 @@
96#define GEN6_CONTEXT_ALIGN (64<<10) 96#define GEN6_CONTEXT_ALIGN (64<<10)
97#define GEN7_CONTEXT_ALIGN 4096 97#define GEN7_CONTEXT_ALIGN 4096
98 98
99static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
100{
101 struct drm_device *dev = ppgtt->base.dev;
102 struct drm_i915_private *dev_priv = dev->dev_private;
103 struct i915_address_space *vm = &ppgtt->base;
104
105 if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
106 (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
107 ppgtt->base.cleanup(&ppgtt->base);
108 return;
109 }
110
111 /*
112 * Make sure vmas are unbound before we take down the drm_mm
113 *
114 * FIXME: Proper refcounting should take care of this, this shouldn't be
115 * needed at all.
116 */
117 if (!list_empty(&vm->active_list)) {
118 struct i915_vma *vma;
119
120 list_for_each_entry(vma, &vm->active_list, mm_list)
121 if (WARN_ON(list_empty(&vma->vma_link) ||
122 list_is_singular(&vma->vma_link)))
123 break;
124
125 i915_gem_evict_vm(&ppgtt->base, true);
126 } else {
127 i915_gem_retire_requests(dev);
128 i915_gem_evict_vm(&ppgtt->base, false);
129 }
130
131 ppgtt->base.cleanup(&ppgtt->base);
132}
133
134static void ppgtt_release(struct kref *kref)
135{
136 struct i915_hw_ppgtt *ppgtt =
137 container_of(kref, struct i915_hw_ppgtt, ref);
138
139 do_ppgtt_cleanup(ppgtt);
140 kfree(ppgtt);
141}
142
143static size_t get_context_alignment(struct drm_device *dev) 99static size_t get_context_alignment(struct drm_device *dev)
144{ 100{
145 if (IS_GEN6(dev)) 101 if (IS_GEN6(dev))
@@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev)
179void i915_gem_context_free(struct kref *ctx_ref) 135void i915_gem_context_free(struct kref *ctx_ref)
180{ 136{
181 struct intel_context *ctx = container_of(ctx_ref, 137 struct intel_context *ctx = container_of(ctx_ref,
182 typeof(*ctx), ref); 138 typeof(*ctx), ref);
183 struct i915_hw_ppgtt *ppgtt = NULL;
184 139
185 if (ctx->legacy_hw_ctx.rcs_state) { 140 if (i915.enable_execlists)
186 /* We refcount even the aliasing PPGTT to keep the code symmetric */ 141 intel_lr_context_free(ctx);
187 if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev)) 142
188 ppgtt = ctx_to_ppgtt(ctx); 143 i915_ppgtt_put(ctx->ppgtt);
189 }
190 144
191 if (ppgtt)
192 kref_put(&ppgtt->ref, ppgtt_release);
193 if (ctx->legacy_hw_ctx.rcs_state) 145 if (ctx->legacy_hw_ctx.rcs_state)
194 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); 146 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
195 list_del(&ctx->link); 147 list_del(&ctx->link);
196 kfree(ctx); 148 kfree(ctx);
197} 149}
198 150
199static struct drm_i915_gem_object * 151struct drm_i915_gem_object *
200i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) 152i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
201{ 153{
202 struct drm_i915_gem_object *obj; 154 struct drm_i915_gem_object *obj;
@@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
226 return obj; 178 return obj;
227} 179}
228 180
229static struct i915_hw_ppgtt *
230create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
231{
232 struct i915_hw_ppgtt *ppgtt;
233 int ret;
234
235 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
236 if (!ppgtt)
237 return ERR_PTR(-ENOMEM);
238
239 ret = i915_gem_init_ppgtt(dev, ppgtt);
240 if (ret) {
241 kfree(ppgtt);
242 return ERR_PTR(ret);
243 }
244
245 ppgtt->ctx = ctx;
246 return ppgtt;
247}
248
249static struct intel_context * 181static struct intel_context *
250__create_hw_context(struct drm_device *dev, 182__create_hw_context(struct drm_device *dev,
251 struct drm_i915_file_private *file_priv) 183 struct drm_i915_file_private *file_priv)
252{ 184{
253 struct drm_i915_private *dev_priv = dev->dev_private; 185 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_context *ctx; 186 struct intel_context *ctx;
@@ -301,11 +233,9 @@ err_out:
301 */ 233 */
302static struct intel_context * 234static struct intel_context *
303i915_gem_create_context(struct drm_device *dev, 235i915_gem_create_context(struct drm_device *dev,
304 struct drm_i915_file_private *file_priv, 236 struct drm_i915_file_private *file_priv)
305 bool create_vm)
306{ 237{
307 const bool is_global_default_ctx = file_priv == NULL; 238 const bool is_global_default_ctx = file_priv == NULL;
308 struct drm_i915_private *dev_priv = dev->dev_private;
309 struct intel_context *ctx; 239 struct intel_context *ctx;
310 int ret = 0; 240 int ret = 0;
311 241
@@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev,
331 } 261 }
332 } 262 }
333 263
334 if (create_vm) { 264 if (USES_FULL_PPGTT(dev)) {
335 struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx); 265 struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
336 266
337 if (IS_ERR_OR_NULL(ppgtt)) { 267 if (IS_ERR_OR_NULL(ppgtt)) {
338 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", 268 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
339 PTR_ERR(ppgtt)); 269 PTR_ERR(ppgtt));
340 ret = PTR_ERR(ppgtt); 270 ret = PTR_ERR(ppgtt);
341 goto err_unpin; 271 goto err_unpin;
342 } else
343 ctx->vm = &ppgtt->base;
344
345 /* This case is reserved for the global default context and
346 * should only happen once. */
347 if (is_global_default_ctx) {
348 if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
349 ret = -EEXIST;
350 goto err_unpin;
351 }
352
353 dev_priv->mm.aliasing_ppgtt = ppgtt;
354 } 272 }
355 } else if (USES_PPGTT(dev)) { 273
356 /* For platforms which only have aliasing PPGTT, we fake the 274 ctx->ppgtt = ppgtt;
357 * address space and refcounting. */ 275 }
358 ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
359 kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
360 } else
361 ctx->vm = &dev_priv->gtt.base;
362 276
363 return ctx; 277 return ctx;
364 278
@@ -375,34 +289,23 @@ void i915_gem_context_reset(struct drm_device *dev)
375 struct drm_i915_private *dev_priv = dev->dev_private; 289 struct drm_i915_private *dev_priv = dev->dev_private;
376 int i; 290 int i;
377 291
378 /* Prevent the hardware from restoring the last context (which hung) on 292 /* In execlists mode we will unreference the context when the execlist
379 * the next switch */ 293 * queue is cleared and the requests destroyed.
294 */
295 if (i915.enable_execlists)
296 return;
297
380 for (i = 0; i < I915_NUM_RINGS; i++) { 298 for (i = 0; i < I915_NUM_RINGS; i++) {
381 struct intel_engine_cs *ring = &dev_priv->ring[i]; 299 struct intel_engine_cs *ring = &dev_priv->ring[i];
382 struct intel_context *dctx = ring->default_context;
383 struct intel_context *lctx = ring->last_context; 300 struct intel_context *lctx = ring->last_context;
384 301
385 /* Do a fake switch to the default context */ 302 if (lctx) {
386 if (lctx == dctx) 303 if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
387 continue; 304 i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
388
389 if (!lctx)
390 continue;
391 305
392 if (dctx->legacy_hw_ctx.rcs_state && i == RCS) { 306 i915_gem_context_unreference(lctx);
393 WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state, 307 ring->last_context = NULL;
394 get_context_alignment(dev), 0));
395 /* Fake a finish/inactive */
396 dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
397 dctx->legacy_hw_ctx.rcs_state->active = 0;
398 } 308 }
399
400 if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
401 i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
402
403 i915_gem_context_unreference(lctx);
404 i915_gem_context_reference(dctx);
405 ring->last_context = dctx;
406 } 309 }
407} 310}
408 311
@@ -417,7 +320,11 @@ int i915_gem_context_init(struct drm_device *dev)
417 if (WARN_ON(dev_priv->ring[RCS].default_context)) 320 if (WARN_ON(dev_priv->ring[RCS].default_context))
418 return 0; 321 return 0;
419 322
420 if (HAS_HW_CONTEXTS(dev)) { 323 if (i915.enable_execlists) {
324 /* NB: intentionally left blank. We will allocate our own
325 * backing objects as we need them, thank you very much */
326 dev_priv->hw_context_size = 0;
327 } else if (HAS_HW_CONTEXTS(dev)) {
421 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 328 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
422 if (dev_priv->hw_context_size > (1<<20)) { 329 if (dev_priv->hw_context_size > (1<<20)) {
423 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", 330 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
@@ -426,18 +333,23 @@ int i915_gem_context_init(struct drm_device *dev)
426 } 333 }
427 } 334 }
428 335
429 ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev)); 336 ctx = i915_gem_create_context(dev, NULL);
430 if (IS_ERR(ctx)) { 337 if (IS_ERR(ctx)) {
431 DRM_ERROR("Failed to create default global context (error %ld)\n", 338 DRM_ERROR("Failed to create default global context (error %ld)\n",
432 PTR_ERR(ctx)); 339 PTR_ERR(ctx));
433 return PTR_ERR(ctx); 340 return PTR_ERR(ctx);
434 } 341 }
435 342
436 /* NB: RCS will hold a ref for all rings */ 343 for (i = 0; i < I915_NUM_RINGS; i++) {
437 for (i = 0; i < I915_NUM_RINGS; i++) 344 struct intel_engine_cs *ring = &dev_priv->ring[i];
438 dev_priv->ring[i].default_context = ctx;
439 345
440 DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake"); 346 /* NB: RCS will hold a ref for all rings */
347 ring->default_context = ctx;
348 }
349
350 DRM_DEBUG_DRIVER("%s context support initialized\n",
351 i915.enable_execlists ? "LR" :
352 dev_priv->hw_context_size ? "HW" : "fake");
441 return 0; 353 return 0;
442} 354}
443 355
@@ -489,19 +401,11 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
489 struct intel_engine_cs *ring; 401 struct intel_engine_cs *ring;
490 int ret, i; 402 int ret, i;
491 403
492 /* This is the only place the aliasing PPGTT gets enabled, which means 404 BUG_ON(!dev_priv->ring[RCS].default_context);
493 * it has to happen before we bail on reset */
494 if (dev_priv->mm.aliasing_ppgtt) {
495 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
496 ppgtt->enable(ppgtt);
497 }
498 405
499 /* FIXME: We should make this work, even in reset */ 406 if (i915.enable_execlists)
500 if (i915_reset_in_progress(&dev_priv->gpu_error))
501 return 0; 407 return 0;
502 408
503 BUG_ON(!dev_priv->ring[RCS].default_context);
504
505 for_each_ring(ring, dev_priv, i) { 409 for_each_ring(ring, dev_priv, i) {
506 ret = i915_switch_context(ring, ring->default_context); 410 ret = i915_switch_context(ring, ring->default_context);
507 if (ret) 411 if (ret)
@@ -527,7 +431,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
527 idr_init(&file_priv->context_idr); 431 idr_init(&file_priv->context_idr);
528 432
529 mutex_lock(&dev->struct_mutex); 433 mutex_lock(&dev->struct_mutex);
530 ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); 434 ctx = i915_gem_create_context(dev, file_priv);
531 mutex_unlock(&dev->struct_mutex); 435 mutex_unlock(&dev->struct_mutex);
532 436
533 if (IS_ERR(ctx)) { 437 if (IS_ERR(ctx)) {
@@ -563,6 +467,7 @@ mi_set_context(struct intel_engine_cs *ring,
563 struct intel_context *new_context, 467 struct intel_context *new_context,
564 u32 hw_flags) 468 u32 hw_flags)
565{ 469{
470 u32 flags = hw_flags | MI_MM_SPACE_GTT;
566 int ret; 471 int ret;
567 472
568 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB 473 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
@@ -576,6 +481,10 @@ mi_set_context(struct intel_engine_cs *ring,
576 return ret; 481 return ret;
577 } 482 }
578 483
484 /* These flags are for resource streamer on HSW+ */
485 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
486 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
487
579 ret = intel_ring_begin(ring, 6); 488 ret = intel_ring_begin(ring, 6);
580 if (ret) 489 if (ret)
581 return ret; 490 return ret;
@@ -589,10 +498,7 @@ mi_set_context(struct intel_engine_cs *ring,
589 intel_ring_emit(ring, MI_NOOP); 498 intel_ring_emit(ring, MI_NOOP);
590 intel_ring_emit(ring, MI_SET_CONTEXT); 499 intel_ring_emit(ring, MI_SET_CONTEXT);
591 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) | 500 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
592 MI_MM_SPACE_GTT | 501 flags);
593 MI_SAVE_EXT_STATE_EN |
594 MI_RESTORE_EXT_STATE_EN |
595 hw_flags);
596 /* 502 /*
597 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP 503 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
598 * WaMiSetContext_Hang:snb,ivb,vlv 504 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -614,7 +520,6 @@ static int do_switch(struct intel_engine_cs *ring,
614{ 520{
615 struct drm_i915_private *dev_priv = ring->dev->dev_private; 521 struct drm_i915_private *dev_priv = ring->dev->dev_private;
616 struct intel_context *from = ring->last_context; 522 struct intel_context *from = ring->last_context;
617 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
618 u32 hw_flags = 0; 523 u32 hw_flags = 0;
619 bool uninitialized = false; 524 bool uninitialized = false;
620 int ret, i; 525 int ret, i;
@@ -642,8 +547,8 @@ static int do_switch(struct intel_engine_cs *ring,
642 */ 547 */
643 from = ring->last_context; 548 from = ring->last_context;
644 549
645 if (USES_FULL_PPGTT(ring->dev)) { 550 if (to->ppgtt) {
646 ret = ppgtt->switch_mm(ppgtt, ring, false); 551 ret = to->ppgtt->switch_mm(to->ppgtt, ring);
647 if (ret) 552 if (ret)
648 goto unpin_out; 553 goto unpin_out;
649 } 554 }
@@ -723,6 +628,12 @@ done:
723 ring->last_context = to; 628 ring->last_context = to;
724 629
725 if (uninitialized) { 630 if (uninitialized) {
631 if (ring->init_context) {
632 ret = ring->init_context(ring);
633 if (ret)
634 DRM_ERROR("ring init context: %d\n", ret);
635 }
636
726 ret = i915_gem_render_state_init(ring); 637 ret = i915_gem_render_state_init(ring);
727 if (ret) 638 if (ret)
728 DRM_ERROR("init render state: %d\n", ret); 639 DRM_ERROR("init render state: %d\n", ret);
@@ -743,14 +654,19 @@ unpin_out:
743 * 654 *
744 * The context life cycle is simple. The context refcount is incremented and 655 * The context life cycle is simple. The context refcount is incremented and
745 * decremented by 1 and create and destroy. If the context is in use by the GPU, 656 * decremented by 1 and create and destroy. If the context is in use by the GPU,
746 * it will have a refoucnt > 1. This allows us to destroy the context abstract 657 * it will have a refcount > 1. This allows us to destroy the context abstract
747 * object while letting the normal object tracking destroy the backing BO. 658 * object while letting the normal object tracking destroy the backing BO.
659 *
660 * This function should not be used in execlists mode. Instead the context is
661 * switched by writing to the ELSP and requests keep a reference to their
662 * context.
748 */ 663 */
749int i915_switch_context(struct intel_engine_cs *ring, 664int i915_switch_context(struct intel_engine_cs *ring,
750 struct intel_context *to) 665 struct intel_context *to)
751{ 666{
752 struct drm_i915_private *dev_priv = ring->dev->dev_private; 667 struct drm_i915_private *dev_priv = ring->dev->dev_private;
753 668
669 WARN_ON(i915.enable_execlists);
754 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 670 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
755 671
756 if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ 672 if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
@@ -766,9 +682,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
766 return do_switch(ring, to); 682 return do_switch(ring, to);
767} 683}
768 684
769static bool hw_context_enabled(struct drm_device *dev) 685static bool contexts_enabled(struct drm_device *dev)
770{ 686{
771 return to_i915(dev)->hw_context_size; 687 return i915.enable_execlists || to_i915(dev)->hw_context_size;
772} 688}
773 689
774int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 690int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -779,14 +695,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
779 struct intel_context *ctx; 695 struct intel_context *ctx;
780 int ret; 696 int ret;
781 697
782 if (!hw_context_enabled(dev)) 698 if (!contexts_enabled(dev))
783 return -ENODEV; 699 return -ENODEV;
784 700
785 ret = i915_mutex_lock_interruptible(dev); 701 ret = i915_mutex_lock_interruptible(dev);
786 if (ret) 702 if (ret)
787 return ret; 703 return ret;
788 704
789 ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); 705 ctx = i915_gem_create_context(dev, file_priv);
790 mutex_unlock(&dev->struct_mutex); 706 mutex_unlock(&dev->struct_mutex);
791 if (IS_ERR(ctx)) 707 if (IS_ERR(ctx))
792 return PTR_ERR(ctx); 708 return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bbf4b12d842e..886ff2ee7a28 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -243,7 +243,7 @@ int
243i915_gem_evict_everything(struct drm_device *dev) 243i915_gem_evict_everything(struct drm_device *dev)
244{ 244{
245 struct drm_i915_private *dev_priv = dev->dev_private; 245 struct drm_i915_private *dev_priv = dev->dev_private;
246 struct i915_address_space *vm; 246 struct i915_address_space *vm, *v;
247 bool lists_empty = true; 247 bool lists_empty = true;
248 int ret; 248 int ret;
249 249
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
270 i915_gem_retire_requests(dev); 270 i915_gem_retire_requests(dev);
271 271
272 /* Having flushed everything, unbind() should never raise an error */ 272 /* Having flushed everything, unbind() should never raise an error */
273 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 273 list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
274 WARN_ON(i915_gem_evict_vm(vm, false)); 274 WARN_ON(i915_gem_evict_vm(vm, false));
275 275
276 return 0; 276 return 0;
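
The switch to list_for_each_entry_safe() matters because PPGTTs are now refcounted: evicting a VM can drop its last reference (via i915_gem_vma_destroy() -> i915_ppgtt_put()) and unlink it from dev_priv->vm_list while the loop is still walking the list. The safe variant caches the successor before the body runs, so freeing the current entry is harmless — the general idiom, assuming <linux/list.h>:

	struct i915_address_space *vm, *v;

	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link) {
		/* The body may free 'vm'; 'v' already points at the
		 * next node, so iteration continues correctly. */
		WARN_ON(i915_gem_evict_vm(vm, false));
	}
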
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 60998fc4e5b2..1a0611bb576b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,7 @@
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31) 36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30) 37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
38#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) 39#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
39 40
40#define BATCH_OFFSET_BIAS (256*1024) 41#define BATCH_OFFSET_BIAS (256*1024)
@@ -94,7 +95,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
94 struct i915_address_space *vm, 95 struct i915_address_space *vm,
95 struct drm_file *file) 96 struct drm_file *file)
96{ 97{
97 struct drm_i915_private *dev_priv = vm->dev->dev_private;
98 struct drm_i915_gem_object *obj; 98 struct drm_i915_gem_object *obj;
99 struct list_head objects; 99 struct list_head objects;
100 int i, ret; 100 int i, ret;
@@ -129,20 +129,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
129 i = 0; 129 i = 0;
130 while (!list_empty(&objects)) { 130 while (!list_empty(&objects)) {
131 struct i915_vma *vma; 131 struct i915_vma *vma;
132 struct i915_address_space *bind_vm = vm;
133
134 if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
135 USES_FULL_PPGTT(vm->dev)) {
136 ret = -EINVAL;
137 goto err;
138 }
139
140 /* If we have secure dispatch, or the userspace assures us that
141 * they know what they're doing, use the GGTT VM.
142 */
143 if (((args->flags & I915_EXEC_SECURE) &&
144 (i == (args->buffer_count - 1))))
145 bind_vm = &dev_priv->gtt.base;
146 132
147 obj = list_first_entry(&objects, 133 obj = list_first_entry(&objects,
148 struct drm_i915_gem_object, 134 struct drm_i915_gem_object,
@@ -156,7 +142,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
156 * from the (obj, vm) we don't run the risk of creating 142 * from the (obj, vm) we don't run the risk of creating
157 * duplicated vmas for the same vm. 143 * duplicated vmas for the same vm.
158 */ 144 */
159 vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm); 145 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
160 if (IS_ERR(vma)) { 146 if (IS_ERR(vma)) {
161 DRM_DEBUG("Failed to lookup VMA\n"); 147 DRM_DEBUG("Failed to lookup VMA\n");
162 ret = PTR_ERR(vma); 148 ret = PTR_ERR(vma);
@@ -307,7 +293,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
307 struct drm_device *dev = obj->base.dev; 293 struct drm_device *dev = obj->base.dev;
308 struct drm_i915_private *dev_priv = dev->dev_private; 294 struct drm_i915_private *dev_priv = dev->dev_private;
309 uint64_t delta = reloc->delta + target_offset; 295 uint64_t delta = reloc->delta + target_offset;
310 uint32_t __iomem *reloc_entry; 296 uint64_t offset;
311 void __iomem *reloc_page; 297 void __iomem *reloc_page;
312 int ret; 298 int ret;
313 299
@@ -320,25 +306,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
320 return ret; 306 return ret;
321 307
322 /* Map the page containing the relocation we're going to perform. */ 308 /* Map the page containing the relocation we're going to perform. */
323 reloc->offset += i915_gem_obj_ggtt_offset(obj); 309 offset = i915_gem_obj_ggtt_offset(obj);
310 offset += reloc->offset;
324 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 311 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
325 reloc->offset & PAGE_MASK); 312 offset & PAGE_MASK);
326 reloc_entry = (uint32_t __iomem *) 313 iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
327 (reloc_page + offset_in_page(reloc->offset));
328 iowrite32(lower_32_bits(delta), reloc_entry);
329 314
330 if (INTEL_INFO(dev)->gen >= 8) { 315 if (INTEL_INFO(dev)->gen >= 8) {
331 reloc_entry += 1; 316 offset += sizeof(uint32_t);
332 317
333 if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) { 318 if (offset_in_page(offset) == 0) {
334 io_mapping_unmap_atomic(reloc_page); 319 io_mapping_unmap_atomic(reloc_page);
335 reloc_page = io_mapping_map_atomic_wc( 320 reloc_page =
336 dev_priv->gtt.mappable, 321 io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
337 reloc->offset + sizeof(uint32_t)); 322 offset);
338 reloc_entry = reloc_page;
339 } 323 }
340 324
341 iowrite32(upper_32_bits(delta), reloc_entry); 325 iowrite32(upper_32_bits(delta),
326 reloc_page + offset_in_page(offset));
342 } 327 }
343 328
344 io_mapping_unmap_atomic(reloc_page); 329 io_mapping_unmap_atomic(reloc_page);
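
On gen8+ the relocation value is 64 bits wide and the two dwords are written separately, so the upper dword can land on the following page of the mappable aperture, in which case the atomic mapping must be exchanged first. A worked example of the arithmetic above (illustrative values only):

	/* Say the object sits at GGTT offset 0x10000 and reloc->offset
	 * is 0xffc, the last dword slot of the first page. */
	offset = 0x10000 + 0xffc;	/* 0x10ffc: low dword written here */
	offset += sizeof(uint32_t);	/* 0x11000 */
	/* offset_in_page(0x11000) == 0, so the code unmaps reloc_page and
	 * remaps the page at 0x11000; the high dword then goes to
	 * offset_in_page(offset) == 0 within the new mapping. */
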
@@ -535,34 +520,18 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
535} 520}
536 521
537static int 522static int
538need_reloc_mappable(struct i915_vma *vma)
539{
540 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
541 return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
542 i915_is_ggtt(vma->vm);
543}
544
545static int
546i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, 523i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
547 struct intel_engine_cs *ring, 524 struct intel_engine_cs *ring,
548 bool *need_reloc) 525 bool *need_reloc)
549{ 526{
550 struct drm_i915_gem_object *obj = vma->obj; 527 struct drm_i915_gem_object *obj = vma->obj;
551 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 528 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
552 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
553 bool need_fence;
554 uint64_t flags; 529 uint64_t flags;
555 int ret; 530 int ret;
556 531
557 flags = 0; 532 flags = 0;
558 533 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
559 need_fence =
560 has_fenced_gpu_access &&
561 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
562 obj->tiling_mode != I915_TILING_NONE;
563 if (need_fence || need_reloc_mappable(vma))
564 flags |= PIN_MAPPABLE; 534 flags |= PIN_MAPPABLE;
565
566 if (entry->flags & EXEC_OBJECT_NEEDS_GTT) 535 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
567 flags |= PIN_GLOBAL; 536 flags |= PIN_GLOBAL;
568 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) 537 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -574,17 +543,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
574 543
575 entry->flags |= __EXEC_OBJECT_HAS_PIN; 544 entry->flags |= __EXEC_OBJECT_HAS_PIN;
576 545
577 if (has_fenced_gpu_access) { 546 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
578 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 547 ret = i915_gem_object_get_fence(obj);
579 ret = i915_gem_object_get_fence(obj); 548 if (ret)
580 if (ret) 549 return ret;
581 return ret;
582
583 if (i915_gem_object_pin_fence(obj))
584 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
585 550
586 obj->pending_fenced_gpu_access = true; 551 if (i915_gem_object_pin_fence(obj))
587 } 552 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
588 } 553 }
589 554
590 if (entry->offset != vma->node.start) { 555 if (entry->offset != vma->node.start) {
@@ -601,26 +566,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
601} 566}
602 567
603static bool 568static bool
604eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access) 569need_reloc_mappable(struct i915_vma *vma)
605{ 570{
606 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 571 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
607 struct drm_i915_gem_object *obj = vma->obj;
608 bool need_fence, need_mappable;
609 572
610 need_fence = 573 if (entry->relocation_count == 0)
611 has_fenced_gpu_access && 574 return false;
612 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 575
613 obj->tiling_mode != I915_TILING_NONE; 576 if (!i915_is_ggtt(vma->vm))
614 need_mappable = need_fence || need_reloc_mappable(vma); 577 return false;
578
579 /* See also use_cpu_reloc() */
580 if (HAS_LLC(vma->obj->base.dev))
581 return false;
615 582
616 WARN_ON((need_mappable || need_fence) && 583 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
584 return false;
585
586 return true;
587}
588
589static bool
590eb_vma_misplaced(struct i915_vma *vma)
591{
592 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
593 struct drm_i915_gem_object *obj = vma->obj;
594
595 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
617 !i915_is_ggtt(vma->vm)); 596 !i915_is_ggtt(vma->vm));
618 597
619 if (entry->alignment && 598 if (entry->alignment &&
620 vma->node.start & (entry->alignment - 1)) 599 vma->node.start & (entry->alignment - 1))
621 return true; 600 return true;
622 601
623 if (need_mappable && !obj->map_and_fenceable) 602 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
624 return true; 603 return true;
625 604
626 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && 605 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
@@ -642,9 +621,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
642 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 621 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
643 int retry; 622 int retry;
644 623
645 if (list_empty(vmas))
646 return 0;
647
648 i915_gem_retire_requests_ring(ring); 624 i915_gem_retire_requests_ring(ring);
649 625
650 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; 626 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -658,20 +634,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
658 obj = vma->obj; 634 obj = vma->obj;
659 entry = vma->exec_entry; 635 entry = vma->exec_entry;
660 636
637 if (!has_fenced_gpu_access)
638 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
661 need_fence = 639 need_fence =
662 has_fenced_gpu_access &&
663 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 640 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
664 obj->tiling_mode != I915_TILING_NONE; 641 obj->tiling_mode != I915_TILING_NONE;
665 need_mappable = need_fence || need_reloc_mappable(vma); 642 need_mappable = need_fence || need_reloc_mappable(vma);
666 643
667 if (need_mappable) 644 if (need_mappable) {
645 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
668 list_move(&vma->exec_list, &ordered_vmas); 646 list_move(&vma->exec_list, &ordered_vmas);
669 else 647 } else
670 list_move_tail(&vma->exec_list, &ordered_vmas); 648 list_move_tail(&vma->exec_list, &ordered_vmas);
671 649
672 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; 650 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
673 obj->base.pending_write_domain = 0; 651 obj->base.pending_write_domain = 0;
674 obj->pending_fenced_gpu_access = false;
675 } 652 }
676 list_splice(&ordered_vmas, vmas); 653 list_splice(&ordered_vmas, vmas);
677 654
@@ -696,7 +673,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
696 if (!drm_mm_node_allocated(&vma->node)) 673 if (!drm_mm_node_allocated(&vma->node))
697 continue; 674 continue;
698 675
699 if (eb_vma_misplaced(vma, has_fenced_gpu_access)) 676 if (eb_vma_misplaced(vma))
700 ret = i915_vma_unbind(vma); 677 ret = i915_vma_unbind(vma);
701 else 678 else
702 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); 679 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -744,9 +721,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
744 int i, total, ret; 721 int i, total, ret;
745 unsigned count = args->buffer_count; 722 unsigned count = args->buffer_count;
746 723
747 if (WARN_ON(list_empty(&eb->vmas)))
748 return 0;
749
750 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; 724 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
751 725
752 /* We may process another execbuffer during the unlock... */ 726 /* We may process another execbuffer during the unlock... */
@@ -890,18 +864,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
890} 864}
891 865
892static int 866static int
893validate_exec_list(struct drm_i915_gem_exec_object2 *exec, 867validate_exec_list(struct drm_device *dev,
868 struct drm_i915_gem_exec_object2 *exec,
894 int count) 869 int count)
895{ 870{
896 int i;
897 unsigned relocs_total = 0; 871 unsigned relocs_total = 0;
898 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 872 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
873 unsigned invalid_flags;
874 int i;
875
876 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
877 if (USES_FULL_PPGTT(dev))
878 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
899 879
900 for (i = 0; i < count; i++) { 880 for (i = 0; i < count; i++) {
901 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 881 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
902 int length; /* limited by fault_in_pages_readable() */ 882 int length; /* limited by fault_in_pages_readable() */
903 883
904 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) 884 if (exec[i].flags & invalid_flags)
905 return -EINVAL; 885 return -EINVAL;
906 886
907 /* First check for malicious input causing overflow in 887 /* First check for malicious input causing overflow in
@@ -951,16 +931,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
951 return ERR_PTR(-EIO); 931 return ERR_PTR(-EIO);
952 } 932 }
953 933
934 if (i915.enable_execlists && !ctx->engine[ring->id].state) {
935 int ret = intel_lr_context_deferred_create(ctx, ring);
936 if (ret) {
937 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
938 return ERR_PTR(ret);
939 }
940 }
941
954 return ctx; 942 return ctx;
955} 943}
956 944
957static void 945void
958i915_gem_execbuffer_move_to_active(struct list_head *vmas, 946i915_gem_execbuffer_move_to_active(struct list_head *vmas,
959 struct intel_engine_cs *ring) 947 struct intel_engine_cs *ring)
960{ 948{
949 u32 seqno = intel_ring_get_seqno(ring);
961 struct i915_vma *vma; 950 struct i915_vma *vma;
962 951
963 list_for_each_entry(vma, vmas, exec_list) { 952 list_for_each_entry(vma, vmas, exec_list) {
953 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
964 struct drm_i915_gem_object *obj = vma->obj; 954 struct drm_i915_gem_object *obj = vma->obj;
965 u32 old_read = obj->base.read_domains; 955 u32 old_read = obj->base.read_domains;
966 u32 old_write = obj->base.write_domain; 956 u32 old_write = obj->base.write_domain;
@@ -969,24 +959,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
969 if (obj->base.write_domain == 0) 959 if (obj->base.write_domain == 0)
970 obj->base.pending_read_domains |= obj->base.read_domains; 960 obj->base.pending_read_domains |= obj->base.read_domains;
971 obj->base.read_domains = obj->base.pending_read_domains; 961 obj->base.read_domains = obj->base.pending_read_domains;
972 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
973 962
974 i915_vma_move_to_active(vma, ring); 963 i915_vma_move_to_active(vma, ring);
975 if (obj->base.write_domain) { 964 if (obj->base.write_domain) {
976 obj->dirty = 1; 965 obj->dirty = 1;
977 obj->last_write_seqno = intel_ring_get_seqno(ring); 966 obj->last_write_seqno = seqno;
978 967
979 intel_fb_obj_invalidate(obj, ring); 968 intel_fb_obj_invalidate(obj, ring);
980 969
981 /* update for the implicit flush after a batch */ 970 /* update for the implicit flush after a batch */
982 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 971 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
983 } 972 }
973 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
974 obj->last_fenced_seqno = seqno;
975 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
976 struct drm_i915_private *dev_priv = to_i915(ring->dev);
977 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
978 &dev_priv->mm.fence_list);
979 }
980 }
984 981
985 trace_i915_gem_object_change_domain(obj, old_read, old_write); 982 trace_i915_gem_object_change_domain(obj, old_read, old_write);
986 } 983 }
987} 984}
988 985
989static void 986void
990i915_gem_execbuffer_retire_commands(struct drm_device *dev, 987i915_gem_execbuffer_retire_commands(struct drm_device *dev,
991 struct drm_file *file, 988 struct drm_file *file,
992 struct intel_engine_cs *ring, 989 struct intel_engine_cs *ring,
@@ -1026,14 +1023,14 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1026 return 0; 1023 return 0;
1027} 1024}
1028 1025
1029static int 1026int
1030legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, 1027i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1031 struct intel_engine_cs *ring, 1028 struct intel_engine_cs *ring,
1032 struct intel_context *ctx, 1029 struct intel_context *ctx,
1033 struct drm_i915_gem_execbuffer2 *args, 1030 struct drm_i915_gem_execbuffer2 *args,
1034 struct list_head *vmas, 1031 struct list_head *vmas,
1035 struct drm_i915_gem_object *batch_obj, 1032 struct drm_i915_gem_object *batch_obj,
1036 u64 exec_start, u32 flags) 1033 u64 exec_start, u32 flags)
1037{ 1034{
1038 struct drm_clip_rect *cliprects = NULL; 1035 struct drm_clip_rect *cliprects = NULL;
1039 struct drm_i915_private *dev_priv = dev->dev_private; 1036 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1254,7 +1251,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1254 if (!i915_gem_check_execbuffer(args)) 1251 if (!i915_gem_check_execbuffer(args))
1255 return -EINVAL; 1252 return -EINVAL;
1256 1253
1257 ret = validate_exec_list(exec, args->buffer_count); 1254 ret = validate_exec_list(dev, exec, args->buffer_count);
1258 if (ret) 1255 if (ret)
1259 return ret; 1256 return ret;
1260 1257
@@ -1318,8 +1315,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1318 1315
1319 i915_gem_context_reference(ctx); 1316 i915_gem_context_reference(ctx);
1320 1317
1321 vm = ctx->vm; 1318 if (ctx->ppgtt)
1322 if (!USES_FULL_PPGTT(dev)) 1319 vm = &ctx->ppgtt->base;
1320 else
1323 vm = &dev_priv->gtt.base; 1321 vm = &dev_priv->gtt.base;
1324 1322
1325 eb = eb_create(args); 1323 eb = eb_create(args);
@@ -1386,25 +1384,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1386 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1384 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1387 * batch" bit. Hence we need to pin secure batches into the global gtt. 1385 * batch" bit. Hence we need to pin secure batches into the global gtt.
1388 * hsw should have this fixed, but bdw mucks it up again. */ 1386 * hsw should have this fixed, but bdw mucks it up again. */
1389 if (flags & I915_DISPATCH_SECURE && 1387 if (flags & I915_DISPATCH_SECURE) {
1390 !batch_obj->has_global_gtt_mapping) { 1388 /*
 1391 /* When we have multiple VMs, we'll need to make sure that we 1389 * So at first glance it looks freaky that we pin the batch here
1392 * allocate space first */ 1390 * outside of the reservation loop. But:
1393 struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj); 1391 * - The batch is already pinned into the relevant ppgtt, so we
1394 BUG_ON(!vma); 1392 * already have the backing storage fully allocated.
1395 vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND); 1393 * - No other BO uses the global gtt (well contexts, but meh),
1396 } 1394 * so we don't really have issues with multiple objects not
1395 * fitting due to fragmentation.
1396 * So this is actually safe.
1397 */
1398 ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1399 if (ret)
1400 goto err;
1397 1401
1398 if (flags & I915_DISPATCH_SECURE)
1399 exec_start += i915_gem_obj_ggtt_offset(batch_obj); 1402 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1400 else 1403 } else
1401 exec_start += i915_gem_obj_offset(batch_obj, vm); 1404 exec_start += i915_gem_obj_offset(batch_obj, vm);
1402 1405
1403 ret = legacy_ringbuffer_submission(dev, file, ring, ctx, 1406 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
1404 args, &eb->vmas, batch_obj, exec_start, flags); 1407 &eb->vmas, batch_obj, exec_start, flags);
1405 if (ret)
1406 goto err;
1407 1408
1409 /*
1410 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1411 * batch vma for correctness. For less ugly and less fragility this
1412 * needs to be adjusted to also track the ggtt batch vma properly as
1413 * active.
1414 */
1415 if (flags & I915_DISPATCH_SECURE)
1416 i915_gem_object_ggtt_unpin(batch_obj);
1408err: 1417err:
1409 /* the request owns the ref now */ 1418 /* the request owns the ref now */
1410 i915_gem_context_unreference(ctx); 1419 i915_gem_context_unreference(ctx);
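The hunks above replace the open-coded GGTT bind for secure batches with a proper i915_gem_obj_ggtt_pin()/i915_gem_object_ggtt_unpin() pair around submission, and route submission through the new dev_priv->gt.do_execbuf function pointer so the legacy ringbuffer and execlists backends share one call site. A minimal userspace sketch of that acquire/dispatch/release shape, with invented stand-in names (not the i915 API):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel objects. */
struct batch { int pinned; };

static int pin(struct batch *b)    { b->pinned = 1; return 0; }
static void unpin(struct batch *b) { b->pinned = 0; }
static int dispatch(struct batch *b) { return b->pinned ? 0 : -1; }

static int submit(struct batch *b, int secure)
{
	int ret = 0;

	if (secure) {
		ret = pin(b);   /* extra global-GTT pin only for secure batches */
		if (ret)
			goto err;
	}

	ret = dispatch(b);      /* dev_priv->gt.do_execbuf() in the driver */

	if (secure)
		unpin(b);       /* drop the extra pin once submitted */
err:
	return ret;
}

int main(void)
{
	struct batch b = { 0 };
	printf("secure submit: %d\n", submit(&b, 1));
	return 0;
}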
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e42925f76b4b..b672b843fd5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -33,17 +33,6 @@
33static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); 33static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
34static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); 34static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
35 35
36bool intel_enable_ppgtt(struct drm_device *dev, bool full)
37{
38 if (i915.enable_ppgtt == 0)
39 return false;
40
41 if (i915.enable_ppgtt == 1 && full)
42 return false;
43
44 return true;
45}
46
47static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 36static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
48{ 37{
49 if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) 38 if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
@@ -78,7 +67,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
78 enum i915_cache_level cache_level, 67 enum i915_cache_level cache_level,
79 u32 flags); 68 u32 flags);
80static void ppgtt_unbind_vma(struct i915_vma *vma); 69static void ppgtt_unbind_vma(struct i915_vma *vma);
81static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
82 70
83static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, 71static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
84 enum i915_cache_level level, 72 enum i915_cache_level level,
@@ -216,19 +204,12 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
216 204
217/* Broadwell Page Directory Pointer Descriptors */ 205/* Broadwell Page Directory Pointer Descriptors */
218static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry, 206static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
219 uint64_t val, bool synchronous) 207 uint64_t val)
220{ 208{
221 struct drm_i915_private *dev_priv = ring->dev->dev_private;
222 int ret; 209 int ret;
223 210
224 BUG_ON(entry >= 4); 211 BUG_ON(entry >= 4);
225 212
226 if (synchronous) {
227 I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
228 I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
229 return 0;
230 }
231
232 ret = intel_ring_begin(ring, 6); 213 ret = intel_ring_begin(ring, 6);
233 if (ret) 214 if (ret)
234 return ret; 215 return ret;
@@ -245,8 +226,7 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
245} 226}
246 227
247static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, 228static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
248 struct intel_engine_cs *ring, 229 struct intel_engine_cs *ring)
249 bool synchronous)
250{ 230{
251 int i, ret; 231 int i, ret;
252 232
@@ -255,7 +235,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
255 235
256 for (i = used_pd - 1; i >= 0; i--) { 236 for (i = used_pd - 1; i >= 0; i--) {
257 dma_addr_t addr = ppgtt->pd_dma_addr[i]; 237 dma_addr_t addr = ppgtt->pd_dma_addr[i];
258 ret = gen8_write_pdp(ring, i, addr, synchronous); 238 ret = gen8_write_pdp(ring, i, addr);
259 if (ret) 239 if (ret)
260 return ret; 240 return ret;
261 } 241 }
@@ -403,9 +383,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
403 struct i915_hw_ppgtt *ppgtt = 383 struct i915_hw_ppgtt *ppgtt =
404 container_of(vm, struct i915_hw_ppgtt, base); 384 container_of(vm, struct i915_hw_ppgtt, base);
405 385
406 list_del(&vm->global_link);
407 drm_mm_takedown(&vm->mm);
408
409 gen8_ppgtt_unmap_pages(ppgtt); 386 gen8_ppgtt_unmap_pages(ppgtt);
410 gen8_ppgtt_free(ppgtt); 387 gen8_ppgtt_free(ppgtt);
411} 388}
@@ -615,7 +592,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
615 kunmap_atomic(pd_vaddr); 592 kunmap_atomic(pd_vaddr);
616 } 593 }
617 594
618 ppgtt->enable = gen8_ppgtt_enable;
619 ppgtt->switch_mm = gen8_mm_switch; 595 ppgtt->switch_mm = gen8_mm_switch;
620 ppgtt->base.clear_range = gen8_ppgtt_clear_range; 596 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
621 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; 597 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
@@ -724,29 +700,10 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
724} 700}
725 701
726static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 702static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
727 struct intel_engine_cs *ring, 703 struct intel_engine_cs *ring)
728 bool synchronous)
729{ 704{
730 struct drm_device *dev = ppgtt->base.dev;
731 struct drm_i915_private *dev_priv = dev->dev_private;
732 int ret; 705 int ret;
733 706
734 /* If we're in reset, we can assume the GPU is sufficiently idle to
735 * manually frob these bits. Ideally we could use the ring functions,
736 * except our error handling makes it quite difficult (can't use
737 * intel_ring_begin, ring->flush, or intel_ring_advance)
738 *
739 * FIXME: We should try not to special case reset
740 */
741 if (synchronous ||
742 i915_reset_in_progress(&dev_priv->gpu_error)) {
743 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
744 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
745 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
746 POSTING_READ(RING_PP_DIR_BASE(ring));
747 return 0;
748 }
749
750 /* NB: TLBs must be flushed and invalidated before a switch */ 707 /* NB: TLBs must be flushed and invalidated before a switch */
751 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 708 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
752 if (ret) 709 if (ret)
@@ -768,29 +725,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
768} 725}
769 726
770static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 727static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
771 struct intel_engine_cs *ring, 728 struct intel_engine_cs *ring)
772 bool synchronous)
773{ 729{
774 struct drm_device *dev = ppgtt->base.dev;
775 struct drm_i915_private *dev_priv = dev->dev_private;
776 int ret; 730 int ret;
777 731
778 /* If we're in reset, we can assume the GPU is sufficiently idle to
779 * manually frob these bits. Ideally we could use the ring functions,
780 * except our error handling makes it quite difficult (can't use
781 * intel_ring_begin, ring->flush, or intel_ring_advance)
782 *
783 * FIXME: We should try not to special case reset
784 */
785 if (synchronous ||
786 i915_reset_in_progress(&dev_priv->gpu_error)) {
787 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
788 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
789 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
790 POSTING_READ(RING_PP_DIR_BASE(ring));
791 return 0;
792 }
793
794 /* NB: TLBs must be flushed and invalidated before a switch */ 732 /* NB: TLBs must be flushed and invalidated before a switch */
795 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 733 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
796 if (ret) 734 if (ret)
@@ -819,14 +757,11 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
819} 757}
820 758
821static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 759static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
822 struct intel_engine_cs *ring, 760 struct intel_engine_cs *ring)
823 bool synchronous)
824{ 761{
825 struct drm_device *dev = ppgtt->base.dev; 762 struct drm_device *dev = ppgtt->base.dev;
826 struct drm_i915_private *dev_priv = dev->dev_private; 763 struct drm_i915_private *dev_priv = dev->dev_private;
827 764
828 if (!synchronous)
829 return 0;
830 765
831 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 766 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
832 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); 767 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
@@ -836,39 +771,20 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
836 return 0; 771 return 0;
837} 772}
838 773
839static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 774static void gen8_ppgtt_enable(struct drm_device *dev)
840{ 775{
841 struct drm_device *dev = ppgtt->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private; 776 struct drm_i915_private *dev_priv = dev->dev_private;
843 struct intel_engine_cs *ring; 777 struct intel_engine_cs *ring;
844 int j, ret; 778 int j;
845 779
846 for_each_ring(ring, dev_priv, j) { 780 for_each_ring(ring, dev_priv, j) {
847 I915_WRITE(RING_MODE_GEN7(ring), 781 I915_WRITE(RING_MODE_GEN7(ring),
848 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 782 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
849
850 /* We promise to do a switch later with FULL PPGTT. If this is
851 * aliasing, this is the one and only switch we'll do */
852 if (USES_FULL_PPGTT(dev))
853 continue;
854
855 ret = ppgtt->switch_mm(ppgtt, ring, true);
856 if (ret)
857 goto err_out;
858 } 783 }
859
860 return 0;
861
862err_out:
863 for_each_ring(ring, dev_priv, j)
864 I915_WRITE(RING_MODE_GEN7(ring),
865 _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
866 return ret;
867} 784}
868 785
869static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 786static void gen7_ppgtt_enable(struct drm_device *dev)
870{ 787{
871 struct drm_device *dev = ppgtt->base.dev;
872 struct drm_i915_private *dev_priv = dev->dev_private; 788 struct drm_i915_private *dev_priv = dev->dev_private;
873 struct intel_engine_cs *ring; 789 struct intel_engine_cs *ring;
874 uint32_t ecochk, ecobits; 790 uint32_t ecochk, ecobits;
@@ -887,31 +803,16 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
887 I915_WRITE(GAM_ECOCHK, ecochk); 803 I915_WRITE(GAM_ECOCHK, ecochk);
888 804
889 for_each_ring(ring, dev_priv, i) { 805 for_each_ring(ring, dev_priv, i) {
890 int ret;
891 /* GFX_MODE is per-ring on gen7+ */ 806 /* GFX_MODE is per-ring on gen7+ */
892 I915_WRITE(RING_MODE_GEN7(ring), 807 I915_WRITE(RING_MODE_GEN7(ring),
893 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 808 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
894
895 /* We promise to do a switch later with FULL PPGTT. If this is
896 * aliasing, this is the one and only switch we'll do */
897 if (USES_FULL_PPGTT(dev))
898 continue;
899
900 ret = ppgtt->switch_mm(ppgtt, ring, true);
901 if (ret)
902 return ret;
903 } 809 }
904
905 return 0;
906} 810}
907 811
908static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 812static void gen6_ppgtt_enable(struct drm_device *dev)
909{ 813{
910 struct drm_device *dev = ppgtt->base.dev;
911 struct drm_i915_private *dev_priv = dev->dev_private; 814 struct drm_i915_private *dev_priv = dev->dev_private;
912 struct intel_engine_cs *ring;
913 uint32_t ecochk, gab_ctl, ecobits; 815 uint32_t ecochk, gab_ctl, ecobits;
914 int i;
915 816
916 ecobits = I915_READ(GAC_ECO_BITS); 817 ecobits = I915_READ(GAC_ECO_BITS);
917 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 818 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
@@ -924,14 +825,6 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
924 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 825 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
925 826
926 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 827 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
927
928 for_each_ring(ring, dev_priv, i) {
929 int ret = ppgtt->switch_mm(ppgtt, ring, true);
930 if (ret)
931 return ret;
932 }
933
934 return 0;
935} 828}
936 829
937/* PPGTT support for Sandybridge/Gen6 and later */ 830
@@ -1029,8 +922,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1029 struct i915_hw_ppgtt *ppgtt = 922 struct i915_hw_ppgtt *ppgtt =
1030 container_of(vm, struct i915_hw_ppgtt, base); 923 container_of(vm, struct i915_hw_ppgtt, base);
1031 924
1032 list_del(&vm->global_link);
1033 drm_mm_takedown(&ppgtt->base.mm);
1034 drm_mm_remove_node(&ppgtt->node); 925 drm_mm_remove_node(&ppgtt->node);
1035 926
1036 gen6_ppgtt_unmap_pages(ppgtt); 927 gen6_ppgtt_unmap_pages(ppgtt);
@@ -1151,13 +1042,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1151 1042
1152 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; 1043 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
1153 if (IS_GEN6(dev)) { 1044 if (IS_GEN6(dev)) {
1154 ppgtt->enable = gen6_ppgtt_enable;
1155 ppgtt->switch_mm = gen6_mm_switch; 1045 ppgtt->switch_mm = gen6_mm_switch;
1156 } else if (IS_HASWELL(dev)) { 1046 } else if (IS_HASWELL(dev)) {
1157 ppgtt->enable = gen7_ppgtt_enable;
1158 ppgtt->switch_mm = hsw_mm_switch; 1047 ppgtt->switch_mm = hsw_mm_switch;
1159 } else if (IS_GEN7(dev)) { 1048 } else if (IS_GEN7(dev)) {
1160 ppgtt->enable = gen7_ppgtt_enable;
1161 ppgtt->switch_mm = gen7_mm_switch; 1049 ppgtt->switch_mm = gen7_mm_switch;
1162 } else 1050 } else
1163 BUG(); 1051 BUG();
@@ -1188,39 +1076,114 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1188 ppgtt->node.size >> 20, 1076 ppgtt->node.size >> 20,
1189 ppgtt->node.start / PAGE_SIZE); 1077 ppgtt->node.start / PAGE_SIZE);
1190 1078
1079 gen6_write_pdes(ppgtt);
1080 DRM_DEBUG("Adding PPGTT at offset %x\n",
1081 ppgtt->pd_offset << 10);
1082
1191 return 0; 1083 return 0;
1192} 1084}
1193 1085
1194int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 1086static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1195{ 1087{
1196 struct drm_i915_private *dev_priv = dev->dev_private; 1088 struct drm_i915_private *dev_priv = dev->dev_private;
1197 int ret = 0;
1198 1089
1199 ppgtt->base.dev = dev; 1090 ppgtt->base.dev = dev;
1200 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 1091 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1201 1092
1202 if (INTEL_INFO(dev)->gen < 8) 1093 if (INTEL_INFO(dev)->gen < 8)
1203 ret = gen6_ppgtt_init(ppgtt); 1094 return gen6_ppgtt_init(ppgtt);
1204 else if (IS_GEN8(dev)) 1095 else if (IS_GEN8(dev))
1205 ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); 1096 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1206 else 1097 else
1207 BUG(); 1098 BUG();
1099}
1100int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1101{
1102 struct drm_i915_private *dev_priv = dev->dev_private;
1103 int ret = 0;
1208 1104
1209 if (!ret) { 1105 ret = __hw_ppgtt_init(dev, ppgtt);
1210 struct drm_i915_private *dev_priv = dev->dev_private; 1106 if (ret == 0) {
1211 kref_init(&ppgtt->ref); 1107 kref_init(&ppgtt->ref);
1212 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 1108 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
1213 ppgtt->base.total); 1109 ppgtt->base.total);
1214 i915_init_vm(dev_priv, &ppgtt->base); 1110 i915_init_vm(dev_priv, &ppgtt->base);
1215 if (INTEL_INFO(dev)->gen < 8) { 1111 }
1216 gen6_write_pdes(ppgtt); 1112
1217 DRM_DEBUG("Adding PPGTT at offset %x\n", 1113 return ret;
1218 ppgtt->pd_offset << 10); 1114}
1115
1116int i915_ppgtt_init_hw(struct drm_device *dev)
1117{
1118 struct drm_i915_private *dev_priv = dev->dev_private;
1119 struct intel_engine_cs *ring;
1120 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1121 int i, ret = 0;
1122
1123 /* In the case of execlists, PPGTT is enabled by the context descriptor
1124 * and the PDPs are contained within the context itself. We don't
1125 * need to do anything here. */
1126 if (i915.enable_execlists)
1127 return 0;
1128
1129 if (!USES_PPGTT(dev))
1130 return 0;
1131
1132 if (IS_GEN6(dev))
1133 gen6_ppgtt_enable(dev);
1134 else if (IS_GEN7(dev))
1135 gen7_ppgtt_enable(dev);
1136 else if (INTEL_INFO(dev)->gen >= 8)
1137 gen8_ppgtt_enable(dev);
1138 else
1139 WARN_ON(1);
1140
1141 if (ppgtt) {
1142 for_each_ring(ring, dev_priv, i) {
1143 ret = ppgtt->switch_mm(ppgtt, ring);
1144 if (ret != 0)
1145 return ret;
1219 } 1146 }
1220 } 1147 }
1221 1148
1222 return ret; 1149 return ret;
1223} 1150}
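The PPGTT bring-up is now layered: __hw_ppgtt_init() picks the gen-specific page-table setup, i915_ppgtt_init() adds the software bookkeeping (kref, drm_mm) on top, and i915_ppgtt_init_hw() does the one-time hardware enable plus an explicit switch_mm() on every ring, replacing the per-ppgtt ->enable hooks that each did their own synchronous switch. A compact userspace sketch of that layering, names invented for illustration:

#include <stdio.h>

struct ppgtt { int hw_ready; };

/* gen-specific table setup (stand-in for gen6/gen8_ppgtt_init) */
static int hw_init_tables(struct ppgtt *p) { (void)p; return 0; }

/* software state on top of the hardware tables */
static int ppgtt_init(struct ppgtt *p)
{
	int ret = hw_init_tables(p);
	if (ret == 0)
		p->hw_ready = 0;   /* kref_init/drm_mm_init would go here */
	return ret;
}

/* one-time global enable + initial address-space switch per ring */
static int ppgtt_init_hw(struct ppgtt *aliasing, int nrings)
{
	int i;

	for (i = 0; i < nrings; i++)
		printf("ring %d: PPGTT enable bit set\n", i);

	if (aliasing) {
		for (i = 0; i < nrings; i++)
			printf("ring %d: switch_mm to aliasing ppgtt\n", i);
		aliasing->hw_ready = 1;
	}
	return 0;
}

int main(void)
{
	struct ppgtt p;
	if (ppgtt_init(&p) == 0)
		ppgtt_init_hw(&p, 4);
	return 0;
}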
1151struct i915_hw_ppgtt *
1152i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
1153{
1154 struct i915_hw_ppgtt *ppgtt;
1155 int ret;
1156
1157 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1158 if (!ppgtt)
1159 return ERR_PTR(-ENOMEM);
1160
1161 ret = i915_ppgtt_init(dev, ppgtt);
1162 if (ret) {
1163 kfree(ppgtt);
1164 return ERR_PTR(ret);
1165 }
1166
1167 ppgtt->file_priv = fpriv;
1168
1169 return ppgtt;
1170}
1171
1172void i915_ppgtt_release(struct kref *kref)
1173{
1174 struct i915_hw_ppgtt *ppgtt =
1175 container_of(kref, struct i915_hw_ppgtt, ref);
1176
1177 /* vmas should already be unbound */
1178 WARN_ON(!list_empty(&ppgtt->base.active_list));
1179 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
1180
1181 list_del(&ppgtt->base.global_link);
1182 drm_mm_takedown(&ppgtt->base.mm);
1183
1184 ppgtt->base.cleanup(&ppgtt->base);
1185 kfree(ppgtt);
1186}
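i915_ppgtt_create() and i915_ppgtt_release() tie a PPGTT's lifetime to a kref: every non-GGTT VMA takes a reference (see the __i915_gem_vma_create hunk further down), and the final put removes the VM from the global list, takes down its drm_mm and frees the page tables. A self-contained userspace analogue of that refcounting pattern; the kernel uses struct kref rather than a bare counter:

#include <stdio.h>
#include <stdlib.h>

struct ppgtt {
	int refcount;             /* kref in the driver */
};

static struct ppgtt *ppgtt_create(void)
{
	struct ppgtt *p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->refcount = 1;          /* kref_init() */
	return p;
}

static void ppgtt_get(struct ppgtt *p)
{
	if (p)
		p->refcount++;    /* i915_ppgtt_get() -> kref_get() */
}

static void ppgtt_release(struct ppgtt *p)
{
	/* address-space teardown (list_del, drm_mm_takedown, cleanup)
	 * happens here, only once the last user is gone */
	free(p);
}

static void ppgtt_put(struct ppgtt *p)
{
	if (p && --p->refcount == 0)   /* kref_put(..., release) */
		ppgtt_release(p);
}

int main(void)
{
	struct ppgtt *p = ppgtt_create();
	ppgtt_get(p);    /* e.g. a VMA bound into this address space */
	ppgtt_put(p);    /* VMA unbound */
	ppgtt_put(p);    /* owner drops the initial reference -> release */
	return 0;
}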
1224 1187
1225static void 1188static void
1226ppgtt_bind_vma(struct i915_vma *vma, 1189ppgtt_bind_vma(struct i915_vma *vma,
@@ -1687,10 +1650,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
1687 } 1650 }
1688} 1651}
1689 1652
1690void i915_gem_setup_global_gtt(struct drm_device *dev, 1653int i915_gem_setup_global_gtt(struct drm_device *dev,
1691 unsigned long start, 1654 unsigned long start,
1692 unsigned long mappable_end, 1655 unsigned long mappable_end,
1693 unsigned long end) 1656 unsigned long end)
1694{ 1657{
1695 /* Let GEM Manage all of the aperture. 1658 /* Let GEM Manage all of the aperture.
1696 * 1659 *
@@ -1706,6 +1669,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1706 struct drm_mm_node *entry; 1669 struct drm_mm_node *entry;
1707 struct drm_i915_gem_object *obj; 1670 struct drm_i915_gem_object *obj;
1708 unsigned long hole_start, hole_end; 1671 unsigned long hole_start, hole_end;
1672 int ret;
1709 1673
1710 BUG_ON(mappable_end > end); 1674 BUG_ON(mappable_end > end);
1711 1675
@@ -1717,14 +1681,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1717 /* Mark any preallocated objects as occupied */ 1681 /* Mark any preallocated objects as occupied */
1718 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1682 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1719 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 1683 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1720 int ret; 1684
1721 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", 1685 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
1722 i915_gem_obj_ggtt_offset(obj), obj->base.size); 1686 i915_gem_obj_ggtt_offset(obj), obj->base.size);
1723 1687
1724 WARN_ON(i915_gem_obj_ggtt_bound(obj)); 1688 WARN_ON(i915_gem_obj_ggtt_bound(obj));
1725 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); 1689 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
1726 if (ret) 1690 if (ret) {
1727 DRM_DEBUG_KMS("Reservation failed\n"); 1691 DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
1692 return ret;
1693 }
1728 obj->has_global_gtt_mapping = 1; 1694 obj->has_global_gtt_mapping = 1;
1729 } 1695 }
1730 1696
@@ -1741,6 +1707,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1741 1707
1742 /* And finally clear the reserved guard page */ 1708 /* And finally clear the reserved guard page */
1743 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); 1709 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
1710
1711 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
1712 struct i915_hw_ppgtt *ppgtt;
1713
1714 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1715 if (!ppgtt)
1716 return -ENOMEM;
1717
1718 ret = __hw_ppgtt_init(dev, ppgtt);
1719 if (ret != 0)
1720 return ret;
1721
1722 dev_priv->mm.aliasing_ppgtt = ppgtt;
1723 }
1724
1725 return 0;
1744} 1726}
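i915_gem_setup_global_gtt() now returns int so that failures (node reservation, aliasing-PPGTT allocation) propagate to the caller instead of being logged and ignored; the aliasing PPGTT, when requested but full PPGTT is off, is created right here as part of global GTT setup. A short sketch of the optional-sub-object pattern, values and names illustrative only:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct gtt { void *aliasing; };

/* 0 = off, 1 = aliasing, 2 = full; mirrors the i915.enable_ppgtt levels */
static int setup_global_gtt(struct gtt *g, int ppgtt_mode)
{
	if (ppgtt_mode == 1) {          /* aliasing only */
		g->aliasing = calloc(1, 64);
		if (!g->aliasing)
			return -ENOMEM; /* the caller sees the failure now */
	}
	return 0;
}

int main(void)
{
	struct gtt g = { 0 };
	printf("setup: %d\n", setup_global_gtt(&g, 1));
	free(g.aliasing);
	return 0;
}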
1745 1727
1746void i915_gem_init_global_gtt(struct drm_device *dev) 1728void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1754,6 +1736,25 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
1754 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1736 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1755} 1737}
1756 1738
1739void i915_global_gtt_cleanup(struct drm_device *dev)
1740{
1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct i915_address_space *vm = &dev_priv->gtt.base;
1743
1744 if (dev_priv->mm.aliasing_ppgtt) {
1745 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1746
1747 ppgtt->base.cleanup(&ppgtt->base);
1748 }
1749
1750 if (drm_mm_initialized(&vm->mm)) {
1751 drm_mm_takedown(&vm->mm);
1752 list_del(&vm->global_link);
1753 }
1754
1755 vm->cleanup(vm);
1756}
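i915_global_gtt_cleanup() centralizes teardown that was previously scattered across the ->cleanup vtable implementations (see the gen6_gmch_remove and i915_gmch_remove hunks below, which lose their drm_mm_takedown calls): the aliasing PPGTT goes first, then the global drm_mm bookkeeping and VM-list entry, then the backend-specific cleanup. A tiny sketch of the ordering:

#include <stdio.h>

static void cleanup_aliasing(void) { printf("1: aliasing ppgtt\n"); }
static void cleanup_mm(void)       { printf("2: drm_mm + vm list\n"); }
static void cleanup_backend(void)  { printf("3: backend (iounmap/gmch)\n"); }

/* children before parents: the aliasing ppgtt lives inside the global
 * GTT, and the drm_mm bookkeeping must go before the mapping it manages */
static void global_gtt_cleanup(void)
{
	cleanup_aliasing();
	cleanup_mm();
	cleanup_backend();
}

int main(void) { global_gtt_cleanup(); return 0; }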
1757
1757static int setup_scratch_page(struct drm_device *dev) 1758static int setup_scratch_page(struct drm_device *dev)
1758{ 1759{
1759 struct drm_i915_private *dev_priv = dev->dev_private; 1760 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2022,10 +2023,6 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
2022 2023
2023 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); 2024 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
2024 2025
2025 if (drm_mm_initialized(&vm->mm)) {
2026 drm_mm_takedown(&vm->mm);
2027 list_del(&vm->global_link);
2028 }
2029 iounmap(gtt->gsm); 2026 iounmap(gtt->gsm);
2030 teardown_scratch_page(vm->dev); 2027 teardown_scratch_page(vm->dev);
2031} 2028}
@@ -2058,10 +2055,6 @@ static int i915_gmch_probe(struct drm_device *dev,
2058 2055
2059static void i915_gmch_remove(struct i915_address_space *vm) 2056static void i915_gmch_remove(struct i915_address_space *vm)
2060{ 2057{
2061 if (drm_mm_initialized(&vm->mm)) {
2062 drm_mm_takedown(&vm->mm);
2063 list_del(&vm->global_link);
2064 }
2065 intel_gmch_remove(); 2058 intel_gmch_remove();
2066} 2059}
2067 2060
@@ -2160,8 +2153,10 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2160 /* Keep GGTT vmas first to make debug easier */ 2153 /* Keep GGTT vmas first to make debug easier */
2161 if (i915_is_ggtt(vm)) 2154 if (i915_is_ggtt(vm))
2162 list_add(&vma->vma_link, &obj->vma_list); 2155 list_add(&vma->vma_link, &obj->vma_list);
2163 else 2156 else {
2164 list_add_tail(&vma->vma_link, &obj->vma_list); 2157 list_add_tail(&vma->vma_link, &obj->vma_list);
2158 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
2159 }
2165 2160
2166 return vma; 2161 return vma;
2167} 2162}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8d6f7c18c404..d5c14af51e99 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
34#ifndef __I915_GEM_GTT_H__ 34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__ 35#define __I915_GEM_GTT_H__
36 36
37struct drm_i915_file_private;
38
37typedef uint32_t gen6_gtt_pte_t; 39typedef uint32_t gen6_gtt_pte_t;
38typedef uint64_t gen8_gtt_pte_t; 40typedef uint64_t gen8_gtt_pte_t;
39typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; 41typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
@@ -258,22 +260,36 @@ struct i915_hw_ppgtt {
258 dma_addr_t *gen8_pt_dma_addr[4]; 260 dma_addr_t *gen8_pt_dma_addr[4];
259 }; 261 };
260 262
261 struct intel_context *ctx; 263 struct drm_i915_file_private *file_priv;
262 264
263 int (*enable)(struct i915_hw_ppgtt *ppgtt); 265 int (*enable)(struct i915_hw_ppgtt *ppgtt);
264 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, 266 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
265 struct intel_engine_cs *ring, 267 struct intel_engine_cs *ring);
266 bool synchronous);
267 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 268 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
268}; 269};
269 270
270int i915_gem_gtt_init(struct drm_device *dev); 271int i915_gem_gtt_init(struct drm_device *dev);
271void i915_gem_init_global_gtt(struct drm_device *dev); 272void i915_gem_init_global_gtt(struct drm_device *dev);
272void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, 273int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
273 unsigned long mappable_end, unsigned long end); 274 unsigned long mappable_end, unsigned long end);
274 275void i915_global_gtt_cleanup(struct drm_device *dev);
275bool intel_enable_ppgtt(struct drm_device *dev, bool full); 276
276int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); 277
278int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
279int i915_ppgtt_init_hw(struct drm_device *dev);
280void i915_ppgtt_release(struct kref *kref);
281struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
282 struct drm_i915_file_private *fpriv);
283static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
284{
285 if (ppgtt)
286 kref_get(&ppgtt->ref);
287}
288static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
289{
290 if (ppgtt)
291 kref_put(&ppgtt->ref, i915_ppgtt_release);
292}
277 293
278void i915_check_and_clear_faults(struct drm_device *dev); 294void i915_check_and_clear_faults(struct drm_device *dev);
279void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 295void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index e60be3f552a6..a9a62d75aa57 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,13 +28,6 @@
28#include "i915_drv.h" 28#include "i915_drv.h"
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31struct render_state {
32 const struct intel_renderstate_rodata *rodata;
33 struct drm_i915_gem_object *obj;
34 u64 ggtt_offset;
35 int gen;
36};
37
38static const struct intel_renderstate_rodata * 31static const struct intel_renderstate_rodata *
39render_state_get_rodata(struct drm_device *dev, const int gen) 32render_state_get_rodata(struct drm_device *dev, const int gen)
40{ 33{
@@ -127,30 +120,47 @@ static int render_state_setup(struct render_state *so)
127 return 0; 120 return 0;
128} 121}
129 122
130static void render_state_fini(struct render_state *so) 123void i915_gem_render_state_fini(struct render_state *so)
131{ 124{
132 i915_gem_object_ggtt_unpin(so->obj); 125 i915_gem_object_ggtt_unpin(so->obj);
133 drm_gem_object_unreference(&so->obj->base); 126 drm_gem_object_unreference(&so->obj->base);
134} 127}
135 128
136int i915_gem_render_state_init(struct intel_engine_cs *ring) 129int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
130 struct render_state *so)
137{ 131{
138 struct render_state so;
139 int ret; 132 int ret;
140 133
141 if (WARN_ON(ring->id != RCS)) 134 if (WARN_ON(ring->id != RCS))
142 return -ENOENT; 135 return -ENOENT;
143 136
144 ret = render_state_init(&so, ring->dev); 137 ret = render_state_init(so, ring->dev);
145 if (ret) 138 if (ret)
146 return ret; 139 return ret;
147 140
148 if (so.rodata == NULL) 141 if (so->rodata == NULL)
149 return 0; 142 return 0;
150 143
151 ret = render_state_setup(&so); 144 ret = render_state_setup(so);
145 if (ret) {
146 i915_gem_render_state_fini(so);
147 return ret;
148 }
149
150 return 0;
151}
152
153int i915_gem_render_state_init(struct intel_engine_cs *ring)
154{
155 struct render_state so;
156 int ret;
157
158 ret = i915_gem_render_state_prepare(ring, &so);
152 if (ret) 159 if (ret)
153 goto out; 160 return ret;
161
162 if (so.rodata == NULL)
163 return 0;
154 164
155 ret = ring->dispatch_execbuffer(ring, 165 ret = ring->dispatch_execbuffer(ring,
156 so.ggtt_offset, 166 so.ggtt_offset,
@@ -164,6 +174,6 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
164 ret = __i915_add_request(ring, NULL, so.obj, NULL); 174 ret = __i915_add_request(ring, NULL, so.obj, NULL);
165 /* __i915_add_request moves object to inactive if it fails */ 175 /* __i915_add_request moves object to inactive if it fails */
166out: 176out:
167 render_state_fini(&so); 177 i915_gem_render_state_fini(&so);
168 return ret; 178 return ret;
169} 179}
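The render-state code is split so the execlists path can reuse it: i915_gem_render_state_prepare() allocates and fills the golden-state batch, the caller dispatches it however its submission backend requires, and i915_gem_render_state_fini() releases it. A minimal userspace sketch of that prepare/dispatch/fini split, all names stand-ins:

#include <stdio.h>

struct render_state { int ready; };

static int render_state_prepare(struct render_state *so)
{
	so->ready = 1;            /* pin the object, write the golden state */
	return 0;
}

static void render_state_fini(struct render_state *so)
{
	so->ready = 0;            /* unpin + unreference */
}

/* legacy ring path; an execlists path would dispatch differently
 * but share prepare()/fini() */
static int render_state_init(void)
{
	struct render_state so;
	int ret = render_state_prepare(&so);
	if (ret)
		return ret;
	printf("dispatching golden state, ready=%d\n", so.ready);
	render_state_fini(&so);
	return 0;
}

int main(void) { return render_state_init(); }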
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
new file mode 100644
index 000000000000..c44961ed3fad
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _I915_GEM_RENDER_STATE_H_
25#define _I915_GEM_RENDER_STATE_H_
26
27#include <linux/types.h>
28
29struct intel_renderstate_rodata {
30 const u32 *reloc;
31 const u32 *batch;
32 const u32 batch_items;
33};
34
35struct render_state {
36 const struct intel_renderstate_rodata *rodata;
37 struct drm_i915_gem_object *obj;
38 u64 ggtt_offset;
39 int gen;
40};
41
42int i915_gem_render_state_init(struct intel_engine_cs *ring);
43void i915_gem_render_state_fini(struct render_state *so);
44int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
45 struct render_state *so);
46
47#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 21c025a209c0..85fda6b803e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -289,6 +289,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
289int i915_gem_init_stolen(struct drm_device *dev) 289int i915_gem_init_stolen(struct drm_device *dev)
290{ 290{
291 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
292 u32 tmp;
292 int bios_reserved = 0; 293 int bios_reserved = 0;
293 294
294#ifdef CONFIG_INTEL_IOMMU 295#ifdef CONFIG_INTEL_IOMMU
@@ -308,8 +309,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
308 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", 309 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
309 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); 310 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
310 311
311 if (IS_VALLEYVIEW(dev)) 312 if (INTEL_INFO(dev)->gen >= 8) {
312 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ 313 tmp = I915_READ(GEN7_BIOS_RESERVED);
314 tmp >>= GEN8_BIOS_RESERVED_SHIFT;
315 tmp &= GEN8_BIOS_RESERVED_MASK;
316 bios_reserved = (1024*1024) << tmp;
317 } else if (IS_GEN7(dev)) {
318 tmp = I915_READ(GEN7_BIOS_RESERVED);
319 bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
320 256*1024 : 1024*1024;
321 }
313 322
314 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size)) 323 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
315 return 0; 324 return 0;
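Instead of hard-coding 1M on VLV, the stolen-memory setup now reads the BIOS-reserved size from the hardware: on gen7 a single bit selects 256K vs 1M, while gen8 encodes a power-of-two multiplier of 1M in a small bitfield. A sketch of the decode with placeholder shift/mask values — the real definitions live in i915_reg.h and may differ:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout, illustrative only, not the real register. */
#define GEN7_BIOS_RESERVED_256K  (1u << 5)
#define GEN8_BIOS_RESERVED_SHIFT 7
#define GEN8_BIOS_RESERVED_MASK  0x3

static uint32_t bios_reserved_bytes(int gen, uint32_t reg)
{
	if (gen >= 8) {
		uint32_t tmp = (reg >> GEN8_BIOS_RESERVED_SHIFT) &
			       GEN8_BIOS_RESERVED_MASK;
		return (1024u * 1024u) << tmp;   /* 1M, 2M, 4M or 8M */
	}
	if (gen == 7)
		return (reg & GEN7_BIOS_RESERVED_256K) ? 256u * 1024u
						       : 1024u * 1024u;
	return 0;
}

int main(void)
{
	printf("gen8, field=2 -> %u bytes\n",
	       bios_reserved_bytes(8, 2u << GEN8_BIOS_RESERVED_SHIFT));
	printf("gen7, 256K bit -> %u bytes\n",
	       bios_reserved_bytes(7, GEN7_BIOS_RESERVED_256K));
	return 0;
}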
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cb150e8b4336..2cefb597df6d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -91,7 +91,14 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
93 93
94 if (IS_VALLEYVIEW(dev)) { 94 if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
95 /*
96 * On BDW+, swizzling is not used. We leave the CPU memory
97 * controller in charge of optimizing memory accesses without
98 * the extra address manipulation GPU side.
99 *
100 * VLV and CHV don't have GPU swizzling.
101 */
95 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 102 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
96 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 103 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
97 } else if (INTEL_INFO(dev)->gen >= 6) { 104 } else if (INTEL_INFO(dev)->gen >= 6) {
@@ -376,7 +383,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
376 383
377 if (ret == 0) { 384 if (ret == 0) {
378 obj->fence_dirty = 385 obj->fence_dirty =
379 obj->fenced_gpu_access || 386 obj->last_fenced_seqno ||
380 obj->fence_reg != I915_FENCE_REG_NONE; 387 obj->fence_reg != I915_FENCE_REG_NONE;
381 388
382 obj->tiling_mode = args->tiling_mode; 389 obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d38413997379..d182058383a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
293static struct i915_mmu_notifier * 293static struct i915_mmu_notifier *
294i915_mmu_notifier_find(struct i915_mm_struct *mm) 294i915_mmu_notifier_find(struct i915_mm_struct *mm)
295{ 295{
296 if (mm->mn == NULL) { 296 struct i915_mmu_notifier *mn = mm->mn;
297 down_write(&mm->mm->mmap_sem); 297
298 mutex_lock(&to_i915(mm->dev)->mm_lock); 298 mn = mm->mn;
299 if (mm->mn == NULL) 299 if (mn)
300 mm->mn = i915_mmu_notifier_create(mm->mm); 300 return mn;
301 mutex_unlock(&to_i915(mm->dev)->mm_lock); 301
302 up_write(&mm->mm->mmap_sem); 302 down_write(&mm->mm->mmap_sem);
303 mutex_lock(&to_i915(mm->dev)->mm_lock);
304 if ((mn = mm->mn) == NULL) {
305 mn = i915_mmu_notifier_create(mm->mm);
306 if (!IS_ERR(mn))
307 mm->mn = mn;
303 } 308 }
304 return mm->mn; 309 mutex_unlock(&to_i915(mm->dev)->mm_lock);
310 up_write(&mm->mm->mmap_sem);
311
312 return mn;
305} 313}
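i915_mmu_notifier_find() is restructured into the classic double-checked pattern: a lock-free fast path for the already-created case, then a re-check under the locks before creating, so two racing threads cannot both install a notifier — and a failed creation (ERR_PTR) is no longer cached in mm->mn. A simplified userspace analogue with a single pthread mutex (the driver additionally holds mmap_sem):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct notifier { int id; };
struct mm { struct notifier *mn; pthread_mutex_t lock; };

static struct notifier *notifier_create(void)
{
	return calloc(1, sizeof(struct notifier));
}

static struct notifier *notifier_find(struct mm *mm)
{
	struct notifier *mn = mm->mn;   /* unlocked fast path */

	if (mn)
		return mn;

	pthread_mutex_lock(&mm->lock);
	mn = mm->mn;                    /* re-check under the lock */
	if (!mn) {
		mn = notifier_create();
		if (mn)                 /* only publish on success */
			mm->mn = mn;
	}
	pthread_mutex_unlock(&mm->lock);

	return mn;
}

int main(void)
{
	struct mm mm = { NULL, PTHREAD_MUTEX_INITIALIZER };
	printf("first:  %p\n", (void *)notifier_find(&mm));
	printf("second: %p\n", (void *)notifier_find(&mm));
	free(mm.mn);
	return 0;
}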
306 314
307static int 315static int
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
681static void 689static void
682i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) 690i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
683{ 691{
684 struct scatterlist *sg; 692 struct sg_page_iter sg_iter;
685 int i;
686 693
687 BUG_ON(obj->userptr.work != NULL); 694 BUG_ON(obj->userptr.work != NULL);
688 695
689 if (obj->madv != I915_MADV_WILLNEED) 696 if (obj->madv != I915_MADV_WILLNEED)
690 obj->dirty = 0; 697 obj->dirty = 0;
691 698
692 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { 699 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
693 struct page *page = sg_page(sg); 700 struct page *page = sg_page_iter_page(&sg_iter);
694 701
695 if (obj->dirty) 702 if (obj->dirty)
696 set_page_dirty(page); 703 set_page_dirty(page);
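put_pages switches from for_each_sg() to for_each_sg_page(): scatterlist entries can be coalesced to span several contiguous pages, so iterating entries and calling sg_page() would only have dirtied the first page of each entry. A userspace analogue of per-page iteration over variable-length extents:

#include <stdio.h>

struct extent { int first_page; int npages; };   /* like one sg entry */

int main(void)
{
	/* coalesced "scatterlist": 3 entries covering 6 pages */
	struct extent sg[] = { {0, 1}, {1, 3}, {4, 2} };
	int i, j;

	for (i = 0; i < 3; i++)                  /* for_each_sg_page(...) */
		for (j = 0; j < sg[i].npages; j++)
			printf("dirty page %d\n", sg[i].first_page + j);

	/* the old for_each_sg loop would only have touched pages 0, 1, 4 */
	return 0;
}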
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index eab41f9390f8..2c87a797213f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
192 struct drm_i915_error_buffer *err, 192 struct drm_i915_error_buffer *err,
193 int count) 193 int count)
194{ 194{
195 err_printf(m, "%s [%d]:\n", name, count); 195 err_printf(m, " %s [%d]:\n", name, count);
196 196
197 while (count--) { 197 while (count--) {
198 err_printf(m, " %08x %8u %02x %02x %x %x", 198 err_printf(m, " %08x %8u %02x %02x %x %x",
199 err->gtt_offset, 199 err->gtt_offset,
200 err->size, 200 err->size,
201 err->read_domains, 201 err->read_domains,
@@ -208,7 +208,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
208 err_puts(m, err->userptr ? " userptr" : ""); 208 err_puts(m, err->userptr ? " userptr" : "");
209 err_puts(m, err->ring != -1 ? " " : ""); 209 err_puts(m, err->ring != -1 ? " " : "");
210 err_puts(m, ring_str(err->ring)); 210 err_puts(m, ring_str(err->ring));
211 err_puts(m, i915_cache_level_str(err->cache_level)); 211 err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
212 212
213 if (err->name) 213 if (err->name)
214 err_printf(m, " (name: %d)", err->name); 214 err_printf(m, " (name: %d)", err->name);
@@ -393,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
393 i915_ring_error_state(m, dev, &error->ring[i]); 393 i915_ring_error_state(m, dev, &error->ring[i]);
394 } 394 }
395 395
396 if (error->active_bo) 396 for (i = 0; i < error->vm_count; i++) {
397 err_printf(m, "vm[%d]\n", i);
398
397 print_error_buffers(m, "Active", 399 print_error_buffers(m, "Active",
398 error->active_bo[0], 400 error->active_bo[i],
399 error->active_bo_count[0]); 401 error->active_bo_count[i]);
400 402
401 if (error->pinned_bo)
402 print_error_buffers(m, "Pinned", 403 print_error_buffers(m, "Pinned",
403 error->pinned_bo[0], 404 error->pinned_bo[i],
404 error->pinned_bo_count[0]); 405 error->pinned_bo_count[i]);
406 }
405 407
406 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 408 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
407 obj = error->ring[i].batchbuffer; 409 obj = error->ring[i].batchbuffer;
@@ -492,9 +494,11 @@ out:
492} 494}
493 495
494int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, 496int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
497 struct drm_i915_private *i915,
495 size_t count, loff_t pos) 498 size_t count, loff_t pos)
496{ 499{
497 memset(ebuf, 0, sizeof(*ebuf)); 500 memset(ebuf, 0, sizeof(*ebuf));
501 ebuf->i915 = i915;
498 502
499 /* We need to have enough room to store any i915_error_state printf 503 /* We need to have enough room to store any i915_error_state printf
500 * so that we can move it to start position. 504 * so that we can move it to start position.
@@ -556,24 +560,54 @@ static void i915_error_state_free(struct kref *error_ref)
556} 560}
557 561
558static struct drm_i915_error_object * 562static struct drm_i915_error_object *
559i915_error_object_create_sized(struct drm_i915_private *dev_priv, 563i915_error_object_create(struct drm_i915_private *dev_priv,
560 struct drm_i915_gem_object *src, 564 struct drm_i915_gem_object *src,
561 struct i915_address_space *vm, 565 struct i915_address_space *vm)
562 const int num_pages)
563{ 566{
564 struct drm_i915_error_object *dst; 567 struct drm_i915_error_object *dst;
565 int i; 568 int num_pages;
569 bool use_ggtt;
570 int i = 0;
566 u32 reloc_offset; 571 u32 reloc_offset;
567 572
568 if (src == NULL || src->pages == NULL) 573 if (src == NULL || src->pages == NULL)
569 return NULL; 574 return NULL;
570 575
576 num_pages = src->base.size >> PAGE_SHIFT;
577
571 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); 578 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
572 if (dst == NULL) 579 if (dst == NULL)
573 return NULL; 580 return NULL;
574 581
575 reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm); 582 if (i915_gem_obj_bound(src, vm))
576 for (i = 0; i < num_pages; i++) { 583 dst->gtt_offset = i915_gem_obj_offset(src, vm);
584 else
585 dst->gtt_offset = -1;
586
587 reloc_offset = dst->gtt_offset;
588 use_ggtt = (src->cache_level == I915_CACHE_NONE &&
589 i915_is_ggtt(vm) &&
590 src->has_global_gtt_mapping &&
591 reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
592
593 /* Cannot access stolen address directly, try to use the aperture */
594 if (src->stolen) {
595 use_ggtt = true;
596
597 if (!src->has_global_gtt_mapping)
598 goto unwind;
599
600 reloc_offset = i915_gem_obj_ggtt_offset(src);
601 if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
602 goto unwind;
603 }
604
605 /* Cannot access snooped pages through the aperture */
606 if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
607 goto unwind;
608
609 dst->page_count = num_pages;
610 while (num_pages--) {
577 unsigned long flags; 611 unsigned long flags;
578 void *d; 612 void *d;
579 613
@@ -582,10 +616,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
582 goto unwind; 616 goto unwind;
583 617
584 local_irq_save(flags); 618 local_irq_save(flags);
585 if (src->cache_level == I915_CACHE_NONE && 619 if (use_ggtt) {
586 reloc_offset < dev_priv->gtt.mappable_end &&
587 src->has_global_gtt_mapping &&
588 i915_is_ggtt(vm)) {
589 void __iomem *s; 620 void __iomem *s;
590 621
591 /* Simply ignore tiling or any overlapping fence. 622 /* Simply ignore tiling or any overlapping fence.
@@ -597,14 +628,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
597 reloc_offset); 628 reloc_offset);
598 memcpy_fromio(d, s, PAGE_SIZE); 629 memcpy_fromio(d, s, PAGE_SIZE);
599 io_mapping_unmap_atomic(s); 630 io_mapping_unmap_atomic(s);
600 } else if (src->stolen) {
601 unsigned long offset;
602
603 offset = dev_priv->mm.stolen_base;
604 offset += src->stolen->start;
605 offset += i << PAGE_SHIFT;
606
607 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
608 } else { 631 } else {
609 struct page *page; 632 struct page *page;
610 void *s; 633 void *s;
@@ -621,11 +644,9 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
621 } 644 }
622 local_irq_restore(flags); 645 local_irq_restore(flags);
623 646
624 dst->pages[i] = d; 647 dst->pages[i++] = d;
625
626 reloc_offset += PAGE_SIZE; 648 reloc_offset += PAGE_SIZE;
627 } 649 }
628 dst->page_count = num_pages;
629 650
630 return dst; 651 return dst;
631 652
@@ -635,22 +656,19 @@ unwind:
635 kfree(dst); 656 kfree(dst);
636 return NULL; 657 return NULL;
637} 658}
638#define i915_error_object_create(dev_priv, src, vm) \
639 i915_error_object_create_sized((dev_priv), (src), (vm), \
640 (src)->base.size>>PAGE_SHIFT)
641
642#define i915_error_ggtt_object_create(dev_priv, src) \ 659#define i915_error_ggtt_object_create(dev_priv, src) \
643 i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \ 660 i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
644 (src)->base.size>>PAGE_SHIFT)
645 661
646static void capture_bo(struct drm_i915_error_buffer *err, 662static void capture_bo(struct drm_i915_error_buffer *err,
647 struct drm_i915_gem_object *obj) 663 struct i915_vma *vma)
648{ 664{
665 struct drm_i915_gem_object *obj = vma->obj;
666
649 err->size = obj->base.size; 667 err->size = obj->base.size;
650 err->name = obj->base.name; 668 err->name = obj->base.name;
651 err->rseqno = obj->last_read_seqno; 669 err->rseqno = obj->last_read_seqno;
652 err->wseqno = obj->last_write_seqno; 670 err->wseqno = obj->last_write_seqno;
653 err->gtt_offset = i915_gem_obj_ggtt_offset(obj); 671 err->gtt_offset = vma->node.start;
654 err->read_domains = obj->base.read_domains; 672 err->read_domains = obj->base.read_domains;
655 err->write_domain = obj->base.write_domain; 673 err->write_domain = obj->base.write_domain;
656 err->fence_reg = obj->fence_reg; 674 err->fence_reg = obj->fence_reg;
@@ -674,7 +692,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
674 int i = 0; 692 int i = 0;
675 693
676 list_for_each_entry(vma, head, mm_list) { 694 list_for_each_entry(vma, head, mm_list) {
677 capture_bo(err++, vma->obj); 695 capture_bo(err++, vma);
678 if (++i == count) 696 if (++i == count)
679 break; 697 break;
680 } 698 }
@@ -683,21 +701,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
683} 701}
684 702
685static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 703static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
686 int count, struct list_head *head) 704 int count, struct list_head *head,
705 struct i915_address_space *vm)
687{ 706{
688 struct drm_i915_gem_object *obj; 707 struct drm_i915_gem_object *obj;
689 int i = 0; 708 struct drm_i915_error_buffer * const first = err;
709 struct drm_i915_error_buffer * const last = err + count;
690 710
691 list_for_each_entry(obj, head, global_list) { 711 list_for_each_entry(obj, head, global_list) {
692 if (!i915_gem_obj_is_pinned(obj)) 712 struct i915_vma *vma;
693 continue;
694 713
695 capture_bo(err++, obj); 714 if (err == last)
696 if (++i == count)
697 break; 715 break;
716
717 list_for_each_entry(vma, &obj->vma_list, vma_link)
718 if (vma->vm == vm && vma->pin_count > 0) {
719 capture_bo(err++, vma);
720 break;
721 }
698 } 722 }
699 723
700 return i; 724 return err - first;
701} 725}
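capture_pinned_bo() now filters by address space — a VMA must belong to the vm being captured and actually be pinned — and bounds its output with first/last pointers, returning the number written as err - first instead of a separate counter. A small sketch of that bounded-writer idiom:

#include <stdio.h>

struct rec { int id; };

/* copy at most 'count' matching items, return how many were written */
static int capture(struct rec *out, int count,
		   const int *src, int n, int want_vm)
{
	struct rec *const first = out;
	struct rec *const last = out + count;
	int i;

	for (i = 0; i < n; i++) {
		if (out == last)
			break;               /* buffer full, stop early */
		if (src[i] == want_vm)       /* "vma->vm == vm && pinned" */
			(out++)->id = i;
	}
	return out - first;
}

int main(void)
{
	int vms[] = { 1, 2, 1, 1, 2 };
	struct rec buf[2];
	printf("captured %d\n", capture(buf, 2, vms, 5, 1));
	return 0;
}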
702 726
703/* Generate a semi-unique error code. The code is not meant to have meaning; the 727
@@ -890,9 +914,6 @@ static void i915_record_ring_state(struct drm_device *dev,
890 ering->hws = I915_READ(mmio); 914 ering->hws = I915_READ(mmio);
891 } 915 }
892 916
893 ering->cpu_ring_head = ring->buffer->head;
894 ering->cpu_ring_tail = ring->buffer->tail;
895
896 ering->hangcheck_score = ring->hangcheck.score; 917 ering->hangcheck_score = ring->hangcheck.score;
897 ering->hangcheck_action = ring->hangcheck.action; 918 ering->hangcheck_action = ring->hangcheck.action;
898 919
@@ -955,6 +976,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
955 976
956 for (i = 0; i < I915_NUM_RINGS; i++) { 977 for (i = 0; i < I915_NUM_RINGS; i++) {
957 struct intel_engine_cs *ring = &dev_priv->ring[i]; 978 struct intel_engine_cs *ring = &dev_priv->ring[i];
979 struct intel_ringbuffer *rbuf;
958 980
959 error->ring[i].pid = -1; 981 error->ring[i].pid = -1;
960 982
@@ -967,6 +989,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
967 989
968 request = i915_gem_find_active_request(ring); 990 request = i915_gem_find_active_request(ring);
969 if (request) { 991 if (request) {
992 struct i915_address_space *vm;
993
994 vm = request->ctx && request->ctx->ppgtt ?
995 &request->ctx->ppgtt->base :
996 &dev_priv->gtt.base;
997
970 /* We need to copy these to an anonymous buffer 998 /* We need to copy these to an anonymous buffer
971 * as the simplest method to avoid being overwritten 999 * as the simplest method to avoid being overwritten
972 * by userspace. 1000 * by userspace.
@@ -974,12 +1002,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
974 error->ring[i].batchbuffer = 1002 error->ring[i].batchbuffer =
975 i915_error_object_create(dev_priv, 1003 i915_error_object_create(dev_priv,
976 request->batch_obj, 1004 request->batch_obj,
977 request->ctx ? 1005 vm);
978 request->ctx->vm :
979 &dev_priv->gtt.base);
980 1006
981 if (HAS_BROKEN_CS_TLB(dev_priv->dev) && 1007 if (HAS_BROKEN_CS_TLB(dev_priv->dev))
982 ring->scratch.obj)
983 error->ring[i].wa_batchbuffer = 1008 error->ring[i].wa_batchbuffer =
984 i915_error_ggtt_object_create(dev_priv, 1009 i915_error_ggtt_object_create(dev_priv,
985 ring->scratch.obj); 1010 ring->scratch.obj);
@@ -998,12 +1023,27 @@ static void i915_gem_record_rings(struct drm_device *dev,
998 } 1023 }
999 } 1024 }
1000 1025
1026 if (i915.enable_execlists) {
1027 /* TODO: This is only a small fix to keep basic error
1028 * capture working, but we need to add more information
1029 * for it to be useful (e.g. dump the context being
1030 * executed).
1031 */
1032 if (request)
1033 rbuf = request->ctx->engine[ring->id].ringbuf;
1034 else
1035 rbuf = ring->default_context->engine[ring->id].ringbuf;
1036 } else
1037 rbuf = ring->buffer;
1038
1039 error->ring[i].cpu_ring_head = rbuf->head;
1040 error->ring[i].cpu_ring_tail = rbuf->tail;
1041
1001 error->ring[i].ringbuffer = 1042 error->ring[i].ringbuffer =
1002 i915_error_ggtt_object_create(dev_priv, ring->buffer->obj); 1043 i915_error_ggtt_object_create(dev_priv, rbuf->obj);
1003 1044
1004 if (ring->status_page.obj) 1045 error->ring[i].hws_page =
1005 error->ring[i].hws_page = 1046 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
1006 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
1007 1047
1008 i915_gem_record_active_context(ring, error, &error->ring[i]); 1048 i915_gem_record_active_context(ring, error, &error->ring[i]);
1009 1049
@@ -1049,9 +1089,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
1049 list_for_each_entry(vma, &vm->active_list, mm_list) 1089 list_for_each_entry(vma, &vm->active_list, mm_list)
1050 i++; 1090 i++;
1051 error->active_bo_count[ndx] = i; 1091 error->active_bo_count[ndx] = i;
1052 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 1092
1053 if (i915_gem_obj_is_pinned(obj)) 1093 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1054 i++; 1094 list_for_each_entry(vma, &obj->vma_list, vma_link)
1095 if (vma->vm == vm && vma->pin_count > 0) {
1096 i++;
1097 break;
1098 }
1099 }
1055 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 1100 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
1056 1101
1057 if (i) { 1102 if (i) {
@@ -1070,7 +1115,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
1070 error->pinned_bo_count[ndx] = 1115 error->pinned_bo_count[ndx] =
1071 capture_pinned_bo(pinned_bo, 1116 capture_pinned_bo(pinned_bo,
1072 error->pinned_bo_count[ndx], 1117 error->pinned_bo_count[ndx],
1073 &dev_priv->mm.bound_list); 1118 &dev_priv->mm.bound_list, vm);
1074 error->active_bo[ndx] = active_bo; 1119 error->active_bo[ndx] = active_bo;
1075 error->pinned_bo[ndx] = pinned_bo; 1120 error->pinned_bo[ndx] = pinned_bo;
1076} 1121}
@@ -1091,8 +1136,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
1091 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), 1136 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
1092 GFP_ATOMIC); 1137 GFP_ATOMIC);
1093 1138
1094 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 1139 if (error->active_bo == NULL ||
1095 i915_gem_capture_vm(dev_priv, error, vm, i++); 1140 error->pinned_bo == NULL ||
1141 error->active_bo_count == NULL ||
1142 error->pinned_bo_count == NULL) {
1143 kfree(error->active_bo);
1144 kfree(error->active_bo_count);
1145 kfree(error->pinned_bo);
1146 kfree(error->pinned_bo_count);
1147
1148 error->active_bo = NULL;
1149 error->active_bo_count = NULL;
1150 error->pinned_bo = NULL;
1151 error->pinned_bo_count = NULL;
1152 } else {
1153 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
1154 i915_gem_capture_vm(dev_priv, error, vm, i++);
1155
1156 error->vm_count = cnt;
1157 }
1096} 1158}
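i915_gem_capture_buffers() treats the four per-VM arrays as a unit: if any kcalloc fails, all four are freed and the pointers NULLed, so the printer — which now loops over error->vm_count — sees a consistent empty state rather than a partial one. A sketch of the all-or-nothing allocation pattern:

#include <stdio.h>
#include <stdlib.h>

struct caps { int *a, *b, *c, *d; int vm_count; };

static void capture_buffers(struct caps *e, int cnt)
{
	e->a = calloc(cnt, sizeof(int));
	e->b = calloc(cnt, sizeof(int));
	e->c = calloc(cnt, sizeof(int));
	e->d = calloc(cnt, sizeof(int));

	if (!e->a || !e->b || !e->c || !e->d) {
		/* all-or-nothing: free the partial set, leave NULLs */
		free(e->a); free(e->b); free(e->c); free(e->d);
		e->a = e->b = e->c = e->d = NULL;
		e->vm_count = 0;
		return;
	}
	e->vm_count = cnt;   /* only now is the group valid */
}

int main(void)
{
	struct caps e = { 0 };
	capture_buffers(&e, 3);
	printf("vm_count=%d\n", e.vm_count);
	free(e.a); free(e.b); free(e.c); free(e.d);
	return 0;
}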
1097 1159
1098/* Capture all registers which don't fit into another category. */ 1160/* Capture all registers which don't fit into another category. */
@@ -1295,11 +1357,11 @@ void i915_destroy_error_state(struct drm_device *dev)
1295 kref_put(&error->ref, i915_error_state_free); 1357 kref_put(&error->ref, i915_error_state_free);
1296} 1358}
1297 1359
1298const char *i915_cache_level_str(int type) 1360const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1299{ 1361{
1300 switch (type) { 1362 switch (type) {
1301 case I915_CACHE_NONE: return " uncached"; 1363 case I915_CACHE_NONE: return " uncached";
1302 case I915_CACHE_LLC: return " snooped or LLC"; 1364 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
1303 case I915_CACHE_L3_LLC: return " L3+LLC"; 1365 case I915_CACHE_L3_LLC: return " L3+LLC";
1304 case I915_CACHE_WT: return " WT"; 1366 case I915_CACHE_WT: return " WT";
1305 default: return ""; 1367 default: return "";
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0050ee9470f1..3201986bf25e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151{ 151{
152 assert_spin_locked(&dev_priv->irq_lock); 152 assert_spin_locked(&dev_priv->irq_lock);
153 153
154 if (!intel_irqs_enabled(dev_priv)) 154 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
155 return; 155 return;
156 156
157 if ((dev_priv->irq_mask & mask) != mask) { 157 if ((dev_priv->irq_mask & mask) != mask) {
@@ -238,7 +238,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
238 238
239 assert_spin_locked(&dev_priv->irq_lock); 239 assert_spin_locked(&dev_priv->irq_lock);
240 240
241 for_each_pipe(pipe) { 241 for_each_pipe(dev_priv, pipe) {
242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
243 243
244 if (crtc->cpu_fifo_underrun_disabled) 244 if (crtc->cpu_fifo_underrun_disabled)
@@ -296,7 +296,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
296 296
297 assert_spin_locked(&dev_priv->irq_lock); 297 assert_spin_locked(&dev_priv->irq_lock);
298 298
299 for_each_pipe(pipe) { 299 for_each_pipe(dev_priv, pipe) {
300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
301 301
302 if (crtc->pch_fifo_underrun_disabled) 302 if (crtc->pch_fifo_underrun_disabled)
@@ -497,7 +497,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
497 old = !intel_crtc->cpu_fifo_underrun_disabled; 497 old = !intel_crtc->cpu_fifo_underrun_disabled;
498 intel_crtc->cpu_fifo_underrun_disabled = !enable; 498 intel_crtc->cpu_fifo_underrun_disabled = !enable;
499 499
500 if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) 500 if (HAS_GMCH_DISPLAY(dev))
501 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); 501 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
502 else if (IS_GEN5(dev) || IS_GEN6(dev)) 502 else if (IS_GEN5(dev) || IS_GEN6(dev))
503 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 503 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -1020,7 +1020,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
1020 1020
1021 /* In vblank? */ 1021 /* In vblank? */
1022 if (in_vbl) 1022 if (in_vbl)
1023 ret |= DRM_SCANOUTPOS_INVBL; 1023 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1024 1024
1025 return ret; 1025 return ret;
1026} 1026}
@@ -1322,10 +1322,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1322 * @dev_priv: DRM device private 1322 * @dev_priv: DRM device private
1323 * 1323 *
1324 */ 1324 */
1325static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) 1325static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1326{ 1326{
1327 u32 residency_C0_up = 0, residency_C0_down = 0; 1327 u32 residency_C0_up = 0, residency_C0_down = 0;
1328 u8 new_delay, adj; 1328 int new_delay, adj;
1329 1329
1330 dev_priv->rps.ei_interrupt_count++; 1330 dev_priv->rps.ei_interrupt_count++;
1331 1331
@@ -1627,6 +1627,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1627 struct drm_i915_private *dev_priv, 1627 struct drm_i915_private *dev_priv,
1628 u32 master_ctl) 1628 u32 master_ctl)
1629{ 1629{
1630 struct intel_engine_cs *ring;
1630 u32 rcs, bcs, vcs; 1631 u32 rcs, bcs, vcs;
1631 uint32_t tmp = 0; 1632 uint32_t tmp = 0;
1632 irqreturn_t ret = IRQ_NONE; 1633 irqreturn_t ret = IRQ_NONE;
@@ -1636,12 +1637,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1636 if (tmp) { 1637 if (tmp) {
1637 I915_WRITE(GEN8_GT_IIR(0), tmp); 1638 I915_WRITE(GEN8_GT_IIR(0), tmp);
1638 ret = IRQ_HANDLED; 1639 ret = IRQ_HANDLED;
1640
1639 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1641 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1640 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1642 ring = &dev_priv->ring[RCS];
1641 if (rcs & GT_RENDER_USER_INTERRUPT) 1643 if (rcs & GT_RENDER_USER_INTERRUPT)
1642 notify_ring(dev, &dev_priv->ring[RCS]); 1644 notify_ring(dev, ring);
1645 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1646 intel_execlists_handle_ctx_events(ring);
1647
1648 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1649 ring = &dev_priv->ring[BCS];
1643 if (bcs & GT_RENDER_USER_INTERRUPT) 1650 if (bcs & GT_RENDER_USER_INTERRUPT)
1644 notify_ring(dev, &dev_priv->ring[BCS]); 1651 notify_ring(dev, ring);
1652 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1653 intel_execlists_handle_ctx_events(ring);
1645 } else 1654 } else
1646 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1655 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1647 } 1656 }
@@ -1651,12 +1660,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1651 if (tmp) { 1660 if (tmp) {
1652 I915_WRITE(GEN8_GT_IIR(1), tmp); 1661 I915_WRITE(GEN8_GT_IIR(1), tmp);
1653 ret = IRQ_HANDLED; 1662 ret = IRQ_HANDLED;
1663
1654 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1664 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1665 ring = &dev_priv->ring[VCS];
1655 if (vcs & GT_RENDER_USER_INTERRUPT) 1666 if (vcs & GT_RENDER_USER_INTERRUPT)
1656 notify_ring(dev, &dev_priv->ring[VCS]); 1667 notify_ring(dev, ring);
1668 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1669 intel_execlists_handle_ctx_events(ring);
1670
1657 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; 1671 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1672 ring = &dev_priv->ring[VCS2];
1658 if (vcs & GT_RENDER_USER_INTERRUPT) 1673 if (vcs & GT_RENDER_USER_INTERRUPT)
1659 notify_ring(dev, &dev_priv->ring[VCS2]); 1674 notify_ring(dev, ring);
1675 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1676 intel_execlists_handle_ctx_events(ring);
1660 } else 1677 } else
1661 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1678 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1662 } 1679 }
@@ -1677,9 +1694,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1677 if (tmp) { 1694 if (tmp) {
1678 I915_WRITE(GEN8_GT_IIR(3), tmp); 1695 I915_WRITE(GEN8_GT_IIR(3), tmp);
1679 ret = IRQ_HANDLED; 1696 ret = IRQ_HANDLED;
1697
1680 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1698 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1699 ring = &dev_priv->ring[VECS];
1681 if (vcs & GT_RENDER_USER_INTERRUPT) 1700 if (vcs & GT_RENDER_USER_INTERRUPT)
1682 notify_ring(dev, &dev_priv->ring[VECS]); 1701 notify_ring(dev, ring);
1702 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1703 intel_execlists_handle_ctx_events(ring);
1683 } else 1704 } else
1684 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1705 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1685 } 1706 }
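
Each GT IIR bank now raises two events per ring: user interrupts wake request waiters as before, while GT_CONTEXT_SWITCH_INTERRUPT drives the new execlists submission machinery. The pattern repeated above for RCS/BCS/VCS/VCS2/VECS could be captured by a helper along these lines (hypothetical; the handler open-codes it per ring):

    static void gen8_ring_irq(struct drm_device *dev,
                              struct intel_engine_cs *ring, u32 iir)
    {
            if (iir & GT_RENDER_USER_INTERRUPT)
                    notify_ring(dev, ring);
            if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
                    intel_execlists_handle_ctx_events(ring);
    }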
@@ -1772,7 +1793,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1772 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1793 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1773 } 1794 }
1774 1795
1775 DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); 1796 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1797 port_name(port),
1798 long_hpd ? "long" : "short");
1776 /* for long HPD pulses we want to have the digital queue happen, 1799 /* for long HPD pulses we want to have the digital queue happen,
1777 but we still want HPD storm detection to function. */ 1800 but we still want HPD storm detection to function. */
1778 if (long_hpd) { 1801 if (long_hpd) {
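
port_name() lets the debug message print a readable port letter plus pulse type, e.g. "digital hpd port B - long", instead of two bare integers. Assuming the usual enum-to-letter mapping:

    #define port_name(p) ((p) + 'A')   /* PORT_A..PORT_E -> 'A'..'E' */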
@@ -1984,14 +2007,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1984 2007
1985static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 2008static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1986{ 2009{
1987 struct intel_crtc *crtc;
1988
1989 if (!drm_handle_vblank(dev, pipe)) 2010 if (!drm_handle_vblank(dev, pipe))
1990 return false; 2011 return false;
1991 2012
1992 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
1993 wake_up(&crtc->vbl_wait);
1994
1995 return true; 2013 return true;
1996} 2014}
1997 2015
@@ -2002,7 +2020,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2002 int pipe; 2020 int pipe;
2003 2021
2004 spin_lock(&dev_priv->irq_lock); 2022 spin_lock(&dev_priv->irq_lock);
2005 for_each_pipe(pipe) { 2023 for_each_pipe(dev_priv, pipe) {
2006 int reg; 2024 int reg;
2007 u32 mask, iir_bit = 0; 2025 u32 mask, iir_bit = 0;
2008 2026
@@ -2047,9 +2065,10 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2047 } 2065 }
2048 spin_unlock(&dev_priv->irq_lock); 2066 spin_unlock(&dev_priv->irq_lock);
2049 2067
2050 for_each_pipe(pipe) { 2068 for_each_pipe(dev_priv, pipe) {
2051 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2069 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2052 intel_pipe_handle_vblank(dev, pipe); 2070 intel_pipe_handle_vblank(dev, pipe))
2071 intel_check_page_flip(dev, pipe);
2053 2072
2054 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 2073 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2055 intel_prepare_page_flip(dev, pipe); 2074 intel_prepare_page_flip(dev, pipe);
@@ -2216,7 +2235,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2216 DRM_ERROR("PCH poison interrupt\n"); 2235 DRM_ERROR("PCH poison interrupt\n");
2217 2236
2218 if (pch_iir & SDE_FDI_MASK) 2237 if (pch_iir & SDE_FDI_MASK)
2219 for_each_pipe(pipe) 2238 for_each_pipe(dev_priv, pipe)
2220 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2239 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2221 pipe_name(pipe), 2240 pipe_name(pipe),
2222 I915_READ(FDI_RX_IIR(pipe))); 2241 I915_READ(FDI_RX_IIR(pipe)));
@@ -2247,7 +2266,7 @@ static void ivb_err_int_handler(struct drm_device *dev)
2247 if (err_int & ERR_INT_POISON) 2266 if (err_int & ERR_INT_POISON)
2248 DRM_ERROR("Poison interrupt\n"); 2267 DRM_ERROR("Poison interrupt\n");
2249 2268
2250 for_each_pipe(pipe) { 2269 for_each_pipe(dev_priv, pipe) {
2251 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 2270 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2252 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2271 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2253 false)) 2272 false))
@@ -2324,7 +2343,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2324 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2343 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2325 2344
2326 if (pch_iir & SDE_FDI_MASK_CPT) 2345 if (pch_iir & SDE_FDI_MASK_CPT)
2327 for_each_pipe(pipe) 2346 for_each_pipe(dev_priv, pipe)
2328 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2347 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2329 pipe_name(pipe), 2348 pipe_name(pipe),
2330 I915_READ(FDI_RX_IIR(pipe))); 2349 I915_READ(FDI_RX_IIR(pipe)));
@@ -2347,9 +2366,10 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2347 if (de_iir & DE_POISON) 2366 if (de_iir & DE_POISON)
2348 DRM_ERROR("Poison interrupt\n"); 2367 DRM_ERROR("Poison interrupt\n");
2349 2368
2350 for_each_pipe(pipe) { 2369 for_each_pipe(dev_priv, pipe) {
2351 if (de_iir & DE_PIPE_VBLANK(pipe)) 2370 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2352 intel_pipe_handle_vblank(dev, pipe); 2371 intel_pipe_handle_vblank(dev, pipe))
2372 intel_check_page_flip(dev, pipe);
2353 2373
2354 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2374 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2355 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2375 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
@@ -2397,9 +2417,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2397 if (de_iir & DE_GSE_IVB) 2417 if (de_iir & DE_GSE_IVB)
2398 intel_opregion_asle_intr(dev); 2418 intel_opregion_asle_intr(dev);
2399 2419
2400 for_each_pipe(pipe) { 2420 for_each_pipe(dev_priv, pipe) {
2401 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2421 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2402 intel_pipe_handle_vblank(dev, pipe); 2422 intel_pipe_handle_vblank(dev, pipe))
2423 intel_check_page_flip(dev, pipe);
2403 2424
2404 /* plane/pipes map 1:1 on ilk+ */ 2425 /* plane/pipes map 1:1 on ilk+ */
2405 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2426 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
@@ -2544,7 +2565,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2544 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2565 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2545 } 2566 }
2546 2567
2547 for_each_pipe(pipe) { 2568 for_each_pipe(dev_priv, pipe) {
2548 uint32_t pipe_iir; 2569 uint32_t pipe_iir;
2549 2570
2550 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2571 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -2554,8 +2575,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2554 if (pipe_iir) { 2575 if (pipe_iir) {
2555 ret = IRQ_HANDLED; 2576 ret = IRQ_HANDLED;
2556 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2577 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2557 if (pipe_iir & GEN8_PIPE_VBLANK) 2578 if (pipe_iir & GEN8_PIPE_VBLANK &&
2558 intel_pipe_handle_vblank(dev, pipe); 2579 intel_pipe_handle_vblank(dev, pipe))
2580 intel_check_page_flip(dev, pipe);
2559 2581
2560 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2582 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2561 intel_prepare_page_flip(dev, pipe); 2583 intel_prepare_page_flip(dev, pipe);
@@ -2763,7 +2785,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2763 2785
2764 if (eir & I915_ERROR_MEMORY_REFRESH) { 2786 if (eir & I915_ERROR_MEMORY_REFRESH) {
2765 pr_err("memory refresh error:\n"); 2787 pr_err("memory refresh error:\n");
2766 for_each_pipe(pipe) 2788 for_each_pipe(dev_priv, pipe)
2767 pr_err("pipe %c stat: 0x%08x\n", 2789 pr_err("pipe %c stat: 0x%08x\n",
2768 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2790 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2769 /* pipestat has already been acked */ 2791 /* pipestat has already been acked */
@@ -2860,52 +2882,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
2860 schedule_work(&dev_priv->gpu_error.work); 2882 schedule_work(&dev_priv->gpu_error.work);
2861} 2883}
2862 2884
2863static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2864{
2865 struct drm_i915_private *dev_priv = dev->dev_private;
2866 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2867 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2868 struct drm_i915_gem_object *obj;
2869 struct intel_unpin_work *work;
2870 unsigned long flags;
2871 bool stall_detected;
2872
2873 /* Ignore early vblank irqs */
2874 if (intel_crtc == NULL)
2875 return;
2876
2877 spin_lock_irqsave(&dev->event_lock, flags);
2878 work = intel_crtc->unpin_work;
2879
2880 if (work == NULL ||
2881 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2882 !work->enable_stall_check) {
2883 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2884 spin_unlock_irqrestore(&dev->event_lock, flags);
2885 return;
2886 }
2887
2888 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2889 obj = work->pending_flip_obj;
2890 if (INTEL_INFO(dev)->gen >= 4) {
2891 int dspsurf = DSPSURF(intel_crtc->plane);
2892 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2893 i915_gem_obj_ggtt_offset(obj);
2894 } else {
2895 int dspaddr = DSPADDR(intel_crtc->plane);
2896 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2897 crtc->y * crtc->primary->fb->pitches[0] +
2898 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2899 }
2900
2901 spin_unlock_irqrestore(&dev->event_lock, flags);
2902
2903 if (stall_detected) {
2904 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2905 intel_prepare_page_flip(dev, intel_crtc->plane);
2906 }
2907}
2908
2909/* Called from drm generic code, passed 'crtc' which 2885/* Called from drm generic code, passed 'crtc' which
2910 * we use as a pipe index 2886 * we use as a pipe index
2911 */ 2887 */
@@ -3441,7 +3417,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
3441 3417
3442 I915_WRITE(PORT_HOTPLUG_EN, 0); 3418 I915_WRITE(PORT_HOTPLUG_EN, 0);
3443 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3419 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3444 for_each_pipe(pipe) 3420 for_each_pipe(dev_priv, pipe)
3445 I915_WRITE(PIPESTAT(pipe), 0xffff); 3421 I915_WRITE(PIPESTAT(pipe), 0xffff);
3446 I915_WRITE(VLV_IIR, 0xffffffff); 3422 I915_WRITE(VLV_IIR, 0xffffffff);
3447 I915_WRITE(VLV_IMR, 0xffffffff); 3423 I915_WRITE(VLV_IMR, 0xffffffff);
@@ -3467,7 +3443,7 @@ static void gen8_irq_reset(struct drm_device *dev)
3467 3443
3468 gen8_gt_irq_reset(dev_priv); 3444 gen8_gt_irq_reset(dev_priv);
3469 3445
3470 for_each_pipe(pipe) 3446 for_each_pipe(dev_priv, pipe)
3471 if (intel_display_power_enabled(dev_priv, 3447 if (intel_display_power_enabled(dev_priv,
3472 POWER_DOMAIN_PIPE(pipe))) 3448 POWER_DOMAIN_PIPE(pipe)))
3473 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3449 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
@@ -3510,7 +3486,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3510 I915_WRITE(PORT_HOTPLUG_EN, 0); 3486 I915_WRITE(PORT_HOTPLUG_EN, 0);
3511 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3487 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3512 3488
3513 for_each_pipe(pipe) 3489 for_each_pipe(dev_priv, pipe)
3514 I915_WRITE(PIPESTAT(pipe), 0xffff); 3490 I915_WRITE(PIPESTAT(pipe), 0xffff);
3515 3491
3516 I915_WRITE(VLV_IMR, 0xffffffff); 3492 I915_WRITE(VLV_IMR, 0xffffffff);
@@ -3522,18 +3498,17 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3522static void ibx_hpd_irq_setup(struct drm_device *dev) 3498static void ibx_hpd_irq_setup(struct drm_device *dev)
3523{ 3499{
3524 struct drm_i915_private *dev_priv = dev->dev_private; 3500 struct drm_i915_private *dev_priv = dev->dev_private;
3525 struct drm_mode_config *mode_config = &dev->mode_config;
3526 struct intel_encoder *intel_encoder; 3501 struct intel_encoder *intel_encoder;
3527 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3502 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3528 3503
3529 if (HAS_PCH_IBX(dev)) { 3504 if (HAS_PCH_IBX(dev)) {
3530 hotplug_irqs = SDE_HOTPLUG_MASK; 3505 hotplug_irqs = SDE_HOTPLUG_MASK;
3531 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3506 for_each_intel_encoder(dev, intel_encoder)
3532 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3507 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3533 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3508 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3534 } else { 3509 } else {
3535 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3510 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3536 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3511 for_each_intel_encoder(dev, intel_encoder)
3537 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3512 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3538 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3513 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3539 } 3514 }
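
for_each_intel_encoder() replaces the open-coded list walk and drops the now-unused mode_config local. A sketch of the macro, assuming it is a thin wrapper over the same list_for_each_entry() it replaces:

    #define for_each_intel_encoder(dev, intel_encoder) \
            list_for_each_entry(intel_encoder, \
                                &(dev)->mode_config.encoder_list, \
                                base.head)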
@@ -3782,28 +3757,31 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
3782 3757
3783static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3758static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3784{ 3759{
3785 int i;
3786
3787 /* These are interrupts we'll toggle with the ring mask register */ 3760 /* These are interrupts we'll toggle with the ring mask register */
3788 uint32_t gt_interrupts[] = { 3761 uint32_t gt_interrupts[] = {
3789 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3762 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3763 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3790 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3764 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3791 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3765 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3766 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3792 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3767 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3793 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3768 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3769 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3770 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3794 0, 3771 0,
3795 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3772 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3773 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3796 }; 3774 };
3797 3775
3798 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3799 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3800
3801 dev_priv->pm_irq_mask = 0xffffffff; 3776 dev_priv->pm_irq_mask = 0xffffffff;
3777 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3778 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3779 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3780 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3802} 3781}
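
With the array loop gone, each GT bank is programmed explicitly so bank 2 can take the PM masks (pm_irq_mask/pm_rps_events) rather than a gt_interrupts[] entry. GEN8_IRQ_INIT_NDX() is assumed to be the usual unmask-and-enable helper, roughly:

    /* sketch; the real helper also verifies the IIR is clear first */
    #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
            I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
            I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
            POSTING_READ(GEN8_##type##_IER(which)); \
    } while (0)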
3803 3782
3804static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3783static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3805{ 3784{
3806 struct drm_device *dev = dev_priv->dev;
3807 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3785 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3808 GEN8_PIPE_CDCLK_CRC_DONE | 3786 GEN8_PIPE_CDCLK_CRC_DONE |
3809 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3787 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -3814,7 +3792,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3814 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3792 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3815 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3793 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3816 3794
3817 for_each_pipe(pipe) 3795 for_each_pipe(dev_priv, pipe)
3818 if (intel_display_power_enabled(dev_priv, 3796 if (intel_display_power_enabled(dev_priv,
3819 POWER_DOMAIN_PIPE(pipe))) 3797 POWER_DOMAIN_PIPE(pipe)))
3820 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3798 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
@@ -3859,12 +3837,12 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
3859 */ 3837 */
3860 dev_priv->irq_mask = ~enable_mask; 3838 dev_priv->irq_mask = ~enable_mask;
3861 3839
3862 for_each_pipe(pipe) 3840 for_each_pipe(dev_priv, pipe)
3863 I915_WRITE(PIPESTAT(pipe), 0xffff); 3841 I915_WRITE(PIPESTAT(pipe), 0xffff);
3864 3842
3865 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3843 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3866 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3844 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3867 for_each_pipe(pipe) 3845 for_each_pipe(dev_priv, pipe)
3868 i915_enable_pipestat(dev_priv, pipe, pipestat_enable); 3846 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3869 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3847 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3870 3848
@@ -3901,7 +3879,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3901 3879
3902 I915_WRITE(VLV_MASTER_IER, 0); 3880 I915_WRITE(VLV_MASTER_IER, 0);
3903 3881
3904 for_each_pipe(pipe) 3882 for_each_pipe(dev_priv, pipe)
3905 I915_WRITE(PIPESTAT(pipe), 0xffff); 3883 I915_WRITE(PIPESTAT(pipe), 0xffff);
3906 3884
3907 I915_WRITE(HWSTAM, 0xffffffff); 3885 I915_WRITE(HWSTAM, 0xffffffff);
@@ -3963,7 +3941,7 @@ do { \
3963 I915_WRITE(PORT_HOTPLUG_EN, 0); 3941 I915_WRITE(PORT_HOTPLUG_EN, 0);
3964 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3942 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3965 3943
3966 for_each_pipe(pipe) 3944 for_each_pipe(dev_priv, pipe)
3967 I915_WRITE(PIPESTAT(pipe), 0xffff); 3945 I915_WRITE(PIPESTAT(pipe), 0xffff);
3968 3946
3969 I915_WRITE(VLV_IMR, 0xffffffff); 3947 I915_WRITE(VLV_IMR, 0xffffffff);
@@ -3987,7 +3965,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3987 struct drm_i915_private *dev_priv = dev->dev_private; 3965 struct drm_i915_private *dev_priv = dev->dev_private;
3988 int pipe; 3966 int pipe;
3989 3967
3990 for_each_pipe(pipe) 3968 for_each_pipe(dev_priv, pipe)
3991 I915_WRITE(PIPESTAT(pipe), 0); 3969 I915_WRITE(PIPESTAT(pipe), 0);
3992 I915_WRITE16(IMR, 0xffff); 3970 I915_WRITE16(IMR, 0xffff);
3993 I915_WRITE16(IER, 0x0); 3971 I915_WRITE16(IER, 0x0);
@@ -4041,7 +4019,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4041 return false; 4019 return false;
4042 4020
4043 if ((iir & flip_pending) == 0) 4021 if ((iir & flip_pending) == 0)
4044 return false; 4022 goto check_page_flip;
4045 4023
4046 intel_prepare_page_flip(dev, plane); 4024 intel_prepare_page_flip(dev, plane);
4047 4025
@@ -4052,11 +4030,14 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4052 * an interrupt per se, we watch for the change at vblank. 4030 * an interrupt per se, we watch for the change at vblank.
4053 */ 4031 */
4054 if (I915_READ16(ISR) & flip_pending) 4032 if (I915_READ16(ISR) & flip_pending)
4055 return false; 4033 goto check_page_flip;
4056 4034
4057 intel_finish_page_flip(dev, pipe); 4035 intel_finish_page_flip(dev, pipe);
4058
4059 return true; 4036 return true;
4037
4038check_page_flip:
4039 intel_check_page_flip(dev, pipe);
4040 return false;
4060} 4041}
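
i8xx_handle_vblank() (and i915_handle_vblank() below) keep their old contract of returning true only when a pending flip was actually finished, but every early-out now detours through intel_check_page_flip() so a missed flip-done interrupt can still be recovered at the next vblank instead of stalling the flip forever. The i965 caller visible later in this series shows the intended caller shape:

    if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
        i915_handle_vblank(dev, pipe, pipe, iir))
            flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);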
4061 4042
4062static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4043static irqreturn_t i8xx_irq_handler(int irq, void *arg)
@@ -4087,7 +4068,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4087 "Command parser error, iir 0x%08x", 4068 "Command parser error, iir 0x%08x",
4088 iir); 4069 iir);
4089 4070
4090 for_each_pipe(pipe) { 4071 for_each_pipe(dev_priv, pipe) {
4091 int reg = PIPESTAT(pipe); 4072 int reg = PIPESTAT(pipe);
4092 pipe_stats[pipe] = I915_READ(reg); 4073 pipe_stats[pipe] = I915_READ(reg);
4093 4074
@@ -4107,7 +4088,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4107 if (iir & I915_USER_INTERRUPT) 4088 if (iir & I915_USER_INTERRUPT)
4108 notify_ring(dev, &dev_priv->ring[RCS]); 4089 notify_ring(dev, &dev_priv->ring[RCS]);
4109 4090
4110 for_each_pipe(pipe) { 4091 for_each_pipe(dev_priv, pipe) {
4111 int plane = pipe; 4092 int plane = pipe;
4112 if (HAS_FBC(dev)) 4093 if (HAS_FBC(dev))
4113 plane = !plane; 4094 plane = !plane;
@@ -4135,7 +4116,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
4135 struct drm_i915_private *dev_priv = dev->dev_private; 4116 struct drm_i915_private *dev_priv = dev->dev_private;
4136 int pipe; 4117 int pipe;
4137 4118
4138 for_each_pipe(pipe) { 4119 for_each_pipe(dev_priv, pipe) {
4139 /* Clear enable bits; then clear status bits */ 4120 /* Clear enable bits; then clear status bits */
4140 I915_WRITE(PIPESTAT(pipe), 0); 4121 I915_WRITE(PIPESTAT(pipe), 0);
4141 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4122 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
@@ -4156,7 +4137,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
4156 } 4137 }
4157 4138
4158 I915_WRITE16(HWSTAM, 0xeffe); 4139 I915_WRITE16(HWSTAM, 0xeffe);
4159 for_each_pipe(pipe) 4140 for_each_pipe(dev_priv, pipe)
4160 I915_WRITE(PIPESTAT(pipe), 0); 4141 I915_WRITE(PIPESTAT(pipe), 0);
4161 I915_WRITE(IMR, 0xffffffff); 4142 I915_WRITE(IMR, 0xffffffff);
4162 I915_WRITE(IER, 0x0); 4143 I915_WRITE(IER, 0x0);
@@ -4226,7 +4207,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
4226 return false; 4207 return false;
4227 4208
4228 if ((iir & flip_pending) == 0) 4209 if ((iir & flip_pending) == 0)
4229 return false; 4210 goto check_page_flip;
4230 4211
4231 intel_prepare_page_flip(dev, plane); 4212 intel_prepare_page_flip(dev, plane);
4232 4213
@@ -4237,11 +4218,14 @@ static bool i915_handle_vblank(struct drm_device *dev,
4237 * an interrupt per se, we watch for the change at vblank. 4218 * an interrupt per se, we watch for the change at vblank.
4238 */ 4219 */
4239 if (I915_READ(ISR) & flip_pending) 4220 if (I915_READ(ISR) & flip_pending)
4240 return false; 4221 goto check_page_flip;
4241 4222
4242 intel_finish_page_flip(dev, pipe); 4223 intel_finish_page_flip(dev, pipe);
4243
4244 return true; 4224 return true;
4225
4226check_page_flip:
4227 intel_check_page_flip(dev, pipe);
4228 return false;
4245} 4229}
4246 4230
4247static irqreturn_t i915_irq_handler(int irq, void *arg) 4231static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -4271,7 +4255,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4271 "Command parser error, iir 0x%08x", 4255 "Command parser error, iir 0x%08x",
4272 iir); 4256 iir);
4273 4257
4274 for_each_pipe(pipe) { 4258 for_each_pipe(dev_priv, pipe) {
4275 int reg = PIPESTAT(pipe); 4259 int reg = PIPESTAT(pipe);
4276 pipe_stats[pipe] = I915_READ(reg); 4260 pipe_stats[pipe] = I915_READ(reg);
4277 4261
@@ -4297,7 +4281,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4297 if (iir & I915_USER_INTERRUPT) 4281 if (iir & I915_USER_INTERRUPT)
4298 notify_ring(dev, &dev_priv->ring[RCS]); 4282 notify_ring(dev, &dev_priv->ring[RCS]);
4299 4283
4300 for_each_pipe(pipe) { 4284 for_each_pipe(dev_priv, pipe) {
4301 int plane = pipe; 4285 int plane = pipe;
4302 if (HAS_FBC(dev)) 4286 if (HAS_FBC(dev))
4303 plane = !plane; 4287 plane = !plane;
@@ -4355,7 +4339,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
4355 } 4339 }
4356 4340
4357 I915_WRITE16(HWSTAM, 0xffff); 4341 I915_WRITE16(HWSTAM, 0xffff);
4358 for_each_pipe(pipe) { 4342 for_each_pipe(dev_priv, pipe) {
4359 /* Clear enable bits; then clear status bits */ 4343 /* Clear enable bits; then clear status bits */
4360 I915_WRITE(PIPESTAT(pipe), 0); 4344 I915_WRITE(PIPESTAT(pipe), 0);
4361 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4345 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
@@ -4375,7 +4359,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
4375 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4359 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4376 4360
4377 I915_WRITE(HWSTAM, 0xeffe); 4361 I915_WRITE(HWSTAM, 0xeffe);
4378 for_each_pipe(pipe) 4362 for_each_pipe(dev_priv, pipe)
4379 I915_WRITE(PIPESTAT(pipe), 0); 4363 I915_WRITE(PIPESTAT(pipe), 0);
4380 I915_WRITE(IMR, 0xffffffff); 4364 I915_WRITE(IMR, 0xffffffff);
4381 I915_WRITE(IER, 0x0); 4365 I915_WRITE(IER, 0x0);
@@ -4444,7 +4428,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
4444static void i915_hpd_irq_setup(struct drm_device *dev) 4428static void i915_hpd_irq_setup(struct drm_device *dev)
4445{ 4429{
4446 struct drm_i915_private *dev_priv = dev->dev_private; 4430 struct drm_i915_private *dev_priv = dev->dev_private;
4447 struct drm_mode_config *mode_config = &dev->mode_config;
4448 struct intel_encoder *intel_encoder; 4431 struct intel_encoder *intel_encoder;
4449 u32 hotplug_en; 4432 u32 hotplug_en;
4450 4433
@@ -4455,7 +4438,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
4455 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4438 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4456 /* Note HDMI and DP share hotplug bits */ 4439 /* Note HDMI and DP share hotplug bits */
4457 /* enable bits are the same for all generations */ 4440 /* enable bits are the same for all generations */
4458 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 4441 for_each_intel_encoder(dev, intel_encoder)
4459 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4442 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4460 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4443 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4461 /* Programming the CRT detection parameters tends 4444 /* Programming the CRT detection parameters tends
@@ -4501,7 +4484,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4501 "Command parser error, iir 0x%08x", 4484 "Command parser error, iir 0x%08x",
4502 iir); 4485 iir);
4503 4486
4504 for_each_pipe(pipe) { 4487 for_each_pipe(dev_priv, pipe) {
4505 int reg = PIPESTAT(pipe); 4488 int reg = PIPESTAT(pipe);
4506 pipe_stats[pipe] = I915_READ(reg); 4489 pipe_stats[pipe] = I915_READ(reg);
4507 4490
@@ -4532,7 +4515,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4532 if (iir & I915_BSD_USER_INTERRUPT) 4515 if (iir & I915_BSD_USER_INTERRUPT)
4533 notify_ring(dev, &dev_priv->ring[VCS]); 4516 notify_ring(dev, &dev_priv->ring[VCS]);
4534 4517
4535 for_each_pipe(pipe) { 4518 for_each_pipe(dev_priv, pipe) {
4536 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4519 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4537 i915_handle_vblank(dev, pipe, pipe, iir)) 4520 i915_handle_vblank(dev, pipe, pipe, iir))
4538 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4521 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
@@ -4589,12 +4572,12 @@ static void i965_irq_uninstall(struct drm_device * dev)
4589 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4572 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4590 4573
4591 I915_WRITE(HWSTAM, 0xffffffff); 4574 I915_WRITE(HWSTAM, 0xffffffff);
4592 for_each_pipe(pipe) 4575 for_each_pipe(dev_priv, pipe)
4593 I915_WRITE(PIPESTAT(pipe), 0); 4576 I915_WRITE(PIPESTAT(pipe), 0);
4594 I915_WRITE(IMR, 0xffffffff); 4577 I915_WRITE(IMR, 0xffffffff);
4595 I915_WRITE(IER, 0x0); 4578 I915_WRITE(IER, 0x0);
4596 4579
4597 for_each_pipe(pipe) 4580 for_each_pipe(dev_priv, pipe)
4598 I915_WRITE(PIPESTAT(pipe), 4581 I915_WRITE(PIPESTAT(pipe),
4599 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4582 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4600 I915_WRITE(IIR, I915_READ(IIR)); 4583 I915_WRITE(IIR, I915_READ(IIR));
@@ -4652,8 +4635,8 @@ void intel_irq_init(struct drm_device *dev)
4652 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4635 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4653 4636
4654 /* Let's track the enabled rps events */ 4637 /* Let's track the enabled rps events */
4655 if (IS_VALLEYVIEW(dev)) 4638 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
4656 /* WaGsvRC0ResidenncyMethod:VLV */ 4639 /* WaGsvRC0ResidencyMethod:vlv */
4657 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4640 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4658 else 4641 else
4659 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4642 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4680,6 +4663,14 @@ void intel_irq_init(struct drm_device *dev)
4680 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4663 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4681 } 4664 }
4682 4665
4666 /*
4667 * Opt out of the vblank disable timer on everything except gen2.
4668 * Gen2 doesn't have a hardware frame counter and so depends on
 4669 * vblank interrupts to produce sane vblank sequence numbers.
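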
4670 */
4671 if (!IS_GEN2(dev))
4672 dev->vblank_disable_immediate = true;
4673
4683 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4674 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4684 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4675 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4685 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4676 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
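
vblank_disable_immediate lets the DRM core shut the vblank interrupt off as soon as the last reference drops, instead of waiting out the vblank off-delay timer; that is only safe when a hardware frame counter exists to resynchronize the sequence number on re-enable, hence the gen2 exception:

    if (!IS_GEN2(dev))
            dev->vblank_disable_immediate = true;  /* gen2: no HW frame counter */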
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 9842fd2e742a..c91cb2033cc5 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,6 +35,7 @@ struct i915_params i915 __read_mostly = {
35 .vbt_sdvo_panel_type = -1, 35 .vbt_sdvo_panel_type = -1,
36 .enable_rc6 = -1, 36 .enable_rc6 = -1,
37 .enable_fbc = -1, 37 .enable_fbc = -1,
38 .enable_execlists = 0,
38 .enable_hangcheck = true, 39 .enable_hangcheck = true,
39 .enable_ppgtt = -1, 40 .enable_ppgtt = -1,
40 .enable_psr = 0, 41 .enable_psr = 0,
@@ -118,6 +119,11 @@ MODULE_PARM_DESC(enable_ppgtt,
118 "Override PPGTT usage. " 119 "Override PPGTT usage. "
119 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 120 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
120 121
122module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
123MODULE_PARM_DESC(enable_execlists,
124 "Override execlists usage. "
125 "(-1=auto, 0=disabled [default], 1=enabled)");
126
121module_param_named(enable_psr, i915.enable_psr, int, 0600); 127module_param_named(enable_psr, i915.enable_psr, int, 0600);
122MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); 128MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
123 129
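
Since the new parameter is registered with 0400 permissions it is read-only at runtime and must be chosen at load time, e.g. booting with i915.enable_execlists=1 on the kernel command line or:

    modprobe i915 enable_execlists=1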
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f29b44c86a2f..c01e5f31430e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -143,6 +143,14 @@
143#define GAB_CTL 0x24000 143#define GAB_CTL 0x24000
144#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) 144#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
145 145
146#define GEN7_BIOS_RESERVED 0x1082C0
147#define GEN7_BIOS_RESERVED_1M (0 << 5)
148#define GEN7_BIOS_RESERVED_256K (1 << 5)
149#define GEN8_BIOS_RESERVED_SHIFT 7
150#define GEN7_BIOS_RESERVED_MASK 0x1
151#define GEN8_BIOS_RESERVED_MASK 0x3
152
153
146/* VGA stuff */ 154/* VGA stuff */
147 155
148#define VGA_ST01_MDA 0x3ba 156#define VGA_ST01_MDA 0x3ba
@@ -272,6 +280,7 @@
272#define MI_SEMAPHORE_POLL (1<<15) 280#define MI_SEMAPHORE_POLL (1<<15)
273#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) 281#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
274#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 282#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
283#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
275#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 284#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
276#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 285#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
277#define MI_STORE_DWORD_INDEX_SHIFT 2 286#define MI_STORE_DWORD_INDEX_SHIFT 2
@@ -282,6 +291,7 @@
 282 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 291
283 */ 292 */
284#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) 293#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
294#define MI_LRI_FORCE_POSTED (1<<12)
285#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1) 295#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
286#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1) 296#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
287#define MI_SRM_LRM_GLOBAL_GTT (1<<22) 297#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -501,10 +511,26 @@
501#define BUNIT_REG_BISOC 0x11 511#define BUNIT_REG_BISOC 0x11
502 512
503#define PUNIT_REG_DSPFREQ 0x36 513#define PUNIT_REG_DSPFREQ 0x36
514#define DSPFREQSTAT_SHIFT_CHV 24
515#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
516#define DSPFREQGUAR_SHIFT_CHV 8
517#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV)
504#define DSPFREQSTAT_SHIFT 30 518#define DSPFREQSTAT_SHIFT 30
505#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) 519#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
506#define DSPFREQGUAR_SHIFT 14 520#define DSPFREQGUAR_SHIFT 14
507#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) 521#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
522#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
523#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
524#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
525#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe))
526#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe))
527#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe))
528#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16))
529#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe))
530#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe))
531#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe))
532#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
533#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
508 534
509/* See the PUNIT HAS v0.8 for the below bits */ 535/* See the PUNIT HAS v0.8 for the below bits */
510enum punit_power_well { 536enum punit_power_well {
@@ -518,6 +544,11 @@ enum punit_power_well {
518 PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, 544 PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
519 PUNIT_POWER_WELL_DPIO_RX0 = 10, 545 PUNIT_POWER_WELL_DPIO_RX0 = 10,
520 PUNIT_POWER_WELL_DPIO_RX1 = 11, 546 PUNIT_POWER_WELL_DPIO_RX1 = 11,
547 PUNIT_POWER_WELL_DPIO_CMN_D = 12,
548 /* FIXME: guesswork below */
549 PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13,
550 PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14,
551 PUNIT_POWER_WELL_DPIO_RX2 = 15,
521 552
522 PUNIT_POWER_WELL_NUM, 553 PUNIT_POWER_WELL_NUM,
523}; 554};
@@ -838,8 +869,8 @@ enum punit_power_well {
838 869
839#define _VLV_TX_DW2_CH0 0x8288 870#define _VLV_TX_DW2_CH0 0x8288
840#define _VLV_TX_DW2_CH1 0x8488 871#define _VLV_TX_DW2_CH1 0x8488
841#define DPIO_SWING_MARGIN_SHIFT 16 872#define DPIO_SWING_MARGIN000_SHIFT 16
842#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT) 873#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
843#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8 874#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
844#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1) 875#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
845 876
@@ -847,12 +878,16 @@ enum punit_power_well {
847#define _VLV_TX_DW3_CH1 0x848c 878#define _VLV_TX_DW3_CH1 0x848c
848/* The following bit for CHV phy */ 879/* The following bit for CHV phy */
849#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27) 880#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
881#define DPIO_SWING_MARGIN101_SHIFT 16
882#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
850#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) 883#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
851 884
852#define _VLV_TX_DW4_CH0 0x8290 885#define _VLV_TX_DW4_CH0 0x8290
853#define _VLV_TX_DW4_CH1 0x8490 886#define _VLV_TX_DW4_CH1 0x8490
854#define DPIO_SWING_DEEMPH9P5_SHIFT 24 887#define DPIO_SWING_DEEMPH9P5_SHIFT 24
855#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT) 888#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
889#define DPIO_SWING_DEEMPH6P0_SHIFT 16
890#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
856#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1) 891#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
857 892
858#define _VLV_TX3_DW4_CH0 0x690 893#define _VLV_TX3_DW4_CH0 0x690
@@ -1003,6 +1038,13 @@ enum punit_power_well {
1003#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ 1038#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
1004#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ 1039#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
1005#define PGTBL_ER 0x02024 1040#define PGTBL_ER 0x02024
1041#define PRB0_BASE (0x2030-0x30)
1042#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
1043#define PRB2_BASE (0x2050-0x30) /* gen3 */
1044#define SRB0_BASE (0x2100-0x30) /* gen2 */
1045#define SRB1_BASE (0x2110-0x30) /* gen2 */
1046#define SRB2_BASE (0x2120-0x30) /* 830 */
1047#define SRB3_BASE (0x2130-0x30) /* 830 */
1006#define RENDER_RING_BASE 0x02000 1048#define RENDER_RING_BASE 0x02000
1007#define BSD_RING_BASE 0x04000 1049#define BSD_RING_BASE 0x04000
1008#define GEN6_BSD_RING_BASE 0x12000 1050#define GEN6_BSD_RING_BASE 0x12000
@@ -1064,6 +1106,7 @@ enum punit_power_well {
1064#define RING_ACTHD_UDW(base) ((base)+0x5c) 1106#define RING_ACTHD_UDW(base) ((base)+0x5c)
1065#define RING_NOPID(base) ((base)+0x94) 1107#define RING_NOPID(base) ((base)+0x94)
1066#define RING_IMR(base) ((base)+0xa8) 1108#define RING_IMR(base) ((base)+0xa8)
1109#define RING_HWSTAM(base) ((base)+0x98)
1067#define RING_TIMESTAMP(base) ((base)+0x358) 1110#define RING_TIMESTAMP(base) ((base)+0x358)
1068#define TAIL_ADDR 0x001FFFF8 1111#define TAIL_ADDR 0x001FFFF8
1069#define HEAD_WRAP_COUNT 0xFFE00000 1112#define HEAD_WRAP_COUNT 0xFFE00000
@@ -1248,6 +1291,10 @@ enum punit_power_well {
1248#define INSTPM_TLB_INVALIDATE (1<<9) 1291#define INSTPM_TLB_INVALIDATE (1<<9)
1249#define INSTPM_SYNC_FLUSH (1<<5) 1292#define INSTPM_SYNC_FLUSH (1<<5)
1250#define ACTHD 0x020c8 1293#define ACTHD 0x020c8
1294#define MEM_MODE 0x020cc
1295#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
1296#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
1297#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
1251#define FW_BLC 0x020d8 1298#define FW_BLC 0x020d8
1252#define FW_BLC2 0x020dc 1299#define FW_BLC2 0x020dc
1253#define FW_BLC_SELF 0x020e0 /* 915+ only */ 1300#define FW_BLC_SELF 0x020e0 /* 915+ only */
@@ -1380,6 +1427,7 @@ enum punit_power_well {
1380#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) 1427#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
1381#define GT_BSD_USER_INTERRUPT (1 << 12) 1428#define GT_BSD_USER_INTERRUPT (1 << 12)
1382#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ 1429#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
1430#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
1383#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ 1431#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
1384#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) 1432#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
1385#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) 1433#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -1519,6 +1567,7 @@ enum punit_power_well {
1519/* Framebuffer compression for Ironlake */ 1567/* Framebuffer compression for Ironlake */
1520#define ILK_DPFC_CB_BASE 0x43200 1568#define ILK_DPFC_CB_BASE 0x43200
1521#define ILK_DPFC_CONTROL 0x43208 1569#define ILK_DPFC_CONTROL 0x43208
1570#define FBC_CTL_FALSE_COLOR (1<<10)
1522/* The bit 28-8 is reserved */ 1571/* The bit 28-8 is reserved */
1523#define DPFC_RESERVED (0x1FFFFF00) 1572#define DPFC_RESERVED (0x1FFFFF00)
1524#define ILK_DPFC_RECOMP_CTL 0x4320c 1573#define ILK_DPFC_RECOMP_CTL 0x4320c
@@ -1675,12 +1724,9 @@ enum punit_power_well {
1675#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) 1724#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
1676#define DPLL_PORTD_READY_MASK (0xf) 1725#define DPLL_PORTD_READY_MASK (0xf)
1677#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) 1726#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
1678#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \ 1727#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
1679 ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
1680#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
1681 ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
1682#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) 1728#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
1683#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30)) 1729#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
1684 1730
1685/* 1731/*
1686 * The i830 generation, in LVDS mode, defines P1 as the bit number set within 1732 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -2397,6 +2443,7 @@ enum punit_power_well {
2397#define _PIPEASRC 0x6001c 2443#define _PIPEASRC 0x6001c
2398#define _BCLRPAT_A 0x60020 2444#define _BCLRPAT_A 0x60020
2399#define _VSYNCSHIFT_A 0x60028 2445#define _VSYNCSHIFT_A 0x60028
2446#define _PIPE_MULT_A 0x6002c
2400 2447
2401/* Pipe B timing regs */ 2448/* Pipe B timing regs */
2402#define _HTOTAL_B 0x61000 2449#define _HTOTAL_B 0x61000
@@ -2408,6 +2455,7 @@ enum punit_power_well {
2408#define _PIPEBSRC 0x6101c 2455#define _PIPEBSRC 0x6101c
2409#define _BCLRPAT_B 0x61020 2456#define _BCLRPAT_B 0x61020
2410#define _VSYNCSHIFT_B 0x61028 2457#define _VSYNCSHIFT_B 0x61028
2458#define _PIPE_MULT_B 0x6102c
2411 2459
2412#define TRANSCODER_A_OFFSET 0x60000 2460#define TRANSCODER_A_OFFSET 0x60000
2413#define TRANSCODER_B_OFFSET 0x61000 2461#define TRANSCODER_B_OFFSET 0x61000
@@ -2428,6 +2476,7 @@ enum punit_power_well {
2428#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) 2476#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
2429#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) 2477#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
2430#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) 2478#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
2479#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
2431 2480
2432/* HSW+ eDP PSR registers */ 2481/* HSW+ eDP PSR registers */
2433#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) 2482#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -3476,6 +3525,8 @@ enum punit_power_well {
3476#define DP_LINK_TRAIN_OFF (3 << 28) 3525#define DP_LINK_TRAIN_OFF (3 << 28)
3477#define DP_LINK_TRAIN_MASK (3 << 28) 3526#define DP_LINK_TRAIN_MASK (3 << 28)
3478#define DP_LINK_TRAIN_SHIFT 28 3527#define DP_LINK_TRAIN_SHIFT 28
3528#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14)
3529#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14))
3479 3530
3480/* CPT Link training mode */ 3531/* CPT Link training mode */
3481#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) 3532#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
@@ -3732,7 +3783,6 @@ enum punit_power_well {
3732#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 3783#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
3733#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 3784#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
3734#define PIPE_DPST_EVENT_STATUS (1UL<<7) 3785#define PIPE_DPST_EVENT_STATUS (1UL<<7)
3735#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3736#define PIPE_A_PSR_STATUS_VLV (1UL<<6) 3786#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
3737#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) 3787#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3738#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 3788#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
@@ -3842,73 +3892,151 @@ enum punit_power_well {
3842#define DSPARB_BEND_SHIFT 9 /* on 855 */ 3892#define DSPARB_BEND_SHIFT 9 /* on 855 */
3843#define DSPARB_AEND_SHIFT 0 3893#define DSPARB_AEND_SHIFT 0
3844 3894
3895/* pnv/gen4/g4x/vlv/chv */
3845#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) 3896#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
3846#define DSPFW_SR_SHIFT 23 3897#define DSPFW_SR_SHIFT 23
3847#define DSPFW_SR_MASK (0x1ff<<23) 3898#define DSPFW_SR_MASK (0x1ff<<23)
3848#define DSPFW_CURSORB_SHIFT 16 3899#define DSPFW_CURSORB_SHIFT 16
3849#define DSPFW_CURSORB_MASK (0x3f<<16) 3900#define DSPFW_CURSORB_MASK (0x3f<<16)
3850#define DSPFW_PLANEB_SHIFT 8 3901#define DSPFW_PLANEB_SHIFT 8
3851#define DSPFW_PLANEB_MASK (0x7f<<8) 3902#define DSPFW_PLANEB_MASK (0x7f<<8)
3852#define DSPFW_PLANEA_MASK (0x7f) 3903#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
3904#define DSPFW_PLANEA_SHIFT 0
3905#define DSPFW_PLANEA_MASK (0x7f<<0)
3906#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
3853#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) 3907#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
3854#define DSPFW_CURSORA_MASK 0x00003f00 3908#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
3855#define DSPFW_CURSORA_SHIFT 8 3909#define DSPFW_FBC_SR_SHIFT 28
3856#define DSPFW_PLANEC_MASK (0x7f) 3910#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
3911#define DSPFW_FBC_HPLL_SR_SHIFT 24
3912#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
3913#define DSPFW_SPRITEB_SHIFT (16)
3914#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
3915#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
3916#define DSPFW_CURSORA_SHIFT 8
3917#define DSPFW_CURSORA_MASK (0x3f<<8)
3918#define DSPFW_PLANEC_SHIFT_OLD 0
3919#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
3920#define DSPFW_SPRITEA_SHIFT 0
3921#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
3922#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
3857#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) 3923#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
3858#define DSPFW_HPLL_SR_EN (1<<31) 3924#define DSPFW_HPLL_SR_EN (1<<31)
3859#define DSPFW_CURSOR_SR_SHIFT 24
3860#define PINEVIEW_SELF_REFRESH_EN (1<<30) 3925#define PINEVIEW_SELF_REFRESH_EN (1<<30)
3926#define DSPFW_CURSOR_SR_SHIFT 24
3861#define DSPFW_CURSOR_SR_MASK (0x3f<<24) 3927#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
3862#define DSPFW_HPLL_CURSOR_SHIFT 16 3928#define DSPFW_HPLL_CURSOR_SHIFT 16
3863#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 3929#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
3864#define DSPFW_HPLL_SR_MASK (0x1ff) 3930#define DSPFW_HPLL_SR_SHIFT 0
3865#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) 3931#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
3866#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) 3932
3933/* vlv/chv */
3934#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070)
3935#define DSPFW_SPRITEB_WM1_SHIFT 16
3936#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
3937#define DSPFW_CURSORA_WM1_SHIFT 8
3938#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
3939#define DSPFW_SPRITEA_WM1_SHIFT 0
3940#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
3941#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074)
3942#define DSPFW_PLANEB_WM1_SHIFT 24
3943#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
3944#define DSPFW_PLANEA_WM1_SHIFT 16
3945#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
3946#define DSPFW_CURSORB_WM1_SHIFT 8
3947#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
3948#define DSPFW_CURSOR_SR_WM1_SHIFT 0
3949#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
3950#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078)
3951#define DSPFW_SR_WM1_SHIFT 0
3952#define DSPFW_SR_WM1_MASK (0x1ff<<0)
3953#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c)
3954#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
3955#define DSPFW_SPRITED_WM1_SHIFT 24
3956#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
3957#define DSPFW_SPRITED_SHIFT 16
3958#define DSPFW_SPRITED_MASK (0xff<<16)
3959#define DSPFW_SPRITEC_WM1_SHIFT 8
3960#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
3961#define DSPFW_SPRITEC_SHIFT 0
3962#define DSPFW_SPRITEC_MASK (0xff<<0)
3963#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
3964#define DSPFW_SPRITEF_WM1_SHIFT 24
3965#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
3966#define DSPFW_SPRITEF_SHIFT 16
3967#define DSPFW_SPRITEF_MASK (0xff<<16)
3968#define DSPFW_SPRITEE_WM1_SHIFT 8
3969#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
3970#define DSPFW_SPRITEE_SHIFT 0
3971#define DSPFW_SPRITEE_MASK (0xff<<0)
3972#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
3973#define DSPFW_PLANEC_WM1_SHIFT 24
3974#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
3975#define DSPFW_PLANEC_SHIFT 16
3976#define DSPFW_PLANEC_MASK (0xff<<16)
3977#define DSPFW_CURSORC_WM1_SHIFT 8
3978#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
3979#define DSPFW_CURSORC_SHIFT 0
3980#define DSPFW_CURSORC_MASK (0x3f<<0)
3981
3982/* vlv/chv high order bits */
3983#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
3984#define DSPFW_SR_HI_SHIFT 24
3985#define DSPFW_SR_HI_MASK (1<<24)
3986#define DSPFW_SPRITEF_HI_SHIFT 23
3987#define DSPFW_SPRITEF_HI_MASK (1<<23)
3988#define DSPFW_SPRITEE_HI_SHIFT 22
3989#define DSPFW_SPRITEE_HI_MASK (1<<22)
3990#define DSPFW_PLANEC_HI_SHIFT 21
3991#define DSPFW_PLANEC_HI_MASK (1<<21)
3992#define DSPFW_SPRITED_HI_SHIFT 20
3993#define DSPFW_SPRITED_HI_MASK (1<<20)
3994#define DSPFW_SPRITEC_HI_SHIFT 16
3995#define DSPFW_SPRITEC_HI_MASK (1<<16)
3996#define DSPFW_PLANEB_HI_SHIFT 12
3997#define DSPFW_PLANEB_HI_MASK (1<<12)
3998#define DSPFW_SPRITEB_HI_SHIFT 8
3999#define DSPFW_SPRITEB_HI_MASK (1<<8)
4000#define DSPFW_SPRITEA_HI_SHIFT 4
4001#define DSPFW_SPRITEA_HI_MASK (1<<4)
4002#define DSPFW_PLANEA_HI_SHIFT 0
4003#define DSPFW_PLANEA_HI_MASK (1<<0)
4004#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
4005#define DSPFW_SR_WM1_HI_SHIFT 24
4006#define DSPFW_SR_WM1_HI_MASK (1<<24)
4007#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
4008#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
4009#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
4010#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
4011#define DSPFW_PLANEC_WM1_HI_SHIFT 21
4012#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
4013#define DSPFW_SPRITED_WM1_HI_SHIFT 20
4014#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
4015#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
4016#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
4017#define DSPFW_PLANEB_WM1_HI_SHIFT 12
4018#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
4019#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
4020#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
4021#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
4022#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
4023#define DSPFW_PLANEA_WM1_HI_SHIFT 0
4024#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
3867 4025
 3868/* drain latency register values */ 4026
3869#define DRAIN_LATENCY_PRECISION_32 32 4027#define DRAIN_LATENCY_PRECISION_32 32
3870#define DRAIN_LATENCY_PRECISION_64 64 4028#define DRAIN_LATENCY_PRECISION_64 64
3871#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) 4029#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
3872#define DDL_CURSORA_PRECISION_64 (1<<31) 4030#define DDL_CURSOR_PRECISION_64 (1<<31)
3873#define DDL_CURSORA_PRECISION_32 (0<<31) 4031#define DDL_CURSOR_PRECISION_32 (0<<31)
3874#define DDL_CURSORA_SHIFT 24 4032#define DDL_CURSOR_SHIFT 24
3875#define DDL_SPRITEB_PRECISION_64 (1<<23) 4033#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
3876#define DDL_SPRITEB_PRECISION_32 (0<<23) 4034#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
3877#define DDL_SPRITEB_SHIFT 16 4035#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
3878#define DDL_SPRITEA_PRECISION_64 (1<<15) 4036#define DDL_PLANE_PRECISION_64 (1<<7)
3879#define DDL_SPRITEA_PRECISION_32 (0<<15) 4037#define DDL_PLANE_PRECISION_32 (0<<7)
3880#define DDL_SPRITEA_SHIFT 8 4038#define DDL_PLANE_SHIFT 0
3881#define DDL_PLANEA_PRECISION_64 (1<<7) 4039#define DRAIN_LATENCY_MASK 0x7f
3882#define DDL_PLANEA_PRECISION_32 (0<<7)
3883#define DDL_PLANEA_SHIFT 0
3884
3885#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
3886#define DDL_CURSORB_PRECISION_64 (1<<31)
3887#define DDL_CURSORB_PRECISION_32 (0<<31)
3888#define DDL_CURSORB_SHIFT 24
3889#define DDL_SPRITED_PRECISION_64 (1<<23)
3890#define DDL_SPRITED_PRECISION_32 (0<<23)
3891#define DDL_SPRITED_SHIFT 16
3892#define DDL_SPRITEC_PRECISION_64 (1<<15)
3893#define DDL_SPRITEC_PRECISION_32 (0<<15)
3894#define DDL_SPRITEC_SHIFT 8
3895#define DDL_PLANEB_PRECISION_64 (1<<7)
3896#define DDL_PLANEB_PRECISION_32 (0<<7)
3897#define DDL_PLANEB_SHIFT 0
3898
3899#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
3900#define DDL_CURSORC_PRECISION_64 (1<<31)
3901#define DDL_CURSORC_PRECISION_32 (0<<31)
3902#define DDL_CURSORC_SHIFT 24
3903#define DDL_SPRITEF_PRECISION_64 (1<<23)
3904#define DDL_SPRITEF_PRECISION_32 (0<<23)
3905#define DDL_SPRITEF_SHIFT 16
3906#define DDL_SPRITEE_PRECISION_64 (1<<15)
3907#define DDL_SPRITEE_PRECISION_32 (0<<15)
3908#define DDL_SPRITEE_SHIFT 8
3909#define DDL_PLANEC_PRECISION_64 (1<<7)
3910#define DDL_PLANEC_PRECISION_32 (0<<7)
3911#define DDL_PLANEC_SHIFT 0
3912 4040
3913/* FIFO watermark sizes etc */ 4041/* FIFO watermark sizes etc */
3914#define G4X_FIFO_LINE_SIZE 64 4042#define G4X_FIFO_LINE_SIZE 64
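
The three fixed DDL registers above become one computed macro, and the per-plane fields follow the same arithmetic; spelled out relative to VLV_DISPLAY_BASE:

    VLV_DDL(PIPE_A) = base + 0x70050      DDL_SPRITE_SHIFT(0) =  8
    VLV_DDL(PIPE_B) = base + 0x70054      DDL_SPRITE_SHIFT(1) = 16
    VLV_DDL(PIPE_C) = base + 0x70058      DDL_CURSOR_SHIFT    = 24

so the old per-name CURSORA/SPRITEB/PLANEC variants all map back onto the same offsets and bit positions, with DRAIN_LATENCY_MASK (0x7f) covering the value field under each precision bit.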
@@ -4026,7 +4154,8 @@ enum punit_power_well {
4026/* Old style CUR*CNTR flags (desktop 8xx) */ 4154/* Old style CUR*CNTR flags (desktop 8xx) */
4027#define CURSOR_ENABLE 0x80000000 4155#define CURSOR_ENABLE 0x80000000
4028#define CURSOR_GAMMA_ENABLE 0x40000000 4156#define CURSOR_GAMMA_ENABLE 0x40000000
4029#define CURSOR_STRIDE_MASK 0x30000000 4157#define CURSOR_STRIDE_SHIFT 28
4158#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
4030#define CURSOR_PIPE_CSC_ENABLE (1<<24) 4159#define CURSOR_PIPE_CSC_ENABLE (1<<24)
4031#define CURSOR_FORMAT_SHIFT 24 4160#define CURSOR_FORMAT_SHIFT 24
4032#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) 4161#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
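
CURSOR_STRIDE(x) packs the stride as a 2-bit log2 field at bits 29:28 (the old CURSOR_STRIDE_MASK 0x30000000) and is valid only for the four strides named in its comment:

    CURSOR_STRIDE(256)  = (ffs(256)-9)  << 28 = 0 << 28
    CURSOR_STRIDE(512)  = (ffs(512)-9)  << 28 = 1 << 28
    CURSOR_STRIDE(1024) = (ffs(1024)-9) << 28 = 2 << 28
    CURSOR_STRIDE(2048) = (ffs(2048)-9) << 28 = 3 << 28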
@@ -4111,6 +4240,7 @@ enum punit_power_well {
4111#define DISPPLANE_NO_LINE_DOUBLE 0 4240#define DISPPLANE_NO_LINE_DOUBLE 0
4112#define DISPPLANE_STEREO_POLARITY_FIRST 0 4241#define DISPPLANE_STEREO_POLARITY_FIRST 0
4113#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 4242#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
4243#define DISPPLANE_ROTATE_180 (1<<15)
4114#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 4244#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
4115#define DISPPLANE_TILED (1<<10) 4245#define DISPPLANE_TILED (1<<10)
4116#define _DSPAADDR 0x70184 4246#define _DSPAADDR 0x70184
@@ -4195,6 +4325,7 @@ enum punit_power_well {
4195#define DVS_YUV_ORDER_UYVY (1<<16) 4325#define DVS_YUV_ORDER_UYVY (1<<16)
4196#define DVS_YUV_ORDER_YVYU (2<<16) 4326#define DVS_YUV_ORDER_YVYU (2<<16)
4197#define DVS_YUV_ORDER_VYUY (3<<16) 4327#define DVS_YUV_ORDER_VYUY (3<<16)
4328#define DVS_ROTATE_180 (1<<15)
4198#define DVS_DEST_KEY (1<<2) 4329#define DVS_DEST_KEY (1<<2)
4199#define DVS_TRICKLE_FEED_DISABLE (1<<14) 4330#define DVS_TRICKLE_FEED_DISABLE (1<<14)
4200#define DVS_TILED (1<<10) 4331#define DVS_TILED (1<<10)
@@ -4265,6 +4396,7 @@ enum punit_power_well {
4265#define SPRITE_YUV_ORDER_UYVY (1<<16) 4396#define SPRITE_YUV_ORDER_UYVY (1<<16)
4266#define SPRITE_YUV_ORDER_YVYU (2<<16) 4397#define SPRITE_YUV_ORDER_YVYU (2<<16)
4267#define SPRITE_YUV_ORDER_VYUY (3<<16) 4398#define SPRITE_YUV_ORDER_VYUY (3<<16)
4399#define SPRITE_ROTATE_180 (1<<15)
4268#define SPRITE_TRICKLE_FEED_DISABLE (1<<14) 4400#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
4269#define SPRITE_INT_GAMMA_ENABLE (1<<13) 4401#define SPRITE_INT_GAMMA_ENABLE (1<<13)
4270#define SPRITE_TILED (1<<10) 4402#define SPRITE_TILED (1<<10)
@@ -4338,6 +4470,7 @@ enum punit_power_well {
4338#define SP_YUV_ORDER_UYVY (1<<16) 4470#define SP_YUV_ORDER_UYVY (1<<16)
4339#define SP_YUV_ORDER_YVYU (2<<16) 4471#define SP_YUV_ORDER_YVYU (2<<16)
4340#define SP_YUV_ORDER_VYUY (3<<16) 4472#define SP_YUV_ORDER_VYUY (3<<16)
4473#define SP_ROTATE_180 (1<<15)
4341#define SP_TILED (1<<10) 4474#define SP_TILED (1<<10)
4342#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) 4475#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
4343#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) 4476#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
@@ -5246,8 +5379,7 @@ enum punit_power_well {
5246#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 5379#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
5247#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 5380#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
5248#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 5381#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
5249#define PANEL_PORT_SELECT_DPB_VLV (1 << 30) 5382#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
5250#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
5251#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 5383#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
5252#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 5384#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
5253 5385
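The two fixed port-select defines collapse into one parameterized macro; this works because the VLV panel-power port-select field simply encodes the enum port value (PORT_B == 1, PORT_C == 2 in i915), so the old constants fall out naturally. A hedged sketch:

	/* Illustrative: equals the old PANEL_PORT_SELECT_DPB_VLV (1 << 30). */
	u32 port_sel = PANEL_PORT_SELECT_VLV(PORT_B);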
@@ -5407,7 +5539,6 @@ enum punit_power_well {
5407#define VLV_GTLC_ALLOWWAKEERR (1 << 1) 5539#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
5408#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) 5540#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
5409#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) 5541#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
5410#define VLV_GTLC_SURVIVABILITY_REG 0x130098
5411#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 5542#define FORCEWAKE_MT 0xa188 /* multi-threaded */
5412#define FORCEWAKE_KERNEL 0x1 5543#define FORCEWAKE_KERNEL 0x1
5413#define FORCEWAKE_USER 0x2 5544#define FORCEWAKE_USER 0x2
@@ -5545,12 +5676,6 @@ enum punit_power_well {
5545 GEN6_PM_RP_DOWN_THRESHOLD | \ 5676 GEN6_PM_RP_DOWN_THRESHOLD | \
5546 GEN6_PM_RP_DOWN_TIMEOUT) 5677 GEN6_PM_RP_DOWN_TIMEOUT)
5547 5678
5548#define CHV_CZ_CLOCK_FREQ_MODE_200 200
5549#define CHV_CZ_CLOCK_FREQ_MODE_267 267
5550#define CHV_CZ_CLOCK_FREQ_MODE_320 320
5551#define CHV_CZ_CLOCK_FREQ_MODE_333 333
5552#define CHV_CZ_CLOCK_FREQ_MODE_400 400
5553
5554#define GEN7_GT_SCRATCH_BASE 0x4F100 5679#define GEN7_GT_SCRATCH_BASE 0x4F100
5555#define GEN7_GT_SCRATCH_REG_NUM 8 5680#define GEN7_GT_SCRATCH_REG_NUM 8
5556 5681
@@ -5866,15 +5991,7 @@ enum punit_power_well {
5866#define DDI_BUF_CTL_B 0x64100 5991#define DDI_BUF_CTL_B 0x64100
5867#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) 5992#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
5868#define DDI_BUF_CTL_ENABLE (1<<31) 5993#define DDI_BUF_CTL_ENABLE (1<<31)
5869#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ 5994#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
5870#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
5871#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
5872#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
5873#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
5874#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
5875#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
5876#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
5877#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
5878#define DDI_BUF_EMP_MASK (0xf<<24) 5995#define DDI_BUF_EMP_MASK (0xf<<24)
5879#define DDI_BUF_PORT_REVERSAL (1<<16) 5996#define DDI_BUF_PORT_REVERSAL (1<<16)
5880#define DDI_BUF_IS_IDLE (1<<7) 5997#define DDI_BUF_IS_IDLE (1<<7)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ae7fd8fc27f0..503847f18fdd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -540,7 +540,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
540 540
541 memset(&error_priv, 0, sizeof(error_priv)); 541 memset(&error_priv, 0, sizeof(error_priv));
542 542
543 ret = i915_error_state_buf_init(&error_str, count, off); 543 ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
544 if (ret) 544 if (ret)
545 return ret; 545 return ret;
546 546
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index afcc8dd40bdd..a4bd90f36a03 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -627,16 +627,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
627 627
628 switch (edp_link_params->preemphasis) { 628 switch (edp_link_params->preemphasis) {
629 case EDP_PREEMPHASIS_NONE: 629 case EDP_PREEMPHASIS_NONE:
630 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 630 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
631 break; 631 break;
632 case EDP_PREEMPHASIS_3_5dB: 632 case EDP_PREEMPHASIS_3_5dB:
633 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 633 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
634 break; 634 break;
635 case EDP_PREEMPHASIS_6dB: 635 case EDP_PREEMPHASIS_6dB:
636 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 636 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
637 break; 637 break;
638 case EDP_PREEMPHASIS_9_5dB: 638 case EDP_PREEMPHASIS_9_5dB:
639 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 639 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
640 break; 640 break;
641 default: 641 default:
642 DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", 642 DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
@@ -646,16 +646,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
646 646
647 switch (edp_link_params->vswing) { 647 switch (edp_link_params->vswing) {
648 case EDP_VSWING_0_4V: 648 case EDP_VSWING_0_4V:
649 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; 649 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
650 break; 650 break;
651 case EDP_VSWING_0_6V: 651 case EDP_VSWING_0_6V:
652 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; 652 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
653 break; 653 break;
654 case EDP_VSWING_0_8V: 654 case EDP_VSWING_0_8V:
655 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; 655 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
656 break; 656 break;
657 case EDP_VSWING_1_2V: 657 case EDP_VSWING_1_2V:
658 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; 658 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
659 break; 659 break;
660 default: 660 default:
661 DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", 661 DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@ -976,12 +976,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
976 if (bdb->version >= 158) { 976 if (bdb->version >= 158) {
977 /* The VBT HDMI level shift values match the table we have. */ 977 /* The VBT HDMI level shift values match the table we have. */
978 hdmi_level_shift = child->raw[7] & 0xF; 978 hdmi_level_shift = child->raw[7] & 0xF;
979 if (hdmi_level_shift < 0xC) { 979 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
980 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", 980 port_name(port),
981 port_name(port), 981 hdmi_level_shift);
982 hdmi_level_shift); 982 info->hdmi_level_shift = hdmi_level_shift;
983 info->hdmi_level_shift = hdmi_level_shift;
984 }
985 } 983 }
986} 984}
987 985
@@ -1114,8 +1112,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
1114 struct ddi_vbt_port_info *info = 1112 struct ddi_vbt_port_info *info =
1115 &dev_priv->vbt.ddi_port_info[port]; 1113 &dev_priv->vbt.ddi_port_info[port];
1116 1114
1117 /* Recommended BSpec default: 800mV 0dB. */ 1115 info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
1118 info->hdmi_level_shift = 6;
1119 1116
1120 info->supports_dvi = (port != PORT_A && port != PORT_E); 1117 info->supports_dvi = (port != PORT_A && port != PORT_E);
1121 info->supports_hdmi = info->supports_dvi; 1118 info->supports_hdmi = info->supports_dvi;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index b98667796337..905999bee2ac 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -802,7 +802,8 @@ struct mipi_config {
802 802
803 u16 rsvd4; 803 u16 rsvd4;
804 804
805 u8 rsvd5[5]; 805 u8 rsvd5;
806 u32 target_burst_mode_freq;
806 u32 dsi_ddr_clk; 807 u32 dsi_ddr_clk;
807 u32 bridge_ref_clk; 808 u32 bridge_ref_clk;
808 809
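Splitting rsvd5 keeps the VBT layout intact: one reserved byte plus the new 32-bit target_burst_mode_freq occupy the same five bytes the old rsvd5[5] did, assuming these VBT structs are declared packed so no padding creeps in. A compile-time sketch of that invariant (the BUILD_BUG_ON placement is illustrative, not from the patch):

	/* Illustrative: 1 + 4 bytes must equal the 5 reserved bytes replaced. */
	BUILD_BUG_ON(sizeof(u8) + sizeof(u32) != 5 * sizeof(u8));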
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5db0b5552e39..b63d4fa204a3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,87 +28,103 @@
28#include "i915_drv.h" 28#include "i915_drv.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31struct ddi_buf_trans {
32 u32 trans1; /* balance leg enable, de-emph level */
33 u32 trans2; /* vref sel, vswing */
34};
35
31/* HDMI/DVI modes ignore everything but the last 2 items. So we share 36/* HDMI/DVI modes ignore everything but the last 2 items. So we share
32 * them for both DP and FDI transports, allowing those ports to 37 * them for both DP and FDI transports, allowing those ports to
33 * automatically adapt to HDMI connections as well 38 * automatically adapt to HDMI connections as well
34 */ 39 */
35static const u32 hsw_ddi_translations_dp[] = { 40static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
36 0x00FFFFFF, 0x0006000E, /* DP parameters */ 41 { 0x00FFFFFF, 0x0006000E },
37 0x00D75FFF, 0x0005000A, 42 { 0x00D75FFF, 0x0005000A },
38 0x00C30FFF, 0x00040006, 43 { 0x00C30FFF, 0x00040006 },
39 0x80AAAFFF, 0x000B0000, 44 { 0x80AAAFFF, 0x000B0000 },
40 0x00FFFFFF, 0x0005000A, 45 { 0x00FFFFFF, 0x0005000A },
41 0x00D75FFF, 0x000C0004, 46 { 0x00D75FFF, 0x000C0004 },
42 0x80C30FFF, 0x000B0000, 47 { 0x80C30FFF, 0x000B0000 },
43 0x00FFFFFF, 0x00040006, 48 { 0x00FFFFFF, 0x00040006 },
44 0x80D75FFF, 0x000B0000, 49 { 0x80D75FFF, 0x000B0000 },
45}; 50};
46 51
47static const u32 hsw_ddi_translations_fdi[] = { 52static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
48 0x00FFFFFF, 0x0007000E, /* FDI parameters */ 53 { 0x00FFFFFF, 0x0007000E },
49 0x00D75FFF, 0x000F000A, 54 { 0x00D75FFF, 0x000F000A },
50 0x00C30FFF, 0x00060006, 55 { 0x00C30FFF, 0x00060006 },
51 0x00AAAFFF, 0x001E0000, 56 { 0x00AAAFFF, 0x001E0000 },
52 0x00FFFFFF, 0x000F000A, 57 { 0x00FFFFFF, 0x000F000A },
53 0x00D75FFF, 0x00160004, 58 { 0x00D75FFF, 0x00160004 },
54 0x00C30FFF, 0x001E0000, 59 { 0x00C30FFF, 0x001E0000 },
55 0x00FFFFFF, 0x00060006, 60 { 0x00FFFFFF, 0x00060006 },
56 0x00D75FFF, 0x001E0000, 61 { 0x00D75FFF, 0x001E0000 },
57}; 62};
58 63
59static const u32 hsw_ddi_translations_hdmi[] = { 64static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
60 /* Idx NT mV diff T mV diff db */ 65 /* Idx NT mV d T mV d db */
61 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */ 66 { 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
62 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */ 67 { 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
63 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */ 68 { 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
64 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */ 69 { 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
65 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */ 70 { 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
66 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */ 71 { 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
67 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */ 72 { 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
68 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */ 73 { 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
69 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */ 74 { 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
70 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */ 75 { 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
71 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */ 76 { 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */ 77 { 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
73}; 78};
74 79
75static const u32 bdw_ddi_translations_edp[] = { 80static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
76 0x00FFFFFF, 0x00000012, /* eDP parameters */ 81 { 0x00FFFFFF, 0x00000012 },
77 0x00EBAFFF, 0x00020011, 82 { 0x00EBAFFF, 0x00020011 },
78 0x00C71FFF, 0x0006000F, 83 { 0x00C71FFF, 0x0006000F },
79 0x00AAAFFF, 0x000E000A, 84 { 0x00AAAFFF, 0x000E000A },
80 0x00FFFFFF, 0x00020011, 85 { 0x00FFFFFF, 0x00020011 },
81 0x00DB6FFF, 0x0005000F, 86 { 0x00DB6FFF, 0x0005000F },
82 0x00BEEFFF, 0x000A000C, 87 { 0x00BEEFFF, 0x000A000C },
83 0x00FFFFFF, 0x0005000F, 88 { 0x00FFFFFF, 0x0005000F },
84 0x00DB6FFF, 0x000A000C, 89 { 0x00DB6FFF, 0x000A000C },
85 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
86}; 90};
87 91
88static const u32 bdw_ddi_translations_dp[] = { 92static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
89 0x00FFFFFF, 0x0007000E, /* DP parameters */ 93 { 0x00FFFFFF, 0x0007000E },
90 0x00D75FFF, 0x000E000A, 94 { 0x00D75FFF, 0x000E000A },
91 0x00BEFFFF, 0x00140006, 95 { 0x00BEFFFF, 0x00140006 },
92 0x80B2CFFF, 0x001B0002, 96 { 0x80B2CFFF, 0x001B0002 },
93 0x00FFFFFF, 0x000E000A, 97 { 0x00FFFFFF, 0x000E000A },
94 0x00D75FFF, 0x00180004, 98 { 0x00D75FFF, 0x00180004 },
95 0x80CB2FFF, 0x001B0002, 99 { 0x80CB2FFF, 0x001B0002 },
96 0x00F7DFFF, 0x00180004, 100 { 0x00F7DFFF, 0x00180004 },
97 0x80D75FFF, 0x001B0002, 101 { 0x80D75FFF, 0x001B0002 },
98 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
99}; 102};
100 103
101static const u32 bdw_ddi_translations_fdi[] = { 104static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
102 0x00FFFFFF, 0x0001000E, /* FDI parameters */ 105 { 0x00FFFFFF, 0x0001000E },
103 0x00D75FFF, 0x0004000A, 106 { 0x00D75FFF, 0x0004000A },
104 0x00C30FFF, 0x00070006, 107 { 0x00C30FFF, 0x00070006 },
105 0x00AAAFFF, 0x000C0000, 108 { 0x00AAAFFF, 0x000C0000 },
106 0x00FFFFFF, 0x0004000A, 109 { 0x00FFFFFF, 0x0004000A },
107 0x00D75FFF, 0x00090004, 110 { 0x00D75FFF, 0x00090004 },
108 0x00C30FFF, 0x000C0000, 111 { 0x00C30FFF, 0x000C0000 },
109 0x00FFFFFF, 0x00070006, 112 { 0x00FFFFFF, 0x00070006 },
110 0x00D75FFF, 0x000C0000, 113 { 0x00D75FFF, 0x000C0000 },
111 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ 114};
115
116static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
117 /* Idx NT mV d T mV df db */
118 { 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
119 { 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
120 { 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
121 { 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
122 { 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
123 { 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
124 { 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
125 { 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
126 { 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
127 { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
112}; 128};
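Replacing the flat u32 arrays with struct ddi_buf_trans makes the implicit pairing of consecutive words explicit: each entry still programs two consecutive dwords of the DDI_BUF_TRANS block, as the rewritten loop in intel_prepare_ddi_buffers() below shows. A sketch of what one entry amounts to (the index arithmetic here is illustrative):

	/* Illustrative: entry i lands at DDI_BUF_TRANS(port) + 8 * i. */
	const struct ddi_buf_trans *t = &hsw_ddi_translations_dp[i];
	I915_WRITE(DDI_BUF_TRANS(port) + 8 * i,     t->trans1); /* balance leg, de-emph */
	I915_WRITE(DDI_BUF_TRANS(port) + 8 * i + 4, t->trans2); /* vref sel, vswing */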
113 129
114enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 130enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -145,26 +161,36 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
145{ 161{
146 struct drm_i915_private *dev_priv = dev->dev_private; 162 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 reg; 163 u32 reg;
148 int i; 164 int i, n_hdmi_entries, hdmi_800mV_0dB;
149 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; 165 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
150 const u32 *ddi_translations_fdi; 166 const struct ddi_buf_trans *ddi_translations_fdi;
151 const u32 *ddi_translations_dp; 167 const struct ddi_buf_trans *ddi_translations_dp;
152 const u32 *ddi_translations_edp; 168 const struct ddi_buf_trans *ddi_translations_edp;
153 const u32 *ddi_translations; 169 const struct ddi_buf_trans *ddi_translations_hdmi;
170 const struct ddi_buf_trans *ddi_translations;
154 171
155 if (IS_BROADWELL(dev)) { 172 if (IS_BROADWELL(dev)) {
156 ddi_translations_fdi = bdw_ddi_translations_fdi; 173 ddi_translations_fdi = bdw_ddi_translations_fdi;
157 ddi_translations_dp = bdw_ddi_translations_dp; 174 ddi_translations_dp = bdw_ddi_translations_dp;
158 ddi_translations_edp = bdw_ddi_translations_edp; 175 ddi_translations_edp = bdw_ddi_translations_edp;
176 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
177 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
178 hdmi_800mV_0dB = 7;
159 } else if (IS_HASWELL(dev)) { 179 } else if (IS_HASWELL(dev)) {
160 ddi_translations_fdi = hsw_ddi_translations_fdi; 180 ddi_translations_fdi = hsw_ddi_translations_fdi;
161 ddi_translations_dp = hsw_ddi_translations_dp; 181 ddi_translations_dp = hsw_ddi_translations_dp;
162 ddi_translations_edp = hsw_ddi_translations_dp; 182 ddi_translations_edp = hsw_ddi_translations_dp;
183 ddi_translations_hdmi = hsw_ddi_translations_hdmi;
184 n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
185 hdmi_800mV_0dB = 6;
163 } else { 186 } else {
164 WARN(1, "ddi translation table missing\n"); 187 WARN(1, "ddi translation table missing\n");
165 ddi_translations_edp = bdw_ddi_translations_dp; 188 ddi_translations_edp = bdw_ddi_translations_dp;
166 ddi_translations_fdi = bdw_ddi_translations_fdi; 189 ddi_translations_fdi = bdw_ddi_translations_fdi;
167 ddi_translations_dp = bdw_ddi_translations_dp; 190 ddi_translations_dp = bdw_ddi_translations_dp;
191 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
192 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
193 hdmi_800mV_0dB = 7;
168 } 194 }
169 195
170 switch (port) { 196 switch (port) {
@@ -190,14 +216,22 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
190 216
191 for (i = 0, reg = DDI_BUF_TRANS(port); 217 for (i = 0, reg = DDI_BUF_TRANS(port);
192 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 218 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
193 I915_WRITE(reg, ddi_translations[i]); 219 I915_WRITE(reg, ddi_translations[i].trans1);
194 reg += 4; 220 reg += 4;
195 } 221 I915_WRITE(reg, ddi_translations[i].trans2);
196 /* Entry 9 is for HDMI: */
197 for (i = 0; i < 2; i++) {
198 I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
199 reg += 4; 222 reg += 4;
200 } 223 }
224
225 /* Choose a good default if VBT is badly populated */
226 if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
227 hdmi_level >= n_hdmi_entries)
228 hdmi_level = hdmi_800mV_0dB;
229
230 /* Entry 9 is for HDMI: */
231 I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
232 reg += 4;
233 I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
234 reg += 4;
201} 235}
202 236
203/* Program DDI buffers translations for DP. By default, program ports A-D in DP 237/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -214,18 +248,6 @@ void intel_prepare_ddi(struct drm_device *dev)
214 intel_prepare_ddi_buffers(dev, port); 248 intel_prepare_ddi_buffers(dev, port);
215} 249}
216 250
217static const long hsw_ddi_buf_ctl_values[] = {
218 DDI_BUF_EMP_400MV_0DB_HSW,
219 DDI_BUF_EMP_400MV_3_5DB_HSW,
220 DDI_BUF_EMP_400MV_6DB_HSW,
221 DDI_BUF_EMP_400MV_9_5DB_HSW,
222 DDI_BUF_EMP_600MV_0DB_HSW,
223 DDI_BUF_EMP_600MV_3_5DB_HSW,
224 DDI_BUF_EMP_600MV_6DB_HSW,
225 DDI_BUF_EMP_800MV_0DB_HSW,
226 DDI_BUF_EMP_800MV_3_5DB_HSW
227};
228
229static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, 251static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
230 enum port port) 252 enum port port)
231{ 253{
@@ -285,7 +307,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
285 307
286 /* Start the training iterating through available voltages and emphasis, 308 /* Start the training iterating through available voltages and emphasis,
287 * testing each value twice. */ 309 * testing each value twice. */
288 for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { 310 for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
289 /* Configure DP_TP_CTL with auto-training */ 311 /* Configure DP_TP_CTL with auto-training */
290 I915_WRITE(DP_TP_CTL(PORT_E), 312 I915_WRITE(DP_TP_CTL(PORT_E),
291 DP_TP_CTL_FDI_AUTOTRAIN | 313 DP_TP_CTL_FDI_AUTOTRAIN |
@@ -300,7 +322,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
300 I915_WRITE(DDI_BUF_CTL(PORT_E), 322 I915_WRITE(DDI_BUF_CTL(PORT_E),
301 DDI_BUF_CTL_ENABLE | 323 DDI_BUF_CTL_ENABLE |
302 ((intel_crtc->config.fdi_lanes - 1) << 1) | 324 ((intel_crtc->config.fdi_lanes - 1) << 1) |
303 hsw_ddi_buf_ctl_values[i / 2]); 325 DDI_BUF_TRANS_SELECT(i / 2));
304 POSTING_READ(DDI_BUF_CTL(PORT_E)); 326 POSTING_READ(DDI_BUF_CTL(PORT_E));
305 327
306 udelay(600); 328 udelay(600);
@@ -375,7 +397,7 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
375 enc_to_dig_port(&encoder->base); 397 enc_to_dig_port(&encoder->base);
376 398
377 intel_dp->DP = intel_dig_port->saved_port_bits | 399 intel_dp->DP = intel_dig_port->saved_port_bits |
378 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 400 DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
379 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); 401 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
380 402
381} 403}
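The nine DDI_BUF_EMP_*_HSW constants were just the selector values 0-8 shifted into bits 27:24, so they reduce to DDI_BUF_TRANS_SELECT(n); the FDI training loop and the DP buffer setup above now pass the index directly. For instance (illustrative use, not a line from the patch):

	/* Illustrative: entry 4 was DDI_BUF_EMP_600MV_0DB_HSW == 4 << 24. */
	I915_WRITE(DDI_BUF_CTL(PORT_E),
		   DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(4));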
@@ -402,7 +424,7 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
402} 424}
403 425
404#define LC_FREQ 2700 426#define LC_FREQ 2700
405#define LC_FREQ_2K (LC_FREQ * 2000) 427#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
406 428
407#define P_MIN 2 429#define P_MIN 2
408#define P_MAX 64 430#define P_MAX 64
@@ -414,7 +436,11 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
414#define VCO_MIN 2400 436#define VCO_MIN 2400
415#define VCO_MAX 4800 437#define VCO_MAX 4800
416 438
417#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a)) 439#define abs_diff(a, b) ({ \
440 typeof(a) __a = (a); \
441 typeof(b) __b = (b); \
442 (void) (&__a == &__b); \
443 __a > __b ? (__a - __b) : (__b - __a); })
418 444
419struct wrpll_rnp { 445struct wrpll_rnp {
420 unsigned p, n2, r2; 446 unsigned p, n2, r2;
@@ -524,9 +550,9 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
524 */ 550 */
525 a = freq2k * budget * p * r2; 551 a = freq2k * budget * p * r2;
526 b = freq2k * budget * best->p * best->r2; 552 b = freq2k * budget * best->p * best->r2;
527 diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2)); 553 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
528 diff_best = ABS_DIFF((freq2k * best->p * best->r2), 554 diff_best = abs_diff(freq2k * best->p * best->r2,
529 (LC_FREQ_2K * best->n2)); 555 LC_FREQ_2K * best->n2);
530 c = 1000000 * diff; 556 c = 1000000 * diff;
531 d = 1000000 * diff_best; 557 d = 1000000 * diff_best;
532 558
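Unlike the old ABS_DIFF() macro, abs_diff() expands each argument exactly once, so operands with side effects are safe, and the (void)(&__a == &__b) comparison makes the compiler warn when the two arguments have different types. Switching LC_FREQ_2K to U64_C keeps both operands of the comparison the same 64-bit type, which the new macro's type check requires, and avoids 32-bit overflow in LC_FREQ_2K * n2. A small illustrative use:

	/* Illustrative: both operands are uint64_t, each evaluated once. */
	uint64_t a = 7, b = 10;
	uint64_t d = abs_diff(a, b);	/* d == 3 */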
@@ -587,8 +613,8 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
587 return (refclk * n * 100) / (p * r); 613 return (refclk * n * 100) / (p * r);
588} 614}
589 615
590void intel_ddi_clock_get(struct intel_encoder *encoder, 616static void hsw_ddi_clock_get(struct intel_encoder *encoder,
591 struct intel_crtc_config *pipe_config) 617 struct intel_crtc_config *pipe_config)
592{ 618{
593 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 619 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
594 int link_clock = 0; 620 int link_clock = 0;
@@ -643,9 +669,15 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
643 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; 669 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
644} 670}
645 671
672void intel_ddi_clock_get(struct intel_encoder *encoder,
673 struct intel_crtc_config *pipe_config)
674{
675 hsw_ddi_clock_get(encoder, pipe_config);
676}
677
646static void 678static void
647intel_ddi_calculate_wrpll(int clock /* in Hz */, 679hsw_ddi_calculate_wrpll(int clock /* in Hz */,
648 unsigned *r2_out, unsigned *n2_out, unsigned *p_out) 680 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
649{ 681{
650 uint64_t freq2k; 682 uint64_t freq2k;
651 unsigned p, n2, r2; 683 unsigned p, n2, r2;
@@ -708,27 +740,17 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
708 *r2_out = best.r2; 740 *r2_out = best.r2;
709} 741}
710 742
711/* 743static bool
712 * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and 744hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
713 * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to 745 struct intel_encoder *intel_encoder,
714 * steal the selected PLL. You need to call intel_ddi_pll_enable to actually 746 int clock)
715 * enable the PLL.
716 */
717bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
718{ 747{
719 struct drm_crtc *crtc = &intel_crtc->base; 748 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
720 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
721 int type = intel_encoder->type;
722 int clock = intel_crtc->config.port_clock;
723
724 intel_put_shared_dpll(intel_crtc);
725
726 if (type == INTEL_OUTPUT_HDMI) {
727 struct intel_shared_dpll *pll; 749 struct intel_shared_dpll *pll;
728 uint32_t val; 750 uint32_t val;
729 unsigned p, n2, r2; 751 unsigned p, n2, r2;
730 752
731 intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); 753 hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
732 754
733 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | 755 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
734 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | 756 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -749,6 +771,25 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
749 return true; 771 return true;
750} 772}
751 773
774
775/*
776 * Tries to find a *shared* PLL for the CRTC and store it in
777 * intel_crtc->ddi_pll_sel.
778 *
779 * For private DPLLs, compute_config() should do the selection for us. This
780 * function should be folded into compute_config() eventually.
781 */
782bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
783{
784 struct drm_crtc *crtc = &intel_crtc->base;
785 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
786 int clock = intel_crtc->config.port_clock;
787
788 intel_put_shared_dpll(intel_crtc);
789
790 return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
791}
792
752void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) 793void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
753{ 794{
754 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 795 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1183,31 +1224,52 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1183 } 1224 }
1184} 1225}
1185 1226
1186int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1227static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
1228{
1229 uint32_t lcpll = I915_READ(LCPLL_CTL);
1230 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
1231
1232 if (lcpll & LCPLL_CD_SOURCE_FCLK)
1233 return 800000;
1234 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
1235 return 450000;
1236 else if (freq == LCPLL_CLK_FREQ_450)
1237 return 450000;
1238 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
1239 return 540000;
1240 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
1241 return 337500;
1242 else
1243 return 675000;
1244}
1245
1246static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
1187{ 1247{
1188 struct drm_device *dev = dev_priv->dev; 1248 struct drm_device *dev = dev_priv->dev;
1189 uint32_t lcpll = I915_READ(LCPLL_CTL); 1249 uint32_t lcpll = I915_READ(LCPLL_CTL);
1190 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 1250 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
1191 1251
1192 if (lcpll & LCPLL_CD_SOURCE_FCLK) { 1252 if (lcpll & LCPLL_CD_SOURCE_FCLK)
1193 return 800000; 1253 return 800000;
1194 } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) { 1254 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
1195 return 450000; 1255 return 450000;
1196 } else if (freq == LCPLL_CLK_FREQ_450) { 1256 else if (freq == LCPLL_CLK_FREQ_450)
1197 return 450000; 1257 return 450000;
1198 } else if (IS_HASWELL(dev)) { 1258 else if (IS_ULT(dev))
1199 if (IS_ULT(dev)) 1259 return 337500;
1200 return 337500; 1260 else
1201 else 1261 return 540000;
1202 return 540000; 1262}
1203 } else { 1263
1204 if (freq == LCPLL_CLK_FREQ_54O_BDW) 1264int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1205 return 540000; 1265{
1206 else if (freq == LCPLL_CLK_FREQ_337_5_BDW) 1266 struct drm_device *dev = dev_priv->dev;
1207 return 337500; 1267
1208 else 1268 if (IS_BROADWELL(dev))
1209 return 675000; 1269 return bdw_get_cdclk_freq(dev_priv);
1210 } 1270
1271 /* Haswell */
1272 return hsw_get_cdclk_freq(dev_priv);
1211} 1273}
1212 1274
1213static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, 1275static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1248,10 +1310,8 @@ static const char * const hsw_ddi_pll_names[] = {
1248 "WRPLL 2", 1310 "WRPLL 2",
1249}; 1311};
1250 1312
1251void intel_ddi_pll_init(struct drm_device *dev) 1313static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
1252{ 1314{
1253 struct drm_i915_private *dev_priv = dev->dev_private;
1254 uint32_t val = I915_READ(LCPLL_CTL);
1255 int i; 1315 int i;
1256 1316
1257 dev_priv->num_shared_dpll = 2; 1317 dev_priv->num_shared_dpll = 2;
@@ -1264,6 +1324,14 @@ void intel_ddi_pll_init(struct drm_device *dev)
1264 dev_priv->shared_dplls[i].get_hw_state = 1324 dev_priv->shared_dplls[i].get_hw_state =
1265 hsw_ddi_pll_get_hw_state; 1325 hsw_ddi_pll_get_hw_state;
1266 } 1326 }
1327}
1328
1329void intel_ddi_pll_init(struct drm_device *dev)
1330{
1331 struct drm_i915_private *dev_priv = dev->dev_private;
1332 uint32_t val = I915_READ(LCPLL_CTL);
1333
1334 hsw_shared_dplls_init(dev_priv);
1267 1335
1268 /* The LCPLL register should be turned on by the BIOS. For now let's 1336 /* The LCPLL register should be turned on by the BIOS. For now let's
1269 * just check its state and print errors in case something is wrong. 1337 * just check its state and print errors in case something is wrong.
@@ -1444,7 +1512,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1444 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; 1512 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1445 } 1513 }
1446 1514
1447 intel_ddi_clock_get(encoder, pipe_config); 1515 hsw_ddi_clock_get(encoder, pipe_config);
1448} 1516}
1449 1517
1450static void intel_ddi_destroy(struct drm_encoder *encoder) 1518static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d8324c69fa86..507370513f3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -91,15 +91,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
91 struct intel_framebuffer *ifb, 91 struct intel_framebuffer *ifb,
92 struct drm_mode_fb_cmd2 *mode_cmd, 92 struct drm_mode_fb_cmd2 *mode_cmd,
93 struct drm_i915_gem_object *obj); 93 struct drm_i915_gem_object *obj);
94static void intel_dp_set_m_n(struct intel_crtc *crtc);
95static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 94static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
96static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 95static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
97static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 96static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
98 struct intel_link_m_n *m_n); 97 struct intel_link_m_n *m_n,
98 struct intel_link_m_n *m2_n2);
99static void ironlake_set_pipeconf(struct drm_crtc *crtc); 99static void ironlake_set_pipeconf(struct drm_crtc *crtc);
100static void haswell_set_pipeconf(struct drm_crtc *crtc); 100static void haswell_set_pipeconf(struct drm_crtc *crtc);
101static void intel_set_pipe_csc(struct drm_crtc *crtc); 101static void intel_set_pipe_csc(struct drm_crtc *crtc);
102static void vlv_prepare_pll(struct intel_crtc *crtc); 102static void vlv_prepare_pll(struct intel_crtc *crtc);
103static void chv_prepare_pll(struct intel_crtc *crtc);
103 104
104static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) 105static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
105{ 106{
@@ -899,7 +900,8 @@ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
899 frame = I915_READ(frame_reg); 900 frame = I915_READ(frame_reg);
900 901
901 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) 902 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
902 WARN(1, "vblank wait timed out\n"); 903 WARN(1, "vblank wait on pipe %c timed out\n",
904 pipe_name(pipe));
903} 905}
904 906
905/** 907/**
@@ -940,7 +942,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
940 if (wait_for(I915_READ(pipestat_reg) & 942 if (wait_for(I915_READ(pipestat_reg) &
941 PIPE_VBLANK_INTERRUPT_STATUS, 943 PIPE_VBLANK_INTERRUPT_STATUS,
942 50)) 944 50))
943 DRM_DEBUG_KMS("vblank wait timed out\n"); 945 DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
946 pipe_name(pipe));
944} 947}
945 948
946static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 949static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
@@ -964,8 +967,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
964 967
965/* 968/*
966 * intel_wait_for_pipe_off - wait for pipe to turn off 969 * intel_wait_for_pipe_off - wait for pipe to turn off
967 * @dev: drm device 970 * @crtc: crtc whose pipe to wait for
968 * @pipe: pipe to wait for
969 * 971 *
970 * After disabling a pipe, we can't wait for vblank in the usual way, 972 * After disabling a pipe, we can't wait for vblank in the usual way,
971 * spinning on the vblank interrupt status bit, since we won't actually 973 * spinning on the vblank interrupt status bit, since we won't actually
@@ -979,11 +981,12 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
979 * ends up stopping at the start of the next frame). 981 * ends up stopping at the start of the next frame).
980 * 982 *
981 */ 983 */
982void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 984static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
983{ 985{
986 struct drm_device *dev = crtc->base.dev;
984 struct drm_i915_private *dev_priv = dev->dev_private; 987 struct drm_i915_private *dev_priv = dev->dev_private;
985 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 988 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
986 pipe); 989 enum pipe pipe = crtc->pipe;
987 990
988 if (INTEL_INFO(dev)->gen >= 4) { 991 if (INTEL_INFO(dev)->gen >= 4) {
989 int reg = PIPECONF(cpu_transcoder); 992 int reg = PIPECONF(cpu_transcoder);
@@ -1192,27 +1195,40 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1192static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1195static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1193 enum pipe pipe) 1196 enum pipe pipe)
1194{ 1197{
1195 int pp_reg, lvds_reg; 1198 struct drm_device *dev = dev_priv->dev;
1199 int pp_reg;
1196 u32 val; 1200 u32 val;
1197 enum pipe panel_pipe = PIPE_A; 1201 enum pipe panel_pipe = PIPE_A;
1198 bool locked = true; 1202 bool locked = true;
1199 1203
1200 if (HAS_PCH_SPLIT(dev_priv->dev)) { 1204 if (WARN_ON(HAS_DDI(dev)))
1205 return;
1206
1207 if (HAS_PCH_SPLIT(dev)) {
1208 u32 port_sel;
1209
1201 pp_reg = PCH_PP_CONTROL; 1210 pp_reg = PCH_PP_CONTROL;
1202 lvds_reg = PCH_LVDS; 1211 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1212
1213 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1214 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1215 panel_pipe = PIPE_B;
1216 /* XXX: else fix for eDP */
1217 } else if (IS_VALLEYVIEW(dev)) {
1218 /* presumably write lock depends on pipe, not port select */
1219 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1220 panel_pipe = pipe;
1203 } else { 1221 } else {
1204 pp_reg = PP_CONTROL; 1222 pp_reg = PP_CONTROL;
1205 lvds_reg = LVDS; 1223 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1224 panel_pipe = PIPE_B;
1206 } 1225 }
1207 1226
1208 val = I915_READ(pp_reg); 1227 val = I915_READ(pp_reg);
1209 if (!(val & PANEL_POWER_ON) || 1228 if (!(val & PANEL_POWER_ON) ||
1210 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) 1229 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1211 locked = false; 1230 locked = false;
1212 1231
1213 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1214 panel_pipe = PIPE_B;
1215
1216 WARN(panel_pipe == pipe && locked, 1232 WARN(panel_pipe == pipe && locked,
1217 "panel assertion failure, pipe %c regs locked\n", 1233 "panel assertion failure, pipe %c regs locked\n",
1218 pipe_name(pipe)); 1234 pipe_name(pipe));
@@ -1245,8 +1261,9 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1245 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1261 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1246 pipe); 1262 pipe);
1247 1263
1248 /* if we need the pipe A quirk it must be always on */ 1264 /* if we need the pipe quirk it must be always on */
1249 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1265 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1266 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1250 state = true; 1267 state = true;
1251 1268
1252 if (!intel_display_power_enabled(dev_priv, 1269 if (!intel_display_power_enabled(dev_priv,
@@ -1300,7 +1317,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1300 } 1317 }
1301 1318
1302 /* Need to check both planes against the pipe */ 1319 /* Need to check both planes against the pipe */
1303 for_each_pipe(i) { 1320 for_each_pipe(dev_priv, i) {
1304 reg = DSPCNTR(i); 1321 reg = DSPCNTR(i);
1305 val = I915_READ(reg); 1322 val = I915_READ(reg);
1306 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1323 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1341,6 +1358,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1341 } 1358 }
1342} 1359}
1343 1360
1361static void assert_vblank_disabled(struct drm_crtc *crtc)
1362{
1363 if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1364 drm_crtc_vblank_put(crtc);
1365}
1366
1344static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1367static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1345{ 1368{
1346 u32 val; 1369 u32 val;
@@ -1513,34 +1536,6 @@ static void intel_init_dpio(struct drm_device *dev)
1513 } 1536 }
1514} 1537}
1515 1538
1516static void intel_reset_dpio(struct drm_device *dev)
1517{
1518 struct drm_i915_private *dev_priv = dev->dev_private;
1519
1520 if (IS_CHERRYVIEW(dev)) {
1521 enum dpio_phy phy;
1522 u32 val;
1523
1524 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1525 /* Poll for phypwrgood signal */
1526 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1527 PHY_POWERGOOD(phy), 1))
1528 DRM_ERROR("Display PHY %d is not power up\n", phy);
1529
1530 /*
1531 * Deassert common lane reset for PHY.
1532 *
1533 * This should only be done on init and resume from S3
1534 * with both PLLs disabled, or we risk losing DPIO and
1535 * PLL synchronization.
1536 */
1537 val = I915_READ(DISPLAY_PHY_CONTROL);
1538 I915_WRITE(DISPLAY_PHY_CONTROL,
1539 PHY_COM_LANE_RESET_DEASSERT(phy, val));
1540 }
1541 }
1542}
1543
1544static void vlv_enable_pll(struct intel_crtc *crtc) 1539static void vlv_enable_pll(struct intel_crtc *crtc)
1545{ 1540{
1546 struct drm_device *dev = crtc->base.dev; 1541 struct drm_device *dev = crtc->base.dev;
@@ -1554,7 +1549,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
1554 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); 1549 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1555 1550
1556 /* PLL is protected by panel, make sure we can write it */ 1551 /* PLL is protected by panel, make sure we can write it */
1557 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1552 if (IS_MOBILE(dev_priv->dev))
1558 assert_panel_unlocked(dev_priv, crtc->pipe); 1553 assert_panel_unlocked(dev_priv, crtc->pipe);
1559 1554
1560 I915_WRITE(reg, dpll); 1555 I915_WRITE(reg, dpll);
@@ -1617,6 +1612,18 @@ static void chv_enable_pll(struct intel_crtc *crtc)
1617 mutex_unlock(&dev_priv->dpio_lock); 1612 mutex_unlock(&dev_priv->dpio_lock);
1618} 1613}
1619 1614
1615static int intel_num_dvo_pipes(struct drm_device *dev)
1616{
1617 struct intel_crtc *crtc;
1618 int count = 0;
1619
1620 for_each_intel_crtc(dev, crtc)
1621 count += crtc->active &&
1622 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
1623
1624 return count;
1625}
1626
1620static void i9xx_enable_pll(struct intel_crtc *crtc) 1627static void i9xx_enable_pll(struct intel_crtc *crtc)
1621{ 1628{
1622 struct drm_device *dev = crtc->base.dev; 1629 struct drm_device *dev = crtc->base.dev;
@@ -1633,7 +1640,18 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1633 if (IS_MOBILE(dev) && !IS_I830(dev)) 1640 if (IS_MOBILE(dev) && !IS_I830(dev))
1634 assert_panel_unlocked(dev_priv, crtc->pipe); 1641 assert_panel_unlocked(dev_priv, crtc->pipe);
1635 1642
1636 I915_WRITE(reg, dpll); 1643 /* Enable DVO 2x clock on both PLLs if necessary */
1644 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1645 /*
1646 * It appears to be important that we don't enable this
1647 * for the current pipe before otherwise configuring the
1648 * PLL. No idea how this should be handled if multiple
1649 * DVO outputs are enabled simultaneosly.
1650 */
1651 dpll |= DPLL_DVO_2X_MODE;
1652 I915_WRITE(DPLL(!crtc->pipe),
1653 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1654 }
1637 1655
1638 /* Wait for the clocks to stabilize. */ 1656 /* Wait for the clocks to stabilize. */
1639 POSTING_READ(reg); 1657 POSTING_READ(reg);
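The 830 DVO 2x clock bit must be set identically on both DPLLs, so the enable path above mirrors it onto the other pipe's PLL, and the disable path below only clears it once intel_num_dvo_pipes() says the last active DVO pipe is going down. A hypothetical helper condensing the clear side (not in the patch):

	/* Hypothetical helper: drop DVO 2x mode on both PLLs at once. */
	static void i830_clear_dvo_2x(struct drm_i915_private *dev_priv)
	{
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
	}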
@@ -1672,10 +1690,25 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1672 * 1690 *
1673 * Note! This is for pre-ILK only. 1691 * Note! This is for pre-ILK only.
1674 */ 1692 */
1675static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1693static void i9xx_disable_pll(struct intel_crtc *crtc)
1676{ 1694{
1677 /* Don't disable pipe A or pipe A PLLs if needed */ 1695 struct drm_device *dev = crtc->base.dev;
1678 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1696 struct drm_i915_private *dev_priv = dev->dev_private;
1697 enum pipe pipe = crtc->pipe;
1698
1699 /* Disable DVO 2x clock on both PLLs if necessary */
1700 if (IS_I830(dev) &&
1701 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
1702 intel_num_dvo_pipes(dev) == 1) {
1703 I915_WRITE(DPLL(PIPE_B),
1704 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1705 I915_WRITE(DPLL(PIPE_A),
1706 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1707 }
1708
1709 /* Don't disable pipe or pipe PLLs if needed */
1710 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1711 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1679 return; 1712 return;
1680 1713
1681 /* Make sure the pipe isn't still relying on us */ 1714 /* Make sure the pipe isn't still relying on us */
@@ -1712,7 +1745,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1712 assert_pipe_disabled(dev_priv, pipe); 1745 assert_pipe_disabled(dev_priv, pipe);
1713 1746
1714 /* Set PLL en = 0 */ 1747 /* Set PLL en = 0 */
1715 val = DPLL_SSC_REF_CLOCK_CHV; 1748 val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1716 if (pipe != PIPE_A) 1749 if (pipe != PIPE_A)
1717 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1750 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1718 I915_WRITE(DPLL(pipe), val); 1751 I915_WRITE(DPLL(pipe), val);
@@ -1806,7 +1839,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1806 if (WARN_ON(pll->refcount == 0)) 1839 if (WARN_ON(pll->refcount == 0))
1807 return; 1840 return;
1808 1841
1809 DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n", 1842 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1810 pll->name, pll->active, pll->on, 1843 pll->name, pll->active, pll->on,
1811 crtc->base.base.id); 1844 crtc->base.base.id);
1812 1845
@@ -1824,7 +1857,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1824 pll->on = true; 1857 pll->on = true;
1825} 1858}
1826 1859
1827void intel_disable_shared_dpll(struct intel_crtc *crtc) 1860static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1828{ 1861{
1829 struct drm_device *dev = crtc->base.dev; 1862 struct drm_device *dev = crtc->base.dev;
1830 struct drm_i915_private *dev_priv = dev->dev_private; 1863 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1868,7 +1901,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1868 uint32_t reg, val, pipeconf_val; 1901 uint32_t reg, val, pipeconf_val;
1869 1902
1870 /* PCH only available on ILK+ */ 1903 /* PCH only available on ILK+ */
1871 BUG_ON(INTEL_INFO(dev)->gen < 5); 1904 BUG_ON(!HAS_PCH_SPLIT(dev));
1872 1905
1873 /* Make sure PCH DPLL is enabled */ 1906 /* Make sure PCH DPLL is enabled */
1874 assert_shared_dpll_enabled(dev_priv, 1907 assert_shared_dpll_enabled(dev_priv,
@@ -1921,7 +1954,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1921 u32 val, pipeconf_val; 1954 u32 val, pipeconf_val;
1922 1955
1923 /* PCH only available on ILK+ */ 1956 /* PCH only available on ILK+ */
1924 BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); 1957 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
1925 1958
1926 /* FDI must be feeding us bits for PCH ports */ 1959 /* FDI must be feeding us bits for PCH ports */
1927 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 1960 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
@@ -2043,8 +2076,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2043 reg = PIPECONF(cpu_transcoder); 2076 reg = PIPECONF(cpu_transcoder);
2044 val = I915_READ(reg); 2077 val = I915_READ(reg);
2045 if (val & PIPECONF_ENABLE) { 2078 if (val & PIPECONF_ENABLE) {
2046 WARN_ON(!(pipe == PIPE_A && 2079 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2047 dev_priv->quirks & QUIRK_PIPEA_FORCE)); 2080 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2048 return; 2081 return;
2049 } 2082 }
2050 2083
@@ -2054,21 +2087,19 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2054 2087
2055/** 2088/**
2056 * intel_disable_pipe - disable a pipe, asserting requirements 2089 * intel_disable_pipe - disable a pipe, asserting requirements
2057 * @dev_priv: i915 private structure 2090 * @crtc: crtc whose pipe is to be disabled
2058 * @pipe: pipe to disable
2059 * 2091 *
2060 * Disable @pipe, making sure that various hardware specific requirements 2092 * Disable the pipe of @crtc, making sure that various hardware
2061 * are met, if applicable, e.g. plane disabled, panel fitter off, etc. 2093 * specific requirements are met, if applicable, e.g. plane
2062 * 2094 * disabled, panel fitter off, etc.
2063 * @pipe should be %PIPE_A or %PIPE_B.
2064 * 2095 *
2065 * Will wait until the pipe has shut down before returning. 2096 * Will wait until the pipe has shut down before returning.
2066 */ 2097 */
2067static void intel_disable_pipe(struct drm_i915_private *dev_priv, 2098static void intel_disable_pipe(struct intel_crtc *crtc)
2068 enum pipe pipe)
2069{ 2099{
2070 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2100 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2071 pipe); 2101 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2102 enum pipe pipe = crtc->pipe;
2072 int reg; 2103 int reg;
2073 u32 val; 2104 u32 val;
2074 2105
@@ -2080,17 +2111,26 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
2080 assert_cursor_disabled(dev_priv, pipe); 2111 assert_cursor_disabled(dev_priv, pipe);
2081 assert_sprites_disabled(dev_priv, pipe); 2112 assert_sprites_disabled(dev_priv, pipe);
2082 2113
2083 /* Don't disable pipe A or pipe A PLLs if needed */
2084 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2085 return;
2086
2087 reg = PIPECONF(cpu_transcoder); 2114 reg = PIPECONF(cpu_transcoder);
2088 val = I915_READ(reg); 2115 val = I915_READ(reg);
2089 if ((val & PIPECONF_ENABLE) == 0) 2116 if ((val & PIPECONF_ENABLE) == 0)
2090 return; 2117 return;
2091 2118
2092 I915_WRITE(reg, val & ~PIPECONF_ENABLE); 2119 /*
2093 intel_wait_for_pipe_off(dev_priv->dev, pipe); 2120 * Double wide has implications for planes
2121 * so best keep it disabled when not needed.
2122 */
2123 if (crtc->config.double_wide)
2124 val &= ~PIPECONF_DOUBLE_WIDE;
2125
2126 /* Don't disable pipe or pipe PLLs if needed */
2127 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2128 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2129 val &= ~PIPECONF_ENABLE;
2130
2131 I915_WRITE(reg, val);
2132 if ((val & PIPECONF_ENABLE) == 0)
2133 intel_wait_for_pipe_off(crtc);
2094} 2134}
2095 2135
2096/* 2136/*
@@ -2109,35 +2149,28 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2109 2149
2110/** 2150/**
2111 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe 2151 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2112 * @dev_priv: i915 private structure 2152 * @plane: plane to be enabled
2113 * @plane: plane to enable 2153 * @crtc: crtc for the plane
2114 * @pipe: pipe being fed
2115 * 2154 *
2116 * Enable @plane on @pipe, making sure that @pipe is running first. 2155 * Enable @plane on @crtc, making sure that the pipe is running first.
2117 */ 2156 */
2118static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, 2157static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2119 enum plane plane, enum pipe pipe) 2158 struct drm_crtc *crtc)
2120{ 2159{
2121 struct drm_device *dev = dev_priv->dev; 2160 struct drm_device *dev = plane->dev;
2122 struct intel_crtc *intel_crtc = 2161 struct drm_i915_private *dev_priv = dev->dev_private;
2123 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2162 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2124 int reg;
2125 u32 val;
2126 2163
2127 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 2164 /* If the pipe isn't enabled, we can't pump pixels and may hang */
2128 assert_pipe_enabled(dev_priv, pipe); 2165 assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2129 2166
2130 if (intel_crtc->primary_enabled) 2167 if (intel_crtc->primary_enabled)
2131 return; 2168 return;
2132 2169
2133 intel_crtc->primary_enabled = true; 2170 intel_crtc->primary_enabled = true;
2134 2171
2135 reg = DSPCNTR(plane); 2172 dev_priv->display.update_primary_plane(crtc, plane->fb,
2136 val = I915_READ(reg); 2173 crtc->x, crtc->y);
2137 WARN_ON(val & DISPLAY_PLANE_ENABLE);
2138
2139 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
2140 intel_flush_primary_plane(dev_priv, plane);
2141 2174
2142 /* 2175 /*
2143 * BDW signals flip done immediately if the plane 2176 * BDW signals flip done immediately if the plane
@@ -2150,31 +2183,27 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2150 2183
2151/** 2184/**
2152 * intel_disable_primary_hw_plane - disable the primary hardware plane 2185 * intel_disable_primary_hw_plane - disable the primary hardware plane
2153 * @dev_priv: i915 private structure 2186 * @plane: plane to be disabled
2154 * @plane: plane to disable 2187 * @crtc: crtc for the plane
2155 * @pipe: pipe consuming the data
2156 * 2188 *
2157 * Disable @plane; should be an independent operation. 2189 * Disable @plane on @crtc, making sure that the pipe is running first.
2158 */ 2190 */
2159static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, 2191static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2160 enum plane plane, enum pipe pipe) 2192 struct drm_crtc *crtc)
2161{ 2193{
2162 struct intel_crtc *intel_crtc = 2194 struct drm_device *dev = plane->dev;
2163 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2195 struct drm_i915_private *dev_priv = dev->dev_private;
2164 int reg; 2196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2165 u32 val; 2197
2198 assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2166 2199
2167 if (!intel_crtc->primary_enabled) 2200 if (!intel_crtc->primary_enabled)
2168 return; 2201 return;
2169 2202
2170 intel_crtc->primary_enabled = false; 2203 intel_crtc->primary_enabled = false;
2171 2204
2172 reg = DSPCNTR(plane); 2205 dev_priv->display.update_primary_plane(crtc, plane->fb,
2173 val = I915_READ(reg); 2206 crtc->x, crtc->y);
2174 WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
2175
2176 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
2177 intel_flush_primary_plane(dev_priv, plane);
2178} 2207}
2179 2208
2180static bool need_vtd_wa(struct drm_device *dev) 2209static bool need_vtd_wa(struct drm_device *dev)
@@ -2422,16 +2451,46 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2422 struct drm_device *dev = crtc->dev; 2451 struct drm_device *dev = crtc->dev;
2423 struct drm_i915_private *dev_priv = dev->dev_private; 2452 struct drm_i915_private *dev_priv = dev->dev_private;
2424 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2453 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2425 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2454 struct drm_i915_gem_object *obj;
2426 int plane = intel_crtc->plane; 2455 int plane = intel_crtc->plane;
2427 unsigned long linear_offset; 2456 unsigned long linear_offset;
2428 u32 dspcntr; 2457 u32 dspcntr;
2429 u32 reg; 2458 u32 reg = DSPCNTR(plane);
2459 int pixel_size;
2460
2461 if (!intel_crtc->primary_enabled) {
2462 I915_WRITE(reg, 0);
2463 if (INTEL_INFO(dev)->gen >= 4)
2464 I915_WRITE(DSPSURF(plane), 0);
2465 else
2466 I915_WRITE(DSPADDR(plane), 0);
2467 POSTING_READ(reg);
2468 return;
2469 }
2470
2471 obj = intel_fb_obj(fb);
2472 if (WARN_ON(obj == NULL))
2473 return;
2474
2475 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2476
2477 dspcntr = DISPPLANE_GAMMA_ENABLE;
2478
2479 dspcntr |= DISPLAY_PLANE_ENABLE;
2480
2481 if (INTEL_INFO(dev)->gen < 4) {
2482 if (intel_crtc->pipe == PIPE_B)
2483 dspcntr |= DISPPLANE_SEL_PIPE_B;
2484
2485 /* pipesrc and dspsize control the size that is scaled from,
2486 * which should always be the user's requested size.
2487 */
2488 I915_WRITE(DSPSIZE(plane),
2489 ((intel_crtc->config.pipe_src_h - 1) << 16) |
2490 (intel_crtc->config.pipe_src_w - 1));
2491 I915_WRITE(DSPPOS(plane), 0);
2492 }
2430 2493
2431 reg = DSPCNTR(plane);
2432 dspcntr = I915_READ(reg);
2433 /* Mask out pixel format bits in case we change it */
2434 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2435 switch (fb->pixel_format) { 2494 switch (fb->pixel_format) {
2436 case DRM_FORMAT_C8: 2495 case DRM_FORMAT_C8:
2437 dspcntr |= DISPPLANE_8BPP; 2496 dspcntr |= DISPPLANE_8BPP;
@@ -2463,30 +2522,40 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2463 BUG(); 2522 BUG();
2464 } 2523 }
2465 2524
2466 if (INTEL_INFO(dev)->gen >= 4) { 2525 if (INTEL_INFO(dev)->gen >= 4 &&
2467 if (obj->tiling_mode != I915_TILING_NONE) 2526 obj->tiling_mode != I915_TILING_NONE)
2468 dspcntr |= DISPPLANE_TILED; 2527 dspcntr |= DISPPLANE_TILED;
2469 else
2470 dspcntr &= ~DISPPLANE_TILED;
2471 }
2472 2528
2473 if (IS_G4X(dev)) 2529 if (IS_G4X(dev))
2474 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2530 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2475 2531
2476 I915_WRITE(reg, dspcntr); 2532 linear_offset = y * fb->pitches[0] + x * pixel_size;
2477
2478 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2479 2533
2480 if (INTEL_INFO(dev)->gen >= 4) { 2534 if (INTEL_INFO(dev)->gen >= 4) {
2481 intel_crtc->dspaddr_offset = 2535 intel_crtc->dspaddr_offset =
2482 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 2536 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2483 fb->bits_per_pixel / 8, 2537 pixel_size,
2484 fb->pitches[0]); 2538 fb->pitches[0]);
2485 linear_offset -= intel_crtc->dspaddr_offset; 2539 linear_offset -= intel_crtc->dspaddr_offset;
2486 } else { 2540 } else {
2487 intel_crtc->dspaddr_offset = linear_offset; 2541 intel_crtc->dspaddr_offset = linear_offset;
2488 } 2542 }
2489 2543
2544 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2545 dspcntr |= DISPPLANE_ROTATE_180;
2546
2547 x += (intel_crtc->config.pipe_src_w - 1);
2548 y += (intel_crtc->config.pipe_src_h - 1);
2549
2550 /* Find the last pixel of the last line of the display
2551 data and add it to linear_offset */
2552 linear_offset +=
2553 (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2554 (intel_crtc->config.pipe_src_w - 1) * pixel_size;
2555 }
2556
2557 I915_WRITE(reg, dspcntr);
2558
2490 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 2559 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2491 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, 2560 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2492 fb->pitches[0]); 2561 fb->pitches[0]);
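The rotation handling added above (in both i9xx_update_primary_plane() and, below, ironlake_update_primary_plane()) implements 180-degree rotation without any blit: DISPPLANE_ROTATE_180 makes the hardware scan backwards, so the base address must be repointed at the last pixel of the last visible line. A minimal sketch of that offset arithmetic, assuming a packed framebuffer (the helper name is illustrative, not the driver's):

#include <stdint.h>

/* Offset of the last pixel of a src_w x src_h window, relative to the
 * unrotated linear_offset; cpp is bytes per pixel, pitch is bytes per
 * scanline. */
static unsigned long rotate_180_base(unsigned long linear_offset,
				     unsigned int src_w, unsigned int src_h,
				     unsigned int pitch, unsigned int cpp)
{
	return linear_offset + (unsigned long)(src_h - 1) * pitch
			     + (unsigned long)(src_w - 1) * cpp;
}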
@@ -2508,16 +2577,33 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2508 struct drm_device *dev = crtc->dev; 2577 struct drm_device *dev = crtc->dev;
2509 struct drm_i915_private *dev_priv = dev->dev_private; 2578 struct drm_i915_private *dev_priv = dev->dev_private;
2510 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2579 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2511 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2580 struct drm_i915_gem_object *obj;
2512 int plane = intel_crtc->plane; 2581 int plane = intel_crtc->plane;
2513 unsigned long linear_offset; 2582 unsigned long linear_offset;
2514 u32 dspcntr; 2583 u32 dspcntr;
2515 u32 reg; 2584 u32 reg = DSPCNTR(plane);
2585 int pixel_size;
2586
2587 if (!intel_crtc->primary_enabled) {
2588 I915_WRITE(reg, 0);
2589 I915_WRITE(DSPSURF(plane), 0);
2590 POSTING_READ(reg);
2591 return;
2592 }
2593
2594 obj = intel_fb_obj(fb);
2595 if (WARN_ON(obj == NULL))
2596 return;
2597
2598 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2599
2600 dspcntr = DISPPLANE_GAMMA_ENABLE;
2601
2602 dspcntr |= DISPLAY_PLANE_ENABLE;
2603
2604 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2605 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2516 2606
2517 reg = DSPCNTR(plane);
2518 dspcntr = I915_READ(reg);
2519 /* Mask out pixel format bits in case we change it */
2520 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2521 switch (fb->pixel_format) { 2607 switch (fb->pixel_format) {
2522 case DRM_FORMAT_C8: 2608 case DRM_FORMAT_C8:
2523 dspcntr |= DISPPLANE_8BPP; 2609 dspcntr |= DISPPLANE_8BPP;
@@ -2547,22 +2633,32 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2547 2633
2548 if (obj->tiling_mode != I915_TILING_NONE) 2634 if (obj->tiling_mode != I915_TILING_NONE)
2549 dspcntr |= DISPPLANE_TILED; 2635 dspcntr |= DISPPLANE_TILED;
2550 else
2551 dspcntr &= ~DISPPLANE_TILED;
2552 2636
2553 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2637 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2554 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2555 else
2556 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2638 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2557 2639
2558 I915_WRITE(reg, dspcntr); 2640 linear_offset = y * fb->pitches[0] + x * pixel_size;
2559
2560 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2561 intel_crtc->dspaddr_offset = 2641 intel_crtc->dspaddr_offset =
2562 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 2642 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2563 fb->bits_per_pixel / 8, 2643 pixel_size,
2564 fb->pitches[0]); 2644 fb->pitches[0]);
2565 linear_offset -= intel_crtc->dspaddr_offset; 2645 linear_offset -= intel_crtc->dspaddr_offset;
2646 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2647 dspcntr |= DISPPLANE_ROTATE_180;
2648
2649 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2650 x += (intel_crtc->config.pipe_src_w - 1);
2651 y += (intel_crtc->config.pipe_src_h - 1);
2652
2653 /* Find the last pixel of the last line of the display
2654 data and add it to linear_offset */
2655 linear_offset +=
2656 (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2657 (intel_crtc->config.pipe_src_w - 1) * pixel_size;
2658 }
2659 }
2660
2661 I915_WRITE(reg, dspcntr);
2566 2662
2567 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 2663 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2568 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, 2664 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
@@ -3346,23 +3442,54 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3346 return false; 3442 return false;
3347} 3443}
3348 3444
3445static void page_flip_completed(struct intel_crtc *intel_crtc)
3446{
3447 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3448 struct intel_unpin_work *work = intel_crtc->unpin_work;
3449
3450 /* ensure that the unpin work is consistent wrt ->pending. */
3451 smp_rmb();
3452 intel_crtc->unpin_work = NULL;
3453
3454 if (work->event)
3455 drm_send_vblank_event(intel_crtc->base.dev,
3456 intel_crtc->pipe,
3457 work->event);
3458
3459 drm_crtc_vblank_put(&intel_crtc->base);
3460
3461 wake_up_all(&dev_priv->pending_flip_queue);
3462 queue_work(dev_priv->wq, &work->work);
3463
3464 trace_i915_flip_complete(intel_crtc->plane,
3465 work->pending_flip_obj);
3466}
3467
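page_flip_completed() centralizes what the finish-flip path previously did inline: claim the work item, deliver the pending vblank event, drop the vblank reference, wake waiters, and queue the unpin work. The smp_rmb() pairs with the write side that marks the flip pending before an interrupt can observe it. A rough userspace model of the claim step, using a mutex where the kernel relies on dev->event_lock (names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct flip_work;

/* Claim-and-clear under a lock so that exactly one path completes a
 * given flip, whether that is the vblank handler or a recovery path. */
static struct flip_work *claim_flip(pthread_mutex_t *event_lock,
				    struct flip_work **slot)
{
	struct flip_work *work;

	pthread_mutex_lock(event_lock);
	work = *slot;
	*slot = NULL;
	pthread_mutex_unlock(event_lock);
	return work;
}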
3349void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3468void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3350{ 3469{
3351 struct drm_device *dev = crtc->dev; 3470 struct drm_device *dev = crtc->dev;
3352 struct drm_i915_private *dev_priv = dev->dev_private; 3471 struct drm_i915_private *dev_priv = dev->dev_private;
3353 3472
3354 if (crtc->primary->fb == NULL)
3355 return;
3356
3357 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3473 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3474 if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3475 !intel_crtc_has_pending_flip(crtc),
3476 60*HZ) == 0)) {
3477 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3478 unsigned long flags;
3358 3479
3359 WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, 3480 spin_lock_irqsave(&dev->event_lock, flags);
3360 !intel_crtc_has_pending_flip(crtc), 3481 if (intel_crtc->unpin_work) {
3361 60*HZ) == 0); 3482 WARN_ONCE(1, "Removing stuck page flip\n");
3483 page_flip_completed(intel_crtc);
3484 }
3485 spin_unlock_irqrestore(&dev->event_lock, flags);
3486 }
3362 3487
3363 mutex_lock(&dev->struct_mutex); 3488 if (crtc->primary->fb) {
3364 intel_finish_fb(crtc->primary->fb); 3489 mutex_lock(&dev->struct_mutex);
3365 mutex_unlock(&dev->struct_mutex); 3490 intel_finish_fb(crtc->primary->fb);
3491 mutex_unlock(&dev->struct_mutex);
3492 }
3366} 3493}
3367 3494
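intel_crtc_wait_for_pending_flips() now recovers instead of warning and blocking forever: wait_event_timeout() returns 0 on timeout, and a flip still pending after 60 seconds is forcibly completed via page_flip_completed(). A self-contained polling model of that "bounded wait, then kick" shape (tick source and callbacks are stand-ins, not driver API):

static void wait_or_kick(int (*pending)(void), void (*kick)(void),
			 unsigned long (*now)(void), unsigned long timeout)
{
	unsigned long start = now();

	while (pending()) {
		if (now() - start >= timeout) {
			kick();		/* the "Removing stuck page flip" path */
			return;
		}
	}
}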
3368/* Program iCLKIP clock to the desired frequency */ 3495/* Program iCLKIP clock to the desired frequency */
@@ -3911,14 +4038,14 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3911static void intel_crtc_enable_planes(struct drm_crtc *crtc) 4038static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3912{ 4039{
3913 struct drm_device *dev = crtc->dev; 4040 struct drm_device *dev = crtc->dev;
3914 struct drm_i915_private *dev_priv = dev->dev_private;
3915 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4041 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3916 int pipe = intel_crtc->pipe; 4042 int pipe = intel_crtc->pipe;
3917 int plane = intel_crtc->plane; 4043
4044 assert_vblank_disabled(crtc);
3918 4045
3919 drm_vblank_on(dev, pipe); 4046 drm_vblank_on(dev, pipe);
3920 4047
3921 intel_enable_primary_hw_plane(dev_priv, plane, pipe); 4048 intel_enable_primary_hw_plane(crtc->primary, crtc);
3922 intel_enable_planes(crtc); 4049 intel_enable_planes(crtc);
3923 intel_crtc_update_cursor(crtc, true); 4050 intel_crtc_update_cursor(crtc, true);
3924 intel_crtc_dpms_overlay(intel_crtc, true); 4051 intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3955,7 +4082,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3955 intel_crtc_dpms_overlay(intel_crtc, false); 4082 intel_crtc_dpms_overlay(intel_crtc, false);
3956 intel_crtc_update_cursor(crtc, false); 4083 intel_crtc_update_cursor(crtc, false);
3957 intel_disable_planes(crtc); 4084 intel_disable_planes(crtc);
3958 intel_disable_primary_hw_plane(dev_priv, plane, pipe); 4085 intel_disable_primary_hw_plane(crtc->primary, crtc);
3959 4086
3960 /* 4087 /*
3961 * FIXME: Once we grow proper nuclear flip support out of this we need 4088 * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -3965,6 +4092,8 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3965 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4092 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
3966 4093
3967 drm_vblank_off(dev, pipe); 4094 drm_vblank_off(dev, pipe);
4095
4096 assert_vblank_disabled(crtc);
3968} 4097}
3969 4098
3970static void ironlake_crtc_enable(struct drm_crtc *crtc) 4099static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -3974,7 +4103,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3974 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4103 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3975 struct intel_encoder *encoder; 4104 struct intel_encoder *encoder;
3976 int pipe = intel_crtc->pipe; 4105 int pipe = intel_crtc->pipe;
3977 enum plane plane = intel_crtc->plane;
3978 4106
3979 WARN_ON(!crtc->enabled); 4107 WARN_ON(!crtc->enabled);
3980 4108
@@ -3991,18 +4119,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3991 4119
3992 if (intel_crtc->config.has_pch_encoder) { 4120 if (intel_crtc->config.has_pch_encoder) {
3993 intel_cpu_transcoder_set_m_n(intel_crtc, 4121 intel_cpu_transcoder_set_m_n(intel_crtc,
3994 &intel_crtc->config.fdi_m_n); 4122 &intel_crtc->config.fdi_m_n, NULL);
3995 } 4123 }
3996 4124
3997 ironlake_set_pipeconf(crtc); 4125 ironlake_set_pipeconf(crtc);
3998 4126
3999 /* Set up the display plane register */
4000 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
4001 POSTING_READ(DSPCNTR(plane));
4002
4003 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4004 crtc->x, crtc->y);
4005
4006 intel_crtc->active = true; 4127 intel_crtc->active = true;
4007 4128
4008 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4129 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4087,7 +4208,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4208 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4088 struct intel_encoder *encoder; 4209 struct intel_encoder *encoder;
4089 int pipe = intel_crtc->pipe; 4210 int pipe = intel_crtc->pipe;
4090 enum plane plane = intel_crtc->plane;
4091 4211
4092 WARN_ON(!crtc->enabled); 4212 WARN_ON(!crtc->enabled);
4093 4213
@@ -4102,22 +4222,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4102 4222
4103 intel_set_pipe_timings(intel_crtc); 4223 intel_set_pipe_timings(intel_crtc);
4104 4224
4225 if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
4226 I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
4227 intel_crtc->config.pixel_multiplier - 1);
4228 }
4229
4105 if (intel_crtc->config.has_pch_encoder) { 4230 if (intel_crtc->config.has_pch_encoder) {
4106 intel_cpu_transcoder_set_m_n(intel_crtc, 4231 intel_cpu_transcoder_set_m_n(intel_crtc,
4107 &intel_crtc->config.fdi_m_n); 4232 &intel_crtc->config.fdi_m_n, NULL);
4108 } 4233 }
4109 4234
4110 haswell_set_pipeconf(crtc); 4235 haswell_set_pipeconf(crtc);
4111 4236
4112 intel_set_pipe_csc(crtc); 4237 intel_set_pipe_csc(crtc);
4113 4238
4114 /* Set up the display plane register */
4115 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
4116 POSTING_READ(DSPCNTR(plane));
4117
4118 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4119 crtc->x, crtc->y);
4120
4121 intel_crtc->active = true; 4239 intel_crtc->active = true;
4122 4240
4123 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4241 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4198,7 +4316,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4198 if (intel_crtc->config.has_pch_encoder) 4316 if (intel_crtc->config.has_pch_encoder)
4199 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4317 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
4200 4318
4201 intel_disable_pipe(dev_priv, pipe); 4319 intel_disable_pipe(intel_crtc);
4320
4202 ironlake_pfit_disable(intel_crtc); 4321 ironlake_pfit_disable(intel_crtc);
4203 4322
4204 for_each_encoder_on_crtc(dev, crtc, encoder) 4323 for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4246,7 +4365,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4246 struct drm_i915_private *dev_priv = dev->dev_private; 4365 struct drm_i915_private *dev_priv = dev->dev_private;
4247 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4366 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4248 struct intel_encoder *encoder; 4367 struct intel_encoder *encoder;
4249 int pipe = intel_crtc->pipe;
4250 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 4368 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4251 4369
4252 if (!intel_crtc->active) 4370 if (!intel_crtc->active)
@@ -4261,7 +4379,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4261 4379
4262 if (intel_crtc->config.has_pch_encoder) 4380 if (intel_crtc->config.has_pch_encoder)
4263 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 4381 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
4264 intel_disable_pipe(dev_priv, pipe); 4382 intel_disable_pipe(intel_crtc);
4265 4383
4266 if (intel_crtc->config.dp_encoder_is_mst) 4384 if (intel_crtc->config.dp_encoder_is_mst)
4267 intel_ddi_set_vc_payload_alloc(crtc, false); 4385 intel_ddi_set_vc_payload_alloc(crtc, false);
@@ -4539,12 +4657,57 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4539 vlv_update_cdclk(dev); 4657 vlv_update_cdclk(dev);
4540} 4658}
4541 4659
4660static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
4661{
4662 struct drm_i915_private *dev_priv = dev->dev_private;
4663 u32 val, cmd;
4664
4665 WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4666
4667 switch (cdclk) {
4668 case 400000:
4669 cmd = 3;
4670 break;
4671 case 333333:
4672 case 320000:
4673 cmd = 2;
4674 break;
4675 case 266667:
4676 cmd = 1;
4677 break;
4678 case 200000:
4679 cmd = 0;
4680 break;
4681 default:
4682 WARN_ON(1);
4683 return;
4684 }
4685
4686 mutex_lock(&dev_priv->rps.hw_lock);
4687 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4688 val &= ~DSPFREQGUAR_MASK_CHV;
4689 val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
4690 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4691 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4692 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
4693 50)) {
4694 DRM_ERROR("timed out waiting for CDclk change\n");
4695 }
4696 mutex_unlock(&dev_priv->rps.hw_lock);
4697
4698 vlv_update_cdclk(dev);
4699}
4700
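cherryview_set_cdclk() drives the display clock through the Punit rather than programming a divider directly: pick a command code for the target frequency, write it into PUNIT_REG_DSPFREQ, then poll the status field until it reflects the request. The frequency-to-command mapping, extracted here as a standalone helper for clarity (333333 kHz intentionally shares the 320 MHz command):

static int chv_cdclk_cmd(int cdclk_khz)
{
	switch (cdclk_khz) {
	case 400000: return 3;
	case 333333:
	case 320000: return 2;
	case 266667: return 1;
	case 200000: return 0;
	default:     return -1;	/* unsupported; the caller WARNs */
	}
}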
4542static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 4701static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4543 int max_pixclk) 4702 int max_pixclk)
4544{ 4703{
4545 int vco = valleyview_get_vco(dev_priv); 4704 int vco = valleyview_get_vco(dev_priv);
4546 int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000; 4705 int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
4547 4706
4707 /* FIXME: Punit isn't quite ready yet */
4708 if (IS_CHERRYVIEW(dev_priv->dev))
4709 return 400000;
4710
4548 /* 4711 /*
4549 * Really only a few cases to deal with, as only 4 CDclks are supported: 4712 * Really only a few cases to deal with, as only 4 CDclks are supported:
4550 * 200MHz 4713 * 200MHz
@@ -4607,21 +4770,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
4607 int max_pixclk = intel_mode_max_pixclk(dev_priv); 4770 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4608 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 4771 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4609 4772
4610 if (req_cdclk != dev_priv->vlv_cdclk_freq) 4773 if (req_cdclk != dev_priv->vlv_cdclk_freq) {
4611 valleyview_set_cdclk(dev, req_cdclk); 4774 if (IS_CHERRYVIEW(dev))
4775 cherryview_set_cdclk(dev, req_cdclk);
4776 else
4777 valleyview_set_cdclk(dev, req_cdclk);
4778 }
4779
4612 modeset_update_crtc_power_domains(dev); 4780 modeset_update_crtc_power_domains(dev);
4613} 4781}
4614 4782
4615static void valleyview_crtc_enable(struct drm_crtc *crtc) 4783static void valleyview_crtc_enable(struct drm_crtc *crtc)
4616{ 4784{
4617 struct drm_device *dev = crtc->dev; 4785 struct drm_device *dev = crtc->dev;
4618 struct drm_i915_private *dev_priv = dev->dev_private;
4619 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4620 struct intel_encoder *encoder; 4787 struct intel_encoder *encoder;
4621 int pipe = intel_crtc->pipe; 4788 int pipe = intel_crtc->pipe;
4622 int plane = intel_crtc->plane;
4623 bool is_dsi; 4789 bool is_dsi;
4624 u32 dspcntr;
4625 4790
4626 WARN_ON(!crtc->enabled); 4791 WARN_ON(!crtc->enabled);
4627 4792
@@ -4630,33 +4795,20 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4630 4795
4631 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); 4796 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4632 4797
4633 if (!is_dsi && !IS_CHERRYVIEW(dev)) 4798 if (!is_dsi) {
4634 vlv_prepare_pll(intel_crtc); 4799 if (IS_CHERRYVIEW(dev))
4635 4800 chv_prepare_pll(intel_crtc);
4636 /* Set up the display plane register */ 4801 else
4637 dspcntr = DISPPLANE_GAMMA_ENABLE; 4802 vlv_prepare_pll(intel_crtc);
4803 }
4638 4804
4639 if (intel_crtc->config.has_dp_encoder) 4805 if (intel_crtc->config.has_dp_encoder)
4640 intel_dp_set_m_n(intel_crtc); 4806 intel_dp_set_m_n(intel_crtc);
4641 4807
4642 intel_set_pipe_timings(intel_crtc); 4808 intel_set_pipe_timings(intel_crtc);
4643 4809
4644 /* pipesrc and dspsize control the size that is scaled from,
4645 * which should always be the user's requested size.
4646 */
4647 I915_WRITE(DSPSIZE(plane),
4648 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4649 (intel_crtc->config.pipe_src_w - 1));
4650 I915_WRITE(DSPPOS(plane), 0);
4651
4652 i9xx_set_pipeconf(intel_crtc); 4810 i9xx_set_pipeconf(intel_crtc);
4653 4811
4654 I915_WRITE(DSPCNTR(plane), dspcntr);
4655 POSTING_READ(DSPCNTR(plane));
4656
4657 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4658 crtc->x, crtc->y);
4659
4660 intel_crtc->active = true; 4812 intel_crtc->active = true;
4661 4813
4662 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4814 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4704,12 +4856,9 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4704static void i9xx_crtc_enable(struct drm_crtc *crtc) 4856static void i9xx_crtc_enable(struct drm_crtc *crtc)
4705{ 4857{
4706 struct drm_device *dev = crtc->dev; 4858 struct drm_device *dev = crtc->dev;
4707 struct drm_i915_private *dev_priv = dev->dev_private;
4708 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4859 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4709 struct intel_encoder *encoder; 4860 struct intel_encoder *encoder;
4710 int pipe = intel_crtc->pipe; 4861 int pipe = intel_crtc->pipe;
4711 int plane = intel_crtc->plane;
4712 u32 dspcntr;
4713 4862
4714 WARN_ON(!crtc->enabled); 4863 WARN_ON(!crtc->enabled);
4715 4864
@@ -4718,35 +4867,13 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4718 4867
4719 i9xx_set_pll_dividers(intel_crtc); 4868 i9xx_set_pll_dividers(intel_crtc);
4720 4869
4721 /* Set up the display plane register */
4722 dspcntr = DISPPLANE_GAMMA_ENABLE;
4723
4724 if (pipe == 0)
4725 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4726 else
4727 dspcntr |= DISPPLANE_SEL_PIPE_B;
4728
4729 if (intel_crtc->config.has_dp_encoder) 4870 if (intel_crtc->config.has_dp_encoder)
4730 intel_dp_set_m_n(intel_crtc); 4871 intel_dp_set_m_n(intel_crtc);
4731 4872
4732 intel_set_pipe_timings(intel_crtc); 4873 intel_set_pipe_timings(intel_crtc);
4733 4874
4734 /* pipesrc and dspsize control the size that is scaled from,
4735 * which should always be the user's requested size.
4736 */
4737 I915_WRITE(DSPSIZE(plane),
4738 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4739 (intel_crtc->config.pipe_src_w - 1));
4740 I915_WRITE(DSPPOS(plane), 0);
4741
4742 i9xx_set_pipeconf(intel_crtc); 4875 i9xx_set_pipeconf(intel_crtc);
4743 4876
4744 I915_WRITE(DSPCNTR(plane), dspcntr);
4745 POSTING_READ(DSPCNTR(plane));
4746
4747 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4748 crtc->x, crtc->y);
4749
4750 intel_crtc->active = true; 4877 intel_crtc->active = true;
4751 4878
4752 if (!IS_GEN2(dev)) 4879 if (!IS_GEN2(dev))
@@ -4842,7 +4969,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4842 */ 4969 */
4843 intel_wait_for_vblank(dev, pipe); 4970 intel_wait_for_vblank(dev, pipe);
4844 4971
4845 intel_disable_pipe(dev_priv, pipe); 4972 intel_disable_pipe(intel_crtc);
4846 4973
4847 i9xx_pfit_disable(intel_crtc); 4974 i9xx_pfit_disable(intel_crtc);
4848 4975
@@ -4856,7 +4983,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4856 else if (IS_VALLEYVIEW(dev)) 4983 else if (IS_VALLEYVIEW(dev))
4857 vlv_disable_pll(dev_priv, pipe); 4984 vlv_disable_pll(dev_priv, pipe);
4858 else 4985 else
4859 i9xx_disable_pll(dev_priv, pipe); 4986 i9xx_disable_pll(intel_crtc);
4860 } 4987 }
4861 4988
4862 if (!IS_GEN2(dev)) 4989 if (!IS_GEN2(dev))
@@ -5275,6 +5402,10 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
5275 u32 val; 5402 u32 val;
5276 int divider; 5403 int divider;
5277 5404
5405 /* FIXME: Punit isn't quite ready yet */
5406 if (IS_CHERRYVIEW(dev))
5407 return 400000;
5408
5278 mutex_lock(&dev_priv->dpio_lock); 5409 mutex_lock(&dev_priv->dpio_lock);
5279 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5410 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5280 mutex_unlock(&dev_priv->dpio_lock); 5411 mutex_unlock(&dev_priv->dpio_lock);
@@ -5519,7 +5650,8 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5519} 5650}
5520 5651
5521static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 5652static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5522 struct intel_link_m_n *m_n) 5653 struct intel_link_m_n *m_n,
5654 struct intel_link_m_n *m2_n2)
5523{ 5655{
5524 struct drm_device *dev = crtc->base.dev; 5656 struct drm_device *dev = crtc->base.dev;
5525 struct drm_i915_private *dev_priv = dev->dev_private; 5657 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5531,6 +5663,18 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5531 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 5663 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5532 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 5664 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5533 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 5665 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5666 /* Set the M2_N2 registers only on gen < 8 (where they exist)
5667 * and only if DRRS is supported, to make sure the
5668 * registers are not accessed unnecessarily.
5669 */
5670 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
5671 crtc->config.has_drrs) {
5672 I915_WRITE(PIPE_DATA_M2(transcoder),
5673 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5674 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
5675 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
5676 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
5677 }
5534 } else { 5678 } else {
5535 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 5679 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5536 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 5680 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
@@ -5539,12 +5683,13 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5539 } 5683 }
5540} 5684}
5541 5685
5542static void intel_dp_set_m_n(struct intel_crtc *crtc) 5686void intel_dp_set_m_n(struct intel_crtc *crtc)
5543{ 5687{
5544 if (crtc->config.has_pch_encoder) 5688 if (crtc->config.has_pch_encoder)
5545 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); 5689 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5546 else 5690 else
5547 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); 5691 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
5692 &crtc->config.dp_m2_n2);
5548} 5693}
5549 5694
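The new m2_n2 parameter exists because DRRS keeps two link ratios in hardware: M1/N1 for the high refresh rate and M2/N2 for the low one, letting the panel switch rates without a full modeset. M and N encode the data-rate/link-rate ratio; a hedged sketch of how such a pair might be packed (fixed N, M rounded up to match; the driver's actual computation lives elsewhere and differs in detail):

#include <stdint.h>

static void compute_m_n(uint64_t data_rate, uint64_t link_rate,
			uint32_t *m, uint32_t *n)
{
	*n = 0x80000;	/* illustrative fixed N */
	*m = (uint32_t)((data_rate * *n + link_rate - 1) / link_rate);
}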
5550static void vlv_update_pll(struct intel_crtc *crtc) 5695static void vlv_update_pll(struct intel_crtc *crtc)
@@ -5662,6 +5807,18 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5662 5807
5663static void chv_update_pll(struct intel_crtc *crtc) 5808static void chv_update_pll(struct intel_crtc *crtc)
5664{ 5809{
5810 crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5811 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5812 DPLL_VCO_ENABLE;
5813 if (crtc->pipe != PIPE_A)
5814 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5815
5816 crtc->config.dpll_hw_state.dpll_md =
5817 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5818}
5819
5820static void chv_prepare_pll(struct intel_crtc *crtc)
5821{
5665 struct drm_device *dev = crtc->base.dev; 5822 struct drm_device *dev = crtc->base.dev;
5666 struct drm_i915_private *dev_priv = dev->dev_private; 5823 struct drm_i915_private *dev_priv = dev->dev_private;
5667 int pipe = crtc->pipe; 5824 int pipe = crtc->pipe;
@@ -5671,15 +5828,6 @@ static void chv_update_pll(struct intel_crtc *crtc)
5671 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 5828 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5672 int refclk; 5829 int refclk;
5673 5830
5674 crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5675 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5676 DPLL_VCO_ENABLE;
5677 if (pipe != PIPE_A)
5678 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5679
5680 crtc->config.dpll_hw_state.dpll_md =
5681 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5682
5683 bestn = crtc->config.dpll.n; 5831 bestn = crtc->config.dpll.n;
5684 bestm2_frac = crtc->config.dpll.m2 & 0x3fffff; 5832 bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5685 bestm1 = crtc->config.dpll.m1; 5833 bestm1 = crtc->config.dpll.m1;
@@ -5839,7 +5987,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
5839 dpll |= PLL_P2_DIVIDE_BY_4; 5987 dpll |= PLL_P2_DIVIDE_BY_4;
5840 } 5988 }
5841 5989
5842 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) 5990 if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5843 dpll |= DPLL_DVO_2X_MODE; 5991 dpll |= DPLL_DVO_2X_MODE;
5844 5992
5845 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5993 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@ -5990,9 +6138,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5990 6138
5991 pipeconf = 0; 6139 pipeconf = 0;
5992 6140
5993 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 6141 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
5994 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) 6142 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
5995 pipeconf |= PIPECONF_ENABLE; 6143 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
5996 6144
5997 if (intel_crtc->config.double_wide) 6145 if (intel_crtc->config.double_wide)
5998 pipeconf |= PIPECONF_DOUBLE_WIDE; 6146 pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -6235,7 +6383,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
6235 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; 6383 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6236 6384
6237 val = I915_READ(DSPSTRIDE(pipe)); 6385 val = I915_READ(DSPSTRIDE(pipe));
6238 crtc->base.primary->fb->pitches[0] = val & 0xffffff80; 6386 crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
6239 6387
6240 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, 6388 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6241 plane_config->tiled); 6389 plane_config->tiled);
@@ -6345,6 +6493,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6345 } 6493 }
6346 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 6494 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6347 if (!IS_VALLEYVIEW(dev)) { 6495 if (!IS_VALLEYVIEW(dev)) {
6496 /*
6497 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
6498 * on 830. Filter it out here so that we don't
6499 * report errors due to that.
6500 */
6501 if (IS_I830(dev))
6502 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
6503
6348 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 6504 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6349 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 6505 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6350 } else { 6506 } else {
@@ -6367,7 +6523,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6367static void ironlake_init_pch_refclk(struct drm_device *dev) 6523static void ironlake_init_pch_refclk(struct drm_device *dev)
6368{ 6524{
6369 struct drm_i915_private *dev_priv = dev->dev_private; 6525 struct drm_i915_private *dev_priv = dev->dev_private;
6370 struct drm_mode_config *mode_config = &dev->mode_config;
6371 struct intel_encoder *encoder; 6526 struct intel_encoder *encoder;
6372 u32 val, final; 6527 u32 val, final;
6373 bool has_lvds = false; 6528 bool has_lvds = false;
@@ -6377,8 +6532,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
6377 bool can_ssc = false; 6532 bool can_ssc = false;
6378 6533
6379 /* We need to take the global config into account */ 6534 /* We need to take the global config into account */
6380 list_for_each_entry(encoder, &mode_config->encoder_list, 6535 for_each_intel_encoder(dev, encoder) {
6381 base.head) {
6382 switch (encoder->type) { 6536 switch (encoder->type) {
6383 case INTEL_OUTPUT_LVDS: 6537 case INTEL_OUTPUT_LVDS:
6384 has_panel = true; 6538 has_panel = true;
@@ -6685,11 +6839,10 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
6685 6839
6686static void lpt_init_pch_refclk(struct drm_device *dev) 6840static void lpt_init_pch_refclk(struct drm_device *dev)
6687{ 6841{
6688 struct drm_mode_config *mode_config = &dev->mode_config;
6689 struct intel_encoder *encoder; 6842 struct intel_encoder *encoder;
6690 bool has_vga = false; 6843 bool has_vga = false;
6691 6844
6692 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 6845 for_each_intel_encoder(dev, encoder) {
6693 switch (encoder->type) { 6846 switch (encoder->type) {
6694 case INTEL_OUTPUT_ANALOG: 6847 case INTEL_OUTPUT_ANALOG:
6695 has_vga = true; 6848 has_vga = true;
@@ -7145,7 +7298,8 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7145 7298
7146static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 7299static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7147 enum transcoder transcoder, 7300 enum transcoder transcoder,
7148 struct intel_link_m_n *m_n) 7301 struct intel_link_m_n *m_n,
7302 struct intel_link_m_n *m2_n2)
7149{ 7303{
7150 struct drm_device *dev = crtc->base.dev; 7304 struct drm_device *dev = crtc->base.dev;
7151 struct drm_i915_private *dev_priv = dev->dev_private; 7305 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7159,6 +7313,20 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7159 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 7313 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7160 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 7314 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
7161 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 7315 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7316 /* Read the M2_N2 registers only on gen < 8 (where they exist)
7317 * and only if DRRS is supported, to make sure the
7318 * registers are not read unnecessarily.
7319 */
7320 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
7321 crtc->config.has_drrs) {
7322 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
7323 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
7324 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
7325 & ~TU_SIZE_MASK;
7326 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
7327 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
7328 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7329 }
7162 } else { 7330 } else {
7163 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 7331 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7164 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 7332 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
@@ -7177,14 +7345,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
7177 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 7345 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7178 else 7346 else
7179 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 7347 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7180 &pipe_config->dp_m_n); 7348 &pipe_config->dp_m_n,
7349 &pipe_config->dp_m2_n2);
7181} 7350}
7182 7351
7183static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 7352static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7184 struct intel_crtc_config *pipe_config) 7353 struct intel_crtc_config *pipe_config)
7185{ 7354{
7186 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 7355 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7187 &pipe_config->fdi_m_n); 7356 &pipe_config->fdi_m_n, NULL);
7188} 7357}
7189 7358
7190static void ironlake_get_pfit_config(struct intel_crtc *crtc, 7359static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -7255,7 +7424,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
7255 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; 7424 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7256 7425
7257 val = I915_READ(DSPSTRIDE(pipe)); 7426 val = I915_READ(DSPSTRIDE(pipe));
7258 crtc->base.primary->fb->pitches[0] = val & 0xffffff80; 7427 crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
7259 7428
7260 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, 7429 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7261 plane_config->tiled); 7430 plane_config->tiled);
@@ -7615,6 +7784,22 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
7615 return 0; 7784 return 0;
7616} 7785}
7617 7786
7787static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
7788 enum port port,
7789 struct intel_crtc_config *pipe_config)
7790{
7791 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
7792
7793 switch (pipe_config->ddi_pll_sel) {
7794 case PORT_CLK_SEL_WRPLL1:
7795 pipe_config->shared_dpll = DPLL_ID_WRPLL1;
7796 break;
7797 case PORT_CLK_SEL_WRPLL2:
7798 pipe_config->shared_dpll = DPLL_ID_WRPLL2;
7799 break;
7800 }
7801}
7802
7618static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 7803static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7619 struct intel_crtc_config *pipe_config) 7804 struct intel_crtc_config *pipe_config)
7620{ 7805{
@@ -7628,16 +7813,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7628 7813
7629 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 7814 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
7630 7815
7631 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 7816 haswell_get_ddi_pll(dev_priv, port, pipe_config);
7632
7633 switch (pipe_config->ddi_pll_sel) {
7634 case PORT_CLK_SEL_WRPLL1:
7635 pipe_config->shared_dpll = DPLL_ID_WRPLL1;
7636 break;
7637 case PORT_CLK_SEL_WRPLL2:
7638 pipe_config->shared_dpll = DPLL_ID_WRPLL2;
7639 break;
7640 }
7641 7817
7642 if (pipe_config->shared_dpll >= 0) { 7818 if (pipe_config->shared_dpll >= 0) {
7643 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; 7819 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7719,7 +7895,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7719 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 7895 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7720 (I915_READ(IPS_CTL) & IPS_ENABLE); 7896 (I915_READ(IPS_CTL) & IPS_ENABLE);
7721 7897
7722 pipe_config->pixel_multiplier = 1; 7898 if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
7899 pipe_config->pixel_multiplier =
7900 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
7901 } else {
7902 pipe_config->pixel_multiplier = 1;
7903 }
7723 7904
7724 return true; 7905 return true;
7725} 7906}
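haswell_crtc_enable() now writes PIPE_MULT (earlier in this series) and haswell_get_pipe_config() reads it back, so the state checker sees the real pixel multiplier instead of a hard-coded 1. The register stores multiplier - 1, and the two sides must stay symmetric; a minimal model of the pair (u32 as in kernel code):

typedef unsigned int u32;

static u32 pipe_mult_encode(u32 pixel_multiplier) { return pixel_multiplier - 1; }
static u32 pipe_mult_decode(u32 reg_value)        { return reg_value + 1; }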
@@ -8037,74 +8218,62 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8037 struct drm_device *dev = crtc->dev; 8218 struct drm_device *dev = crtc->dev;
8038 struct drm_i915_private *dev_priv = dev->dev_private; 8219 struct drm_i915_private *dev_priv = dev->dev_private;
8039 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8040 uint32_t cntl; 8221 uint32_t cntl = 0, size = 0;
8041 8222
8042 if (base != intel_crtc->cursor_base) { 8223 if (base) {
8043 /* On these chipsets we can only modify the base whilst 8224 unsigned int width = intel_crtc->cursor_width;
8044 * the cursor is disabled. 8225 unsigned int height = intel_crtc->cursor_height;
8045 */ 8226 unsigned int stride = roundup_pow_of_two(width) * 4;
8046 if (intel_crtc->cursor_cntl) { 8227
8047 I915_WRITE(_CURACNTR, 0); 8228 switch (stride) {
8048 POSTING_READ(_CURACNTR); 8229 default:
8049 intel_crtc->cursor_cntl = 0; 8230 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
8231 width, stride);
8232 stride = 256;
8233 /* fallthrough */
8234 case 256:
8235 case 512:
8236 case 1024:
8237 case 2048:
8238 break;
8050 } 8239 }
8051 8240
8052 I915_WRITE(_CURABASE, base); 8241 cntl |= CURSOR_ENABLE |
8053 POSTING_READ(_CURABASE); 8242 CURSOR_GAMMA_ENABLE |
8243 CURSOR_FORMAT_ARGB |
8244 CURSOR_STRIDE(stride);
8245
8246 size = (height << 12) | width;
8054 } 8247 }
8055 8248
8056 /* XXX width must be 64, stride 256 => 0x00 << 28 */ 8249 if (intel_crtc->cursor_cntl != 0 &&
8057 cntl = 0; 8250 (intel_crtc->cursor_base != base ||
8058 if (base) 8251 intel_crtc->cursor_size != size ||
8059 cntl = (CURSOR_ENABLE | 8252 intel_crtc->cursor_cntl != cntl)) {
8060 CURSOR_GAMMA_ENABLE | 8253 /* On these chipsets we can only modify the base/size/stride
8061 CURSOR_FORMAT_ARGB); 8254 * whilst the cursor is disabled.
8062 if (intel_crtc->cursor_cntl != cntl) { 8255 */
8063 I915_WRITE(_CURACNTR, cntl); 8256 I915_WRITE(_CURACNTR, 0);
8064 POSTING_READ(_CURACNTR); 8257 POSTING_READ(_CURACNTR);
8065 intel_crtc->cursor_cntl = cntl; 8258 intel_crtc->cursor_cntl = 0;
8066 } 8259 }
8067}
8068 8260
8069static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 8261 if (intel_crtc->cursor_base != base)
8070{ 8262 I915_WRITE(_CURABASE, base);
8071 struct drm_device *dev = crtc->dev;
8072 struct drm_i915_private *dev_priv = dev->dev_private;
8073 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8074 int pipe = intel_crtc->pipe;
8075 uint32_t cntl;
8076 8263
8077 cntl = 0; 8264 if (intel_crtc->cursor_size != size) {
8078 if (base) { 8265 I915_WRITE(CURSIZE, size);
8079 cntl = MCURSOR_GAMMA_ENABLE; 8266 intel_crtc->cursor_size = size;
8080 switch (intel_crtc->cursor_width) {
8081 case 64:
8082 cntl |= CURSOR_MODE_64_ARGB_AX;
8083 break;
8084 case 128:
8085 cntl |= CURSOR_MODE_128_ARGB_AX;
8086 break;
8087 case 256:
8088 cntl |= CURSOR_MODE_256_ARGB_AX;
8089 break;
8090 default:
8091 WARN_ON(1);
8092 return;
8093 }
8094 cntl |= pipe << 28; /* Connect to correct pipe */
8095 } 8267 }
8268
8096 if (intel_crtc->cursor_cntl != cntl) { 8269 if (intel_crtc->cursor_cntl != cntl) {
8097 I915_WRITE(CURCNTR(pipe), cntl); 8270 I915_WRITE(_CURACNTR, cntl);
8098 POSTING_READ(CURCNTR(pipe)); 8271 POSTING_READ(_CURACNTR);
8099 intel_crtc->cursor_cntl = cntl; 8272 intel_crtc->cursor_cntl = cntl;
8100 } 8273 }
8101
8102 /* and commit changes on next vblank */
8103 I915_WRITE(CURBASE(pipe), base);
8104 POSTING_READ(CURBASE(pipe));
8105} 8274}
8106 8275
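The rewritten i845_update_cursor() moves the size/stride programming out of the set-cursor path and rechecks it on every update, since 845G/865G can only change base, size, or stride while the cursor is disabled. The stride is the width rounded up to a power of two, times 4 bytes of ARGB; a sketch of that computation (roundup_pow_of_two() is the kernel helper, modeled here by a loop, with width > 0 guaranteed by earlier validation):

static unsigned int i845_cursor_stride(unsigned int width)
{
	unsigned int rounded = 1;

	while (rounded < width)		/* roundup_pow_of_two(width) */
		rounded <<= 1;
	return rounded * 4;		/* 4 bytes per ARGB pixel */
}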
8107static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) 8276static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8108{ 8277{
8109 struct drm_device *dev = crtc->dev; 8278 struct drm_device *dev = crtc->dev;
8110 struct drm_i915_private *dev_priv = dev->dev_private; 8279 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8129,6 +8298,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
8129 WARN_ON(1); 8298 WARN_ON(1);
8130 return; 8299 return;
8131 } 8300 }
8301 cntl |= pipe << 28; /* Connect to correct pipe */
8132 } 8302 }
8133 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 8303 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8134 cntl |= CURSOR_PIPE_CSC_ENABLE; 8304 cntl |= CURSOR_PIPE_CSC_ENABLE;
@@ -8188,15 +8358,50 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8188 8358
8189 I915_WRITE(CURPOS(pipe), pos); 8359 I915_WRITE(CURPOS(pipe), pos);
8190 8360
8191 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) 8361 if (IS_845G(dev) || IS_I865G(dev))
8192 ivb_update_cursor(crtc, base);
8193 else if (IS_845G(dev) || IS_I865G(dev))
8194 i845_update_cursor(crtc, base); 8362 i845_update_cursor(crtc, base);
8195 else 8363 else
8196 i9xx_update_cursor(crtc, base); 8364 i9xx_update_cursor(crtc, base);
8197 intel_crtc->cursor_base = base; 8365 intel_crtc->cursor_base = base;
8198} 8366}
8199 8367
8368static bool cursor_size_ok(struct drm_device *dev,
8369 uint32_t width, uint32_t height)
8370{
8371 if (width == 0 || height == 0)
8372 return false;
8373
8374 /*
8375 * 845g/865g are special in that they are only limited by
8376 * the width of their cursors, the height is arbitrary up to
8377 * the precision of the register. Everything else requires
8378 * square cursors, limited to a few power-of-two sizes.
8379 */
8380 if (IS_845G(dev) || IS_I865G(dev)) {
8381 if ((width & 63) != 0)
8382 return false;
8383
8384 if (width > (IS_845G(dev) ? 64 : 512))
8385 return false;
8386
8387 if (height > 1023)
8388 return false;
8389 } else {
8390 switch (width | height) {
8391 case 256:
8392 case 128:
8393 if (IS_GEN2(dev))
8394 return false;
8395 case 64:
8396 break;
8397 default:
8398 return false;
8399 }
8400 }
8401
8402 return true;
8403}
8404
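cursor_size_ok() uses switch (width | height) as a compact square-and-power-of-two test: the supported sizes are powers of two, and for a square cursor width == height, so their OR equals that single size, while any mismatched pair ORs into a value that hits the default case. A standalone model of just that switch (zero sizes are rejected earlier in the real helper):

#include <assert.h>

static int square_cursor_ok(unsigned int w, unsigned int h, int is_gen2)
{
	switch (w | h) {
	case 256:
	case 128:
		if (is_gen2)
			return 0;
		/* fallthrough */
	case 64:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	assert( square_cursor_ok(64, 64, 1));	/* gen2 allows only 64x64 */
	assert(!square_cursor_ok(128, 128, 1));
	assert( square_cursor_ok(256, 256, 0));
	assert(!square_cursor_ok(64, 128, 0));	/* 64|128 == 192 -> rejected */
	return 0;
}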
8200/* 8405/*
8201 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object 8406 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
8202 * 8407 *
@@ -8212,7 +8417,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8212 struct drm_i915_private *dev_priv = dev->dev_private; 8417 struct drm_i915_private *dev_priv = dev->dev_private;
8213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8418 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8214 enum pipe pipe = intel_crtc->pipe; 8419 enum pipe pipe = intel_crtc->pipe;
8215 unsigned old_width; 8420 unsigned old_width, stride;
8216 uint32_t addr; 8421 uint32_t addr;
8217 int ret; 8422 int ret;
8218 8423
@@ -8220,20 +8425,18 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8220 if (!obj) { 8425 if (!obj) {
8221 DRM_DEBUG_KMS("cursor off\n"); 8426 DRM_DEBUG_KMS("cursor off\n");
8222 addr = 0; 8427 addr = 0;
8223 obj = NULL;
8224 mutex_lock(&dev->struct_mutex); 8428 mutex_lock(&dev->struct_mutex);
8225 goto finish; 8429 goto finish;
8226 } 8430 }
8227 8431
8228 /* Check for which cursor types we support */ 8432 /* Check for which cursor types we support */
8229 if (!((width == 64 && height == 64) || 8433 if (!cursor_size_ok(dev, width, height)) {
8230 (width == 128 && height == 128 && !IS_GEN2(dev)) ||
8231 (width == 256 && height == 256 && !IS_GEN2(dev)))) {
8232 DRM_DEBUG("Cursor dimension not supported\n"); 8434 DRM_DEBUG("Cursor dimension not supported\n");
8233 return -EINVAL; 8435 return -EINVAL;
8234 } 8436 }
8235 8437
8236 if (obj->base.size < width * height * 4) { 8438 stride = roundup_pow_of_two(width) * 4;
8439 if (obj->base.size < stride * height) {
8237 DRM_DEBUG_KMS("buffer is too small\n"); 8440 DRM_DEBUG_KMS("buffer is too small\n");
8238 ret = -ENOMEM; 8441 ret = -ENOMEM;
8239 goto fail; 8442 goto fail;
@@ -8295,9 +8498,6 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8295 addr = obj->phys_handle->busaddr; 8498 addr = obj->phys_handle->busaddr;
8296 } 8499 }
8297 8500
8298 if (IS_GEN2(dev))
8299 I915_WRITE(CURSIZE, (height << 12) | width);
8300
8301 finish: 8501 finish:
8302 if (intel_crtc->cursor_bo) { 8502 if (intel_crtc->cursor_bo) {
8303 if (!INTEL_INFO(dev)->cursor_needs_physical) 8503 if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -8944,12 +9144,13 @@ static void intel_mark_fb_busy(struct drm_device *dev,
8944 unsigned frontbuffer_bits, 9144 unsigned frontbuffer_bits,
8945 struct intel_engine_cs *ring) 9145 struct intel_engine_cs *ring)
8946{ 9146{
9147 struct drm_i915_private *dev_priv = dev->dev_private;
8947 enum pipe pipe; 9148 enum pipe pipe;
8948 9149
8949 if (!i915.powersave) 9150 if (!i915.powersave)
8950 return; 9151 return;
8951 9152
8952 for_each_pipe(pipe) { 9153 for_each_pipe(dev_priv, pipe) {
8953 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) 9154 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
8954 continue; 9155 continue;
8955 9156
@@ -9019,6 +9220,14 @@ void intel_frontbuffer_flush(struct drm_device *dev,
9019 intel_mark_fb_busy(dev, frontbuffer_bits, NULL); 9220 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
9020 9221
9021 intel_edp_psr_flush(dev, frontbuffer_bits); 9222 intel_edp_psr_flush(dev, frontbuffer_bits);
9223
9224 /*
9225 * FIXME: Unconditional fbc flushing here is a rather gross hack and
9226 * needs to be reworked into a proper frontbuffer tracking scheme like
9227 * psr employs.
9228 */
9229 if (IS_BROADWELL(dev))
9230 gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
9022} 9231}
9023 9232
9024/** 9233/**
@@ -9151,7 +9360,6 @@ static void intel_unpin_work_fn(struct work_struct *__work)
9151static void do_intel_finish_page_flip(struct drm_device *dev, 9360static void do_intel_finish_page_flip(struct drm_device *dev,
9152 struct drm_crtc *crtc) 9361 struct drm_crtc *crtc)
9153{ 9362{
9154 struct drm_i915_private *dev_priv = dev->dev_private;
9155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9363 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9156 struct intel_unpin_work *work; 9364 struct intel_unpin_work *work;
9157 unsigned long flags; 9365 unsigned long flags;
@@ -9171,23 +9379,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
9171 return; 9379 return;
9172 } 9380 }
9173 9381
9174 /* and that the unpin work is consistent wrt ->pending. */ 9382 page_flip_completed(intel_crtc);
9175 smp_rmb();
9176
9177 intel_crtc->unpin_work = NULL;
9178
9179 if (work->event)
9180 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
9181
9182 drm_crtc_vblank_put(crtc);
9183 9383
9184 spin_unlock_irqrestore(&dev->event_lock, flags); 9384 spin_unlock_irqrestore(&dev->event_lock, flags);
9185
9186 wake_up_all(&dev_priv->pending_flip_queue);
9187
9188 queue_work(dev_priv->wq, &work->work);
9189
9190 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
9191} 9385}
9192 9386
9193void intel_finish_page_flip(struct drm_device *dev, int pipe) 9387void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -9532,6 +9726,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
9532 return false; 9726 return false;
9533 else if (i915.use_mmio_flip > 0) 9727 else if (i915.use_mmio_flip > 0)
9534 return true; 9728 return true;
9729 else if (i915.enable_execlists)
9730 return true;
9535 else 9731 else
9536 return ring != obj->ring; 9732 return ring != obj->ring;
9537} 9733}
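The new execlists clause in use_mmio_flip() reflects that ring-based flip commands are not wired up for execlist submission, so MMIO flips become mandatory there. The full decision ladder, restated as a standalone predicate (parameter names are illustrative):

#include <stdbool.h>

static bool want_mmio_flip(int module_opt, bool execlists, bool cross_ring)
{
	if (module_opt < 0)
		return false;		/* explicitly disabled */
	if (module_opt > 0)
		return true;		/* explicitly enabled */
	if (execlists)
		return true;		/* ring flips unsupported */
	return cross_ring;		/* avoid cross-ring synchronization */
}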
@@ -9665,6 +9861,65 @@ static int intel_default_queue_flip(struct drm_device *dev,
9665 return -ENODEV; 9861 return -ENODEV;
9666} 9862}
9667 9863
9864static bool __intel_pageflip_stall_check(struct drm_device *dev,
9865 struct drm_crtc *crtc)
9866{
9867 struct drm_i915_private *dev_priv = dev->dev_private;
9868 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9869 struct intel_unpin_work *work = intel_crtc->unpin_work;
9870 u32 addr;
9871
9872 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
9873 return true;
9874
9875 if (!work->enable_stall_check)
9876 return false;
9877
9878 if (work->flip_ready_vblank == 0) {
9879 if (work->flip_queued_ring &&
9880 !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
9881 work->flip_queued_seqno))
9882 return false;
9883
9884 work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
9885 }
9886
9887 if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
9888 return false;
9889
9890 /* Potential stall - if we see that the flip has happened,
9891 * assume a missed interrupt. */
9892 if (INTEL_INFO(dev)->gen >= 4)
9893 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
9894 else
9895 addr = I915_READ(DSPADDR(intel_crtc->plane));
9896
9897 /* There is a potential issue here with a false positive after a flip
9898 * to the same address. We could address this by checking for a
9899 * non-incrementing frame counter.
9900 */
9901 return addr == work->gtt_offset;
9902}
9903
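__intel_pageflip_stall_check() declares a flip stuck when its rendering has completed (the queued seqno has passed) but the flip still has not landed within a few vblanks. Vblank counters wrap, so the age test must use unsigned subtraction; a minimal form of that comparison:

typedef unsigned int u32;

/* True once at least 3 vblanks have elapsed since the flip was ready;
 * unsigned arithmetic keeps this correct across counter wraparound. */
static int flip_aged_out(u32 now_vblank, u32 ready_vblank)
{
	return now_vblank - ready_vblank >= 3;
}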
9904void intel_check_page_flip(struct drm_device *dev, int pipe)
9905{
9906 struct drm_i915_private *dev_priv = dev->dev_private;
9907 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9908 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9909 unsigned long flags;
9910
9911 if (crtc == NULL)
9912 return;
9913
9914 spin_lock_irqsave(&dev->event_lock, flags);
9915 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
9916 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
9917 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
9918 page_flip_completed(intel_crtc);
9919 }
9920 spin_unlock_irqrestore(&dev->event_lock, flags);
9921}
9922
9668static int intel_crtc_page_flip(struct drm_crtc *crtc, 9923static int intel_crtc_page_flip(struct drm_crtc *crtc,
9669 struct drm_framebuffer *fb, 9924 struct drm_framebuffer *fb,
9670 struct drm_pending_vblank_event *event, 9925 struct drm_pending_vblank_event *event,
@@ -9721,12 +9976,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9721 /* We borrow the event spin lock for protecting unpin_work */ 9976 /* We borrow the event spin lock for protecting unpin_work */
9722 spin_lock_irqsave(&dev->event_lock, flags); 9977 spin_lock_irqsave(&dev->event_lock, flags);
9723 if (intel_crtc->unpin_work) { 9978 if (intel_crtc->unpin_work) {
9724 spin_unlock_irqrestore(&dev->event_lock, flags); 9979 /* Before declaring the flip queue wedged, check if
9725 kfree(work); 9980 * the hardware completed the operation behind our backs.
9726 drm_crtc_vblank_put(crtc); 9981 */
9982 if (__intel_pageflip_stall_check(dev, crtc)) {
9983 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
9984 page_flip_completed(intel_crtc);
9985 } else {
9986 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9987 spin_unlock_irqrestore(&dev->event_lock, flags);
9727 9988
9728 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 9989 drm_crtc_vblank_put(crtc);
9729 return -EBUSY; 9990 kfree(work);
9991 return -EBUSY;
9992 }
9730 } 9993 }
9731 intel_crtc->unpin_work = work; 9994 intel_crtc->unpin_work = work;
9732 spin_unlock_irqrestore(&dev->event_lock, flags); 9995 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -9746,8 +10009,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9746 10009
9747 work->pending_flip_obj = obj; 10010 work->pending_flip_obj = obj;
9748 10011
9749 work->enable_stall_check = true;
9750
9751 atomic_inc(&intel_crtc->unpin_work_count); 10012 atomic_inc(&intel_crtc->unpin_work_count);
9752 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 10013 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9753 10014
@@ -9776,14 +10037,26 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9776 work->gtt_offset = 10037 work->gtt_offset =
9777 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; 10038 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9778 10039
9779 if (use_mmio_flip(ring, obj)) 10040 if (use_mmio_flip(ring, obj)) {
9780 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, 10041 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9781 page_flip_flags); 10042 page_flip_flags);
9782 else 10043 if (ret)
10044 goto cleanup_unpin;
10045
10046 work->flip_queued_seqno = obj->last_write_seqno;
10047 work->flip_queued_ring = obj->ring;
10048 } else {
9783 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, 10049 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9784 page_flip_flags); 10050 page_flip_flags);
9785 if (ret) 10051 if (ret)
9786 goto cleanup_unpin; 10052 goto cleanup_unpin;
10053
10054 work->flip_queued_seqno = intel_ring_get_seqno(ring);
10055 work->flip_queued_ring = ring;
10056 }
10057
10058 work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
10059 work->enable_stall_check = true;
9787 10060
9788 i915_gem_track_fb(work->old_fb_obj, obj, 10061 i915_gem_track_fb(work->old_fb_obj, obj,
9789 INTEL_FRONTBUFFER_PRIMARY(pipe)); 10062 INTEL_FRONTBUFFER_PRIMARY(pipe));
@@ -9818,8 +10091,11 @@ free_work:
9818out_hang: 10091out_hang:
9819 intel_crtc_wait_for_pending_flips(crtc); 10092 intel_crtc_wait_for_pending_flips(crtc);
9820 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); 10093 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9821 if (ret == 0 && event) 10094 if (ret == 0 && event) {
10095 spin_lock_irqsave(&dev->event_lock, flags);
9822 drm_send_vblank_event(dev, pipe, event); 10096 drm_send_vblank_event(dev, pipe, event);
10097 spin_unlock_irqrestore(&dev->event_lock, flags);
10098 }
9823 } 10099 }
9824 return ret; 10100 return ret;
9825} 10101}
@@ -9847,8 +10123,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9847 to_intel_encoder(connector->base.encoder); 10123 to_intel_encoder(connector->base.encoder);
9848 } 10124 }
9849 10125
9850 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10126 for_each_intel_encoder(dev, encoder) {
9851 base.head) {
9852 encoder->new_crtc = 10127 encoder->new_crtc =
9853 to_intel_crtc(encoder->base.crtc); 10128 to_intel_crtc(encoder->base.crtc);
9854 } 10129 }
@@ -9879,8 +10154,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
9879 connector->base.encoder = &connector->new_encoder->base; 10154 connector->base.encoder = &connector->new_encoder->base;
9880 } 10155 }
9881 10156
9882 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10157 for_each_intel_encoder(dev, encoder) {
9883 base.head) {
9884 encoder->base.crtc = &encoder->new_crtc->base; 10158 encoder->base.crtc = &encoder->new_crtc->base;
9885 } 10159 }
9886 10160
@@ -10007,6 +10281,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
10007 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 10281 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
10008 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 10282 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
10009 pipe_config->dp_m_n.tu); 10283 pipe_config->dp_m_n.tu);
10284
10285 DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
10286 pipe_config->has_dp_encoder,
10287 pipe_config->dp_m2_n2.gmch_m,
10288 pipe_config->dp_m2_n2.gmch_n,
10289 pipe_config->dp_m2_n2.link_m,
10290 pipe_config->dp_m2_n2.link_n,
10291 pipe_config->dp_m2_n2.tu);
10292
10010 DRM_DEBUG_KMS("requested mode:\n"); 10293 DRM_DEBUG_KMS("requested mode:\n");
10011 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 10294 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
10012 DRM_DEBUG_KMS("adjusted mode:\n"); 10295 DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10041,8 +10324,7 @@ static bool check_single_encoder_cloning(struct intel_crtc *crtc,
10041 struct drm_device *dev = crtc->base.dev; 10324 struct drm_device *dev = crtc->base.dev;
10042 struct intel_encoder *source_encoder; 10325 struct intel_encoder *source_encoder;
10043 10326
10044 list_for_each_entry(source_encoder, 10327 for_each_intel_encoder(dev, source_encoder) {
10045 &dev->mode_config.encoder_list, base.head) {
10046 if (source_encoder->new_crtc != crtc) 10328 if (source_encoder->new_crtc != crtc)
10047 continue; 10329 continue;
10048 10330
@@ -10058,8 +10340,7 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
10058 struct drm_device *dev = crtc->base.dev; 10340 struct drm_device *dev = crtc->base.dev;
10059 struct intel_encoder *encoder; 10341 struct intel_encoder *encoder;
10060 10342
10061 list_for_each_entry(encoder, 10343 for_each_intel_encoder(dev, encoder) {
10062 &dev->mode_config.encoder_list, base.head) {
10063 if (encoder->new_crtc != crtc) 10344 if (encoder->new_crtc != crtc)
10064 continue; 10345 continue;
10065 10346
@@ -10143,8 +10424,7 @@ encoder_retry:
10143 * adjust it according to limitations or connector properties, and also 10424 * adjust it according to limitations or connector properties, and also
10144 * a chance to reject the mode entirely. 10425 * a chance to reject the mode entirely.
10145 */ 10426 */
10146 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10427 for_each_intel_encoder(dev, encoder) {
10147 base.head) {
10148 10428
10149 if (&encoder->new_crtc->base != crtc) 10429 if (&encoder->new_crtc->base != crtc)
10150 continue; 10430 continue;
@@ -10222,8 +10502,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10222 1 << connector->new_encoder->new_crtc->pipe; 10502 1 << connector->new_encoder->new_crtc->pipe;
10223 } 10503 }
10224 10504
10225 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10505 for_each_intel_encoder(dev, encoder) {
10226 base.head) {
10227 if (encoder->base.crtc == &encoder->new_crtc->base) 10506 if (encoder->base.crtc == &encoder->new_crtc->base)
10228 continue; 10507 continue;
10229 10508
@@ -10297,8 +10576,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10297 struct intel_crtc *intel_crtc; 10576 struct intel_crtc *intel_crtc;
10298 struct drm_connector *connector; 10577 struct drm_connector *connector;
10299 10578
10300 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, 10579 for_each_intel_encoder(dev, intel_encoder) {
10301 base.head) {
10302 if (!intel_encoder->base.crtc) 10580 if (!intel_encoder->base.crtc)
10303 continue; 10581 continue;
10304 10582
@@ -10387,6 +10665,22 @@ intel_pipe_config_compare(struct drm_device *dev,
10387 return false; \ 10665 return false; \
10388 } 10666 }
10389 10667
10668/* This is required for BDW+ where there is only one set of registers for
10669 * switching between high and low RR (refresh rate).
10670 * This macro can be used whenever a comparison has to be made between one
10671 * hw state and multiple sw state variables.
10672 */
10673#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
10674 if ((current_config->name != pipe_config->name) && \
10675 (current_config->alt_name != pipe_config->name)) { \
10676 DRM_ERROR("mismatch in " #name " " \
10677 "(expected %i or %i, found %i)\n", \
10678 current_config->name, \
10679 current_config->alt_name, \
10680 pipe_config->name); \
10681 return false; \
10682 }
10683
10390#define PIPE_CONF_CHECK_FLAGS(name, mask) \ 10684#define PIPE_CONF_CHECK_FLAGS(name, mask) \
10391 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 10685 if ((current_config->name ^ pipe_config->name) & (mask)) { \
10392 DRM_ERROR("mismatch in " #name "(" #mask ") " \ 10686 DRM_ERROR("mismatch in " #name "(" #mask ") " \
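
The new PIPE_CONF_CHECK_I_ALT macro above accepts a hardware value that matches either of two software values, which is what BDW+ needs when one live register pair is switched between the high and low refresh rate configurations. A self-contained sketch of the same double comparison plus #name stringification, with illustrative struct and field names:

    #include <stdio.h>

    struct cfg { int dp_m_n_tu; int dp_m2_n2_tu; };

    /* Match against either of two software values when the hardware
     * exposes only one live register set. */
    #define CHECK_I_ALT(cur, hw, name, alt_name)                        \
        do {                                                            \
            if ((cur)->name != (hw)->name &&                            \
                (cur)->alt_name != (hw)->name) {                        \
                fprintf(stderr,                                         \
                        "mismatch in " #name                            \
                        " (expected %i or %i, found %i)\n",             \
                        (cur)->name, (cur)->alt_name, (hw)->name);      \
                return 1;                                               \
            }                                                           \
        } while (0)

    static int compare(const struct cfg *cur, const struct cfg *hw)
    {
        CHECK_I_ALT(cur, hw, dp_m_n_tu, dp_m2_n2_tu);
        return 0;
    }

    int main(void)
    {
        struct cfg sw = { .dp_m_n_tu = 64, .dp_m2_n2_tu = 32 };
        struct cfg hw = { .dp_m_n_tu = 32 };  /* HW holds the alt value */
        return compare(&sw, &hw);             /* matches via alt: 0 */
    }
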
@@ -10419,11 +10713,28 @@ intel_pipe_config_compare(struct drm_device *dev,
10419 PIPE_CONF_CHECK_I(fdi_m_n.tu); 10713 PIPE_CONF_CHECK_I(fdi_m_n.tu);
10420 10714
10421 PIPE_CONF_CHECK_I(has_dp_encoder); 10715 PIPE_CONF_CHECK_I(has_dp_encoder);
10422 PIPE_CONF_CHECK_I(dp_m_n.gmch_m); 10716
10423 PIPE_CONF_CHECK_I(dp_m_n.gmch_n); 10717 if (INTEL_INFO(dev)->gen < 8) {
10424 PIPE_CONF_CHECK_I(dp_m_n.link_m); 10718 PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10425 PIPE_CONF_CHECK_I(dp_m_n.link_n); 10719 PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10426 PIPE_CONF_CHECK_I(dp_m_n.tu); 10720 PIPE_CONF_CHECK_I(dp_m_n.link_m);
10721 PIPE_CONF_CHECK_I(dp_m_n.link_n);
10722 PIPE_CONF_CHECK_I(dp_m_n.tu);
10723
10724 if (current_config->has_drrs) {
10725 PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
10726 PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
10727 PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
10728 PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
10729 PIPE_CONF_CHECK_I(dp_m2_n2.tu);
10730 }
10731 } else {
10732 PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
10733 PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
10734 PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
10735 PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
10736 PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
10737 }
10427 10738
10428 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); 10739 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10429 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); 10740 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
@@ -10509,6 +10820,7 @@ intel_pipe_config_compare(struct drm_device *dev,
10509 10820
10510#undef PIPE_CONF_CHECK_X 10821#undef PIPE_CONF_CHECK_X
10511#undef PIPE_CONF_CHECK_I 10822#undef PIPE_CONF_CHECK_I
10823#undef PIPE_CONF_CHECK_I_ALT
10512#undef PIPE_CONF_CHECK_FLAGS 10824#undef PIPE_CONF_CHECK_FLAGS
10513#undef PIPE_CONF_CHECK_CLOCK_FUZZY 10825#undef PIPE_CONF_CHECK_CLOCK_FUZZY
10514#undef PIPE_CONF_QUIRK 10826#undef PIPE_CONF_QUIRK
@@ -10538,8 +10850,7 @@ check_encoder_state(struct drm_device *dev)
10538 struct intel_encoder *encoder; 10850 struct intel_encoder *encoder;
10539 struct intel_connector *connector; 10851 struct intel_connector *connector;
10540 10852
10541 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10853 for_each_intel_encoder(dev, encoder) {
10542 base.head) {
10543 bool enabled = false; 10854 bool enabled = false;
10544 bool active = false; 10855 bool active = false;
10545 enum pipe pipe, tracked_pipe; 10856 enum pipe pipe, tracked_pipe;
@@ -10618,8 +10929,7 @@ check_crtc_state(struct drm_device *dev)
10618 WARN(crtc->active && !crtc->base.enabled, 10929 WARN(crtc->active && !crtc->base.enabled,
10619 "active crtc, but not enabled in sw tracking\n"); 10930 "active crtc, but not enabled in sw tracking\n");
10620 10931
10621 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10932 for_each_intel_encoder(dev, encoder) {
10622 base.head) {
10623 if (encoder->base.crtc != &crtc->base) 10933 if (encoder->base.crtc != &crtc->base)
10624 continue; 10934 continue;
10625 enabled = true; 10935 enabled = true;
@@ -10637,12 +10947,12 @@ check_crtc_state(struct drm_device *dev)
10637 active = dev_priv->display.get_pipe_config(crtc, 10947 active = dev_priv->display.get_pipe_config(crtc,
10638 &pipe_config); 10948 &pipe_config);
10639 10949
10640 /* hw state is inconsistent with the pipe A quirk */ 10950 /* hw state is inconsistent with the pipe quirk */
10641 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 10951 if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
10952 (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
10642 active = crtc->active; 10953 active = crtc->active;
10643 10954
10644 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10955 for_each_intel_encoder(dev, encoder) {
10645 base.head) {
10646 enum pipe pipe; 10956 enum pipe pipe;
10647 if (encoder->base.crtc != &crtc->base) 10957 if (encoder->base.crtc != &crtc->base)
10648 continue; 10958 continue;
@@ -11010,7 +11320,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
11010 } 11320 }
11011 11321
11012 count = 0; 11322 count = 0;
11013 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 11323 for_each_intel_encoder(dev, encoder) {
11014 encoder->new_crtc = 11324 encoder->new_crtc =
11015 to_intel_crtc(config->save_encoder_crtcs[count++]); 11325 to_intel_crtc(config->save_encoder_crtcs[count++]);
11016 } 11326 }
@@ -11169,8 +11479,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
11169 } 11479 }
11170 11480
11171 /* Check for any encoders that need to be disabled. */ 11481
11172 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 11482 for_each_intel_encoder(dev, encoder) {
11173 base.head) {
11174 int num_connectors = 0; 11483 int num_connectors = 0;
11175 list_for_each_entry(connector, 11484 list_for_each_entry(connector,
11176 &dev->mode_config.connector_list, 11485 &dev->mode_config.connector_list,
@@ -11203,9 +11512,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
11203 for_each_intel_crtc(dev, crtc) { 11512 for_each_intel_crtc(dev, crtc) {
11204 crtc->new_enabled = false; 11513 crtc->new_enabled = false;
11205 11514
11206 list_for_each_entry(encoder, 11515 for_each_intel_encoder(dev, encoder) {
11207 &dev->mode_config.encoder_list,
11208 base.head) {
11209 if (encoder->new_crtc == crtc) { 11516 if (encoder->new_crtc == crtc) {
11210 crtc->new_enabled = true; 11517 crtc->new_enabled = true;
11211 break; 11518 break;
@@ -11242,7 +11549,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
11242 connector->new_encoder = NULL; 11549 connector->new_encoder = NULL;
11243 } 11550 }
11244 11551
11245 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 11552 for_each_intel_encoder(dev, encoder) {
11246 if (encoder->new_crtc == crtc) 11553 if (encoder->new_crtc == crtc)
11247 encoder->new_crtc = NULL; 11554 encoder->new_crtc = NULL;
11248 } 11555 }
@@ -11305,7 +11612,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11305 ret = intel_set_mode(set->crtc, set->mode, 11612 ret = intel_set_mode(set->crtc, set->mode,
11306 set->x, set->y, set->fb); 11613 set->x, set->y, set->fb);
11307 } else if (config->fb_changed) { 11614 } else if (config->fb_changed) {
11308 struct drm_i915_private *dev_priv = dev->dev_private;
11309 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); 11615 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11310 11616
11311 intel_crtc_wait_for_pending_flips(set->crtc); 11617 intel_crtc_wait_for_pending_flips(set->crtc);
@@ -11319,8 +11625,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11319 */ 11625 */
11320 if (!intel_crtc->primary_enabled && ret == 0) { 11626 if (!intel_crtc->primary_enabled && ret == 0) {
11321 WARN_ON(!intel_crtc->active); 11627 WARN_ON(!intel_crtc->active);
11322 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, 11628 intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
11323 intel_crtc->pipe);
11324 } 11629 }
11325 11630
11326 /* 11631 /*
@@ -11473,8 +11778,6 @@ static int
11473intel_primary_plane_disable(struct drm_plane *plane) 11778intel_primary_plane_disable(struct drm_plane *plane)
11474{ 11779{
11475 struct drm_device *dev = plane->dev; 11780 struct drm_device *dev = plane->dev;
11476 struct drm_i915_private *dev_priv = dev->dev_private;
11477 struct intel_plane *intel_plane = to_intel_plane(plane);
11478 struct intel_crtc *intel_crtc; 11781 struct intel_crtc *intel_crtc;
11479 11782
11480 if (!plane->fb) 11783 if (!plane->fb)
@@ -11497,8 +11800,8 @@ intel_primary_plane_disable(struct drm_plane *plane)
11497 goto disable_unpin; 11800 goto disable_unpin;
11498 11801
11499 intel_crtc_wait_for_pending_flips(plane->crtc); 11802 intel_crtc_wait_for_pending_flips(plane->crtc);
11500 intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, 11803 intel_disable_primary_hw_plane(plane, plane->crtc);
11501 intel_plane->pipe); 11804
11502disable_unpin: 11805disable_unpin:
11503 mutex_lock(&dev->struct_mutex); 11806 mutex_lock(&dev->struct_mutex);
11504 i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, 11807 i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
@@ -11520,7 +11823,6 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11520 struct drm_device *dev = crtc->dev; 11823 struct drm_device *dev = crtc->dev;
11521 struct drm_i915_private *dev_priv = dev->dev_private; 11824 struct drm_i915_private *dev_priv = dev->dev_private;
11522 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11825 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11523 struct intel_plane *intel_plane = to_intel_plane(plane);
11524 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11826 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11525 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 11827 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11526 struct drm_rect dest = { 11828 struct drm_rect dest = {
@@ -11542,6 +11844,21 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11542 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, 11844 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11543 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, 11845 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11544 }; 11846 };
11847 const struct {
11848 int crtc_x, crtc_y;
11849 unsigned int crtc_w, crtc_h;
11850 uint32_t src_x, src_y, src_w, src_h;
11851 } orig = {
11852 .crtc_x = crtc_x,
11853 .crtc_y = crtc_y,
11854 .crtc_w = crtc_w,
11855 .crtc_h = crtc_h,
11856 .src_x = src_x,
11857 .src_y = src_y,
11858 .src_w = src_w,
11859 .src_h = src_h,
11860 };
11861 struct intel_plane *intel_plane = to_intel_plane(plane);
11545 bool visible; 11862 bool visible;
11546 int ret; 11863 int ret;
11547 11864
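
The added `orig` struct snapshots the caller-supplied plane parameters before the clip/update path modifies its working copies; the snapshot is committed to the long-lived intel_plane state only near the end of the function, after the hardware update succeeds. A toy standalone version of the pattern, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    struct plane_state {
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        uint32_t src_x, src_y, src_w, src_h;
    };

    /* Snapshot the request, let clipping scribble on the working copy,
     * commit the snapshot to software state only on success. */
    static int update_plane(struct plane_state *sw, struct plane_state req)
    {
        const struct plane_state orig = req;  /* untouched snapshot */

        /* ... clipping may shrink req.crtc_* / req.src_* here ... */

        *sw = orig;  /* software state tracks what the caller asked for */
        return 0;
    }

    int main(void)
    {
        struct plane_state sw = { 0 };
        struct plane_state req = { .crtc_w = 1920, .crtc_h = 1080,
                                   .src_w = 1920 << 16,  /* 16.16 fixed */
                                   .src_h = 1080 << 16 };
        update_plane(&sw, req);
        printf("%ux%u\n", sw.crtc_w, sw.crtc_h);
        return 0;
    }
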
@@ -11607,9 +11924,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11607 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11924 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11608 11925
11609 if (intel_crtc->primary_enabled) 11926 if (intel_crtc->primary_enabled)
11610 intel_disable_primary_hw_plane(dev_priv, 11927 intel_disable_primary_hw_plane(plane, crtc);
11611 intel_plane->plane,
11612 intel_plane->pipe);
11613 11928
11614 11929
11615 if (plane->fb != fb) 11930 if (plane->fb != fb)
@@ -11618,16 +11933,42 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11618 11933
11619 mutex_unlock(&dev->struct_mutex); 11934 mutex_unlock(&dev->struct_mutex);
11620 11935
11621 return 0; 11936 } else {
11622 } 11937 if (intel_crtc && intel_crtc->active &&
11938 intel_crtc->primary_enabled) {
11939 /*
11940 * FBC does not work on some platforms for rotated
11941 * planes, so disable it when rotation is not 0 and
11942 * update it when rotation is set back to 0.
11943 *
11944 * FIXME: This is redundant with the fbc update done in
11945 * the primary plane enable function except that that
11946 * one is done too late. We eventually need to unify
11947 * this.
11948 */
11949 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11950 dev_priv->fbc.plane == intel_crtc->plane &&
11951 intel_plane->rotation != BIT(DRM_ROTATE_0)) {
11952 intel_disable_fbc(dev);
11953 }
11954 }
11955 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
11956 if (ret)
11957 return ret;
11623 11958
11624 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); 11959 if (!intel_crtc->primary_enabled)
11625 if (ret) 11960 intel_enable_primary_hw_plane(plane, crtc);
11626 return ret; 11961 }
11627 11962
11628 if (!intel_crtc->primary_enabled) 11963 intel_plane->crtc_x = orig.crtc_x;
11629 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, 11964 intel_plane->crtc_y = orig.crtc_y;
11630 intel_crtc->pipe); 11965 intel_plane->crtc_w = orig.crtc_w;
11966 intel_plane->crtc_h = orig.crtc_h;
11967 intel_plane->src_x = orig.src_x;
11968 intel_plane->src_y = orig.src_y;
11969 intel_plane->src_w = orig.src_w;
11970 intel_plane->src_h = orig.src_h;
11971 intel_plane->obj = obj;
11631 11972
11632 return 0; 11973 return 0;
11633} 11974}
@@ -11644,6 +11985,7 @@ static const struct drm_plane_funcs intel_primary_plane_funcs = {
11644 .update_plane = intel_primary_plane_setplane, 11985 .update_plane = intel_primary_plane_setplane,
11645 .disable_plane = intel_primary_plane_disable, 11986 .disable_plane = intel_primary_plane_disable,
11646 .destroy = intel_plane_destroy, 11987 .destroy = intel_plane_destroy,
11988 .set_property = intel_plane_set_property
11647}; 11989};
11648 11990
11649static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, 11991static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
@@ -11661,6 +12003,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11661 primary->max_downscale = 1; 12003 primary->max_downscale = 1;
11662 primary->pipe = pipe; 12004 primary->pipe = pipe;
11663 primary->plane = pipe; 12005 primary->plane = pipe;
12006 primary->rotation = BIT(DRM_ROTATE_0);
11664 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) 12007 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11665 primary->plane = !pipe; 12008 primary->plane = !pipe;
11666 12009
@@ -11676,6 +12019,19 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11676 &intel_primary_plane_funcs, 12019 &intel_primary_plane_funcs,
11677 intel_primary_formats, num_formats, 12020 intel_primary_formats, num_formats,
11678 DRM_PLANE_TYPE_PRIMARY); 12021 DRM_PLANE_TYPE_PRIMARY);
12022
12023 if (INTEL_INFO(dev)->gen >= 4) {
12024 if (!dev->mode_config.rotation_property)
12025 dev->mode_config.rotation_property =
12026 drm_mode_create_rotation_property(dev,
12027 BIT(DRM_ROTATE_0) |
12028 BIT(DRM_ROTATE_180));
12029 if (dev->mode_config.rotation_property)
12030 drm_object_attach_property(&primary->base.base,
12031 dev->mode_config.rotation_property,
12032 primary->rotation);
12033 }
12034
11679 return &primary->base; 12035 return &primary->base;
11680} 12036}
11681 12037
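
The hunk above creates the rotation property once per device, caches it in dev->mode_config, and attaches the same object to every primary plane with BIT(DRM_ROTATE_0) as the initial value. A standalone toy model of that create-once, attach-many pattern (names here are illustrative, not the real DRM API):

    #include <stdio.h>
    #include <stdlib.h>

    struct property { unsigned int allowed_bits; };
    struct mode_config { struct property *rotation_property; };

    #define ROTATE_0   (1u << 0)
    #define ROTATE_180 (1u << 2)

    /* Create lazily on first use, hand every later caller the same object. */
    static struct property *get_rotation_property(struct mode_config *cfg)
    {
        if (!cfg->rotation_property) {
            cfg->rotation_property = malloc(sizeof(*cfg->rotation_property));
            if (cfg->rotation_property)
                cfg->rotation_property->allowed_bits =
                    ROTATE_0 | ROTATE_180;
        }
        return cfg->rotation_property;
    }

    int main(void)
    {
        struct mode_config cfg = { 0 };
        /* two planes: same property object, per-plane initial value */
        struct property *p1 = get_rotation_property(&cfg);
        struct property *p2 = get_rotation_property(&cfg);

        printf("shared: %s\n", p1 == p2 ? "yes" : "no");
        free(cfg.rotation_property);
        return 0;
    }
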
@@ -11736,6 +12092,10 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11736 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); 12092 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11737 } else { 12093 } else {
11738 intel_crtc_update_cursor(crtc, visible); 12094 intel_crtc_update_cursor(crtc, visible);
12095
12096 intel_frontbuffer_flip(crtc->dev,
12097 INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
12098
11739 return 0; 12099 return 0;
11740 } 12100 }
11741} 12101}
@@ -11812,8 +12172,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
11812 12172
11813 intel_crtc->cursor_base = ~0; 12173 intel_crtc->cursor_base = ~0;
11814 intel_crtc->cursor_cntl = ~0; 12174 intel_crtc->cursor_cntl = ~0;
11815 12175 intel_crtc->cursor_size = ~0;
11816 init_waitqueue_head(&intel_crtc->vbl_wait);
11817 12176
11818 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 12177 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
11819 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 12178 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
@@ -11876,8 +12235,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
11876 int index_mask = 0; 12235 int index_mask = 0;
11877 int entry = 0; 12236 int entry = 0;
11878 12237
11879 list_for_each_entry(source_encoder, 12238 for_each_intel_encoder(dev, source_encoder) {
11880 &dev->mode_config.encoder_list, base.head) {
11881 if (encoders_cloneable(encoder, source_encoder)) 12239 if (encoders_cloneable(encoder, source_encoder))
11882 index_mask |= (1 << entry); 12240 index_mask |= (1 << entry);
11883 12241
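
intel_encoder_clones(), converted above, builds possible_clones as a bitmask indexed by encoder registration order, which intel_setup_outputs() below then hands to the DRM core. A compilable model under that assumption, with a placeholder cloneability rule:

    #include <stdbool.h>
    #include <stdio.h>

    struct encoder { int type; };

    static bool cloneable(const struct encoder *a, const struct encoder *b)
    {
        return a->type == b->type;  /* placeholder policy */
    }

    /* Set bit N when the N-th registered encoder can share a crtc with
     * the one being queried (the encoder itself always matches). */
    static unsigned int encoder_clones(const struct encoder *enc,
                                       const struct encoder *all, int n)
    {
        unsigned int index_mask = 0;

        for (int entry = 0; entry < n; entry++)
            if (cloneable(enc, &all[entry]))
                index_mask |= 1u << entry;
        return index_mask;
    }

    int main(void)
    {
        struct encoder encs[] = { { 1 }, { 2 }, { 1 } };

        printf("0x%x\n", encoder_clones(&encs[0], encs, 3));  /* 0x5 */
        return 0;
    }
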
@@ -12066,7 +12424,7 @@ static void intel_setup_outputs(struct drm_device *dev)
12066 12424
12067 intel_edp_psr_init(dev); 12425 intel_edp_psr_init(dev);
12068 12426
12069 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 12427 for_each_intel_encoder(dev, encoder) {
12070 encoder->base.possible_crtcs = encoder->crtc_mask; 12428 encoder->base.possible_crtcs = encoder->crtc_mask;
12071 encoder->base.possible_clones = 12429 encoder->base.possible_clones =
12072 intel_encoder_clones(encoder); 12430 intel_encoder_clones(encoder);
@@ -12332,29 +12690,27 @@ static void intel_init_display(struct drm_device *dev)
12332 dev_priv->display.get_display_clock_speed = 12690 dev_priv->display.get_display_clock_speed =
12333 i830_get_display_clock_speed; 12691 i830_get_display_clock_speed;
12334 12692
12335 if (HAS_PCH_SPLIT(dev)) { 12693 if (IS_G4X(dev)) {
12336 if (IS_GEN5(dev)) {
12337 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12338 dev_priv->display.write_eld = ironlake_write_eld;
12339 } else if (IS_GEN6(dev)) {
12340 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12341 dev_priv->display.write_eld = ironlake_write_eld;
12342 dev_priv->display.modeset_global_resources =
12343 snb_modeset_global_resources;
12344 } else if (IS_IVYBRIDGE(dev)) {
12345 /* FIXME: detect B0+ stepping and use auto training */
12346 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12347 dev_priv->display.write_eld = ironlake_write_eld;
12348 dev_priv->display.modeset_global_resources =
12349 ivb_modeset_global_resources;
12350 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
12351 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12352 dev_priv->display.write_eld = haswell_write_eld;
12353 dev_priv->display.modeset_global_resources =
12354 haswell_modeset_global_resources;
12355 }
12356 } else if (IS_G4X(dev)) {
12357 dev_priv->display.write_eld = g4x_write_eld; 12694 dev_priv->display.write_eld = g4x_write_eld;
12695 } else if (IS_GEN5(dev)) {
12696 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12697 dev_priv->display.write_eld = ironlake_write_eld;
12698 } else if (IS_GEN6(dev)) {
12699 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12700 dev_priv->display.write_eld = ironlake_write_eld;
12701 dev_priv->display.modeset_global_resources =
12702 snb_modeset_global_resources;
12703 } else if (IS_IVYBRIDGE(dev)) {
12704 /* FIXME: detect B0+ stepping and use auto training */
12705 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12706 dev_priv->display.write_eld = ironlake_write_eld;
12707 dev_priv->display.modeset_global_resources =
12708 ivb_modeset_global_resources;
12709 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12710 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12711 dev_priv->display.write_eld = haswell_write_eld;
12712 dev_priv->display.modeset_global_resources =
12713 haswell_modeset_global_resources;
12358 } else if (IS_VALLEYVIEW(dev)) { 12714 } else if (IS_VALLEYVIEW(dev)) {
12359 dev_priv->display.modeset_global_resources = 12715 dev_priv->display.modeset_global_resources =
12360 valleyview_modeset_global_resources; 12716 valleyview_modeset_global_resources;
@@ -12388,6 +12744,8 @@ static void intel_init_display(struct drm_device *dev)
12388 } 12744 }
12389 12745
12390 intel_panel_init_backlight_funcs(dev); 12746 intel_panel_init_backlight_funcs(dev);
12747
12748 mutex_init(&dev_priv->pps_mutex);
12391} 12749}
12392 12750
12393/* 12751/*
@@ -12403,6 +12761,14 @@ static void quirk_pipea_force(struct drm_device *dev)
12403 DRM_INFO("applying pipe a force quirk\n"); 12761 DRM_INFO("applying pipe a force quirk\n");
12404} 12762}
12405 12763
12764static void quirk_pipeb_force(struct drm_device *dev)
12765{
12766 struct drm_i915_private *dev_priv = dev->dev_private;
12767
12768 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
12769 DRM_INFO("applying pipe b force quirk\n");
12770}
12771
12406/* 12772/*
12407 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 12773 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12408 */ 12774 */
@@ -12477,6 +12843,12 @@ static struct intel_quirk intel_quirks[] = {
12477 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 12843 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12478 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 12844 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12479 12845
12846 /* 830 needs to leave pipe A & dpll A up */
12847 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
12848
12849 /* 830 needs to leave pipe B & dpll B up */
12850 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
12851
12480 /* Lenovo U160 cannot use SSC on LVDS */ 12852 /* Lenovo U160 cannot use SSC on LVDS */
12481 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 12853 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12482 12854
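
The two new 0x3577 entries lean on the quirk table semantics: every matching row's hook runs (not just the first), and PCI_ANY_ID wildcards the subsystem IDs. A standalone model of the matching loop (PCI_ANY_ID narrowed to 16 bits here purely for illustration):

    #include <stdio.h>

    #define PCI_ANY_ID 0xffff

    struct quirk {
        unsigned short device, subsystem_vendor, subsystem_device;
        void (*hook)(void);
    };

    static void pipea_force(void) { puts("applying pipe a force quirk"); }
    static void pipeb_force(void) { puts("applying pipe b force quirk"); }

    static const struct quirk quirks[] = {
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, pipea_force },
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, pipeb_force },
    };

    static void apply_quirks(unsigned short dev, unsigned short sv,
                             unsigned short sd)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
            const struct quirk *q = &quirks[i];

            if (q->device == dev &&
                (q->subsystem_vendor == sv ||
                 q->subsystem_vendor == PCI_ANY_ID) &&
                (q->subsystem_device == sd ||
                 q->subsystem_device == PCI_ANY_ID))
                q->hook();  /* all matching rows run */
        }
    }

    int main(void)
    {
        apply_quirks(0x3577, 0x1234, 0x5678);  /* 830: both quirks fire */
        return 0;
    }
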
@@ -12550,7 +12922,11 @@ static void i915_disable_vga(struct drm_device *dev)
12550 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 12922 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12551 udelay(300); 12923 udelay(300);
12552 12924
12553 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 12925 /*
12926 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
12927 * from S3 without preserving (some of?) the other bits.
12928 */
12929 I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
12554 POSTING_READ(vga_reg); 12930 POSTING_READ(vga_reg);
12555} 12931}
12556 12932
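
The VGACNTR fix turns a blind write into a read-modify-write: the BIOS value saved at init time (see the intel_modeset_init hunk below) is OR'd with the disable bit rather than overwritten, so the other bits survive S3 on the affected 830. The fix in miniature, with plain variables standing in for the MMIO register:

    #include <stdint.h>
    #include <stdio.h>

    #define VGA_DISP_DISABLE (1u << 31)

    static uint32_t vga_reg;       /* stands in for the mapped register */
    static uint32_t bios_vgacntr;  /* snapshot taken once at init */

    /* OR the disable bit into the saved BIOS value instead of writing
     * the bit alone and clobbering everything else. */
    static void disable_vga(void)
    {
        vga_reg = bios_vgacntr | VGA_DISP_DISABLE;
    }

    int main(void)
    {
        vga_reg = 0x00c40000;   /* BIOS-programmed bits */
        bios_vgacntr = vga_reg; /* save before clobbering */
        disable_vga();
        printf("%08x\n", (unsigned)vga_reg);  /* 80c40000: preserved */
        return 0;
    }
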
@@ -12563,8 +12939,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
12563 12939
12564 intel_init_clock_gating(dev); 12940 intel_init_clock_gating(dev);
12565 12941
12566 intel_reset_dpio(dev);
12567
12568 intel_enable_gt_powersave(dev); 12942 intel_enable_gt_powersave(dev);
12569} 12943}
12570 12944
@@ -12610,7 +12984,10 @@ void intel_modeset_init(struct drm_device *dev)
12610 dev->mode_config.max_height = 8192; 12984 dev->mode_config.max_height = 8192;
12611 } 12985 }
12612 12986
12613 if (IS_GEN2(dev)) { 12987 if (IS_845G(dev) || IS_I865G(dev)) {
12988 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
12989 dev->mode_config.cursor_height = 1023;
12990 } else if (IS_GEN2(dev)) {
12614 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 12991 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12615 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 12992 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12616 } else { 12993 } else {
@@ -12624,7 +13001,7 @@ void intel_modeset_init(struct drm_device *dev)
12624 INTEL_INFO(dev)->num_pipes, 13001 INTEL_INFO(dev)->num_pipes,
12625 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 13002 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12626 13003
12627 for_each_pipe(pipe) { 13004 for_each_pipe(dev_priv, pipe) {
12628 intel_crtc_init(dev, pipe); 13005 intel_crtc_init(dev, pipe);
12629 for_each_sprite(pipe, sprite) { 13006 for_each_sprite(pipe, sprite) {
12630 ret = intel_plane_init(dev, pipe, sprite); 13007 ret = intel_plane_init(dev, pipe, sprite);
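
for_each_pipe() gains a dev_priv argument at every call site in these hunks; the macro body itself is not in this diff. The assumed shape, bounding the loop by the device's own pipe count instead of a global:

    #include <stdio.h>

    struct drm_i915_private { int num_pipes; };

    /* Assumed definition: iterate exactly the pipes this device has. */
    #define for_each_pipe(dev_priv, p) \
        for ((p) = 0; (p) < (dev_priv)->num_pipes; (p)++)

    int main(void)
    {
        struct drm_i915_private dev_priv = { .num_pipes = 3 };
        int pipe;

        for_each_pipe(&dev_priv, pipe)
            printf("pipe %c\n", 'A' + pipe);
        return 0;
    }
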
@@ -12635,10 +13012,11 @@ void intel_modeset_init(struct drm_device *dev)
12635 } 13012 }
12636 13013
12637 intel_init_dpio(dev); 13014 intel_init_dpio(dev);
12638 intel_reset_dpio(dev);
12639 13015
12640 intel_shared_dpll_init(dev); 13016 intel_shared_dpll_init(dev);
12641 13017
13018 /* save the BIOS value before clobbering it */
13019 dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
12642 /* Just disable it once at startup */ 13020 /* Just disable it once at startup */
12643 i915_disable_vga(dev); 13021 i915_disable_vga(dev);
12644 intel_setup_outputs(dev); 13022 intel_setup_outputs(dev);
@@ -12730,9 +13108,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
12730 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 13108 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12731 13109
12732 /* restore vblank interrupts to correct state */ 13110 /* restore vblank interrupts to correct state */
12733 if (crtc->active) 13111 if (crtc->active) {
13112 update_scanline_offset(crtc);
12734 drm_vblank_on(dev, crtc->pipe); 13113 drm_vblank_on(dev, crtc->pipe);
12735 else 13114 } else
12736 drm_vblank_off(dev, crtc->pipe); 13115 drm_vblank_off(dev, crtc->pipe);
12737 13116
12738 /* We need to sanitize the plane -> pipe mapping first because this will 13117 /* We need to sanitize the plane -> pipe mapping first because this will
@@ -12815,7 +13194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
12815 } 13194 }
12816 } 13195 }
12817 13196
12818 if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) { 13197 if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
12819 /* 13198 /*
12820 * We start out with underrun reporting disabled to avoid races. 13199 * We start out with underrun reporting disabled to avoid races.
12821 * For correct bookkeeping mark this on active crtcs. 13200 * For correct bookkeeping mark this on active crtcs.
@@ -12831,8 +13210,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
12831 */ 13210 */
12832 crtc->cpu_fifo_underrun_disabled = true; 13211 crtc->cpu_fifo_underrun_disabled = true;
12833 crtc->pch_fifo_underrun_disabled = true; 13212 crtc->pch_fifo_underrun_disabled = true;
12834
12835 update_scanline_offset(crtc);
12836 } 13213 }
12837} 13214}
12838 13215
@@ -12964,8 +13341,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
12964 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 13341 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12965 } 13342 }
12966 13343
12967 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 13344 for_each_intel_encoder(dev, encoder) {
12968 base.head) {
12969 pipe = 0; 13345 pipe = 0;
12970 13346
12971 if (encoder->get_hw_state(encoder, &pipe)) { 13347 if (encoder->get_hw_state(encoder, &pipe)) {
@@ -13029,12 +13405,11 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13029 } 13405 }
13030 13406
13031 /* HW state is read out, now we need to sanitize this mess. */ 13407 /* HW state is read out, now we need to sanitize this mess. */
13032 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 13408 for_each_intel_encoder(dev, encoder) {
13033 base.head) {
13034 intel_sanitize_encoder(encoder); 13409 intel_sanitize_encoder(encoder);
13035 } 13410 }
13036 13411
13037 for_each_pipe(pipe) { 13412 for_each_pipe(dev_priv, pipe) {
13038 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 13413 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13039 intel_sanitize_crtc(crtc); 13414 intel_sanitize_crtc(crtc);
13040 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); 13415 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
@@ -13062,7 +13437,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13062 * We need to use raw interfaces for restoring state to avoid 13437 * We need to use raw interfaces for restoring state to avoid
13063 * checking (bogus) intermediate states. 13438 * checking (bogus) intermediate states.
13064 */ 13439 */
13065 for_each_pipe(pipe) { 13440 for_each_pipe(dev_priv, pipe) {
13066 struct drm_crtc *crtc = 13441 struct drm_crtc *crtc =
13067 dev_priv->pipe_to_crtc_mapping[pipe]; 13442 dev_priv->pipe_to_crtc_mapping[pipe];
13068 13443
@@ -13283,7 +13658,7 @@ intel_display_capture_error_state(struct drm_device *dev)
13283 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 13658 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13284 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 13659 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
13285 13660
13286 for_each_pipe(i) { 13661 for_each_pipe(dev_priv, i) {
13287 error->pipe[i].power_domain_on = 13662 error->pipe[i].power_domain_on =
13288 intel_display_power_enabled_unlocked(dev_priv, 13663 intel_display_power_enabled_unlocked(dev_priv,
13289 POWER_DOMAIN_PIPE(i)); 13664 POWER_DOMAIN_PIPE(i));
@@ -13347,6 +13722,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13347 struct drm_device *dev, 13722 struct drm_device *dev,
13348 struct intel_display_error_state *error) 13723 struct intel_display_error_state *error)
13349{ 13724{
13725 struct drm_i915_private *dev_priv = dev->dev_private;
13350 int i; 13726 int i;
13351 13727
13352 if (!error) 13728 if (!error)
@@ -13356,7 +13732,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13356 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 13732 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13357 err_printf(m, "PWR_WELL_CTL2: %08x\n", 13733 err_printf(m, "PWR_WELL_CTL2: %08x\n",
13358 error->power_well_driver); 13734 error->power_well_driver);
13359 for_each_pipe(i) { 13735 for_each_pipe(dev_priv, i) {
13360 err_printf(m, "Pipe [%d]:\n", i); 13736 err_printf(m, "Pipe [%d]:\n", i);
13361 err_printf(m, " Power: %s\n", 13737 err_printf(m, " Power: %s\n",
13362 error->pipe[i].power_domain_on ? "on" : "off"); 13738 error->pipe[i].power_domain_on ? "on" : "off");
@@ -13397,3 +13773,25 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13397 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 13773 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
13398 } 13774 }
13399} 13775}
13776
13777void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13778{
13779 struct intel_crtc *crtc;
13780
13781 for_each_intel_crtc(dev, crtc) {
13782 struct intel_unpin_work *work;
13783 unsigned long irqflags;
13784
13785 spin_lock_irqsave(&dev->event_lock, irqflags);
13786
13787 work = crtc->unpin_work;
13788
13789 if (work && work->event &&
13790 work->event->base.file_priv == file) {
13791 kfree(work->event);
13792 work->event = NULL;
13793 }
13794
13795 spin_unlock_irqrestore(&dev->event_lock, irqflags);
13796 }
13797}
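
intel_modeset_preclose() above frees any flip-completion event still owned by the closing file, under event_lock, so a later flip completion cannot post an event to a freed file_priv. A userspace model of the ownership check, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct file_priv { int id; };
    struct event { struct file_priv *owner; };
    struct crtc {
        pthread_mutex_t event_lock;
        struct event *pending;  /* in-flight flip's event, or NULL */
    };

    /* Drop the event if the closing file owns it; the flip itself still
     * completes, just without posting anything. */
    static void preclose(struct crtc *crtc, struct file_priv *file)
    {
        pthread_mutex_lock(&crtc->event_lock);
        if (crtc->pending && crtc->pending->owner == file) {
            free(crtc->pending);
            crtc->pending = NULL;
        }
        pthread_mutex_unlock(&crtc->event_lock);
    }

    int main(void)
    {
        struct file_priv f = { 1 };
        struct crtc crtc = { PTHREAD_MUTEX_INITIALIZER, NULL };

        crtc.pending = malloc(sizeof(*crtc.pending));
        if (!crtc.pending)
            return 1;
        crtc.pending->owner = &f;
        preclose(&crtc, &f);
        printf("pending: %p\n", (void *)crtc.pending);  /* (nil) */
        return 0;
    }
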
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdff1d420c14..f6a3fdd5589e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -111,7 +111,7 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
111} 111}
112 112
113static void intel_dp_link_down(struct intel_dp *intel_dp); 113static void intel_dp_link_down(struct intel_dp *intel_dp);
114static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); 114static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
116 116
117int 117int
@@ -290,32 +290,201 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290 struct intel_dp *intel_dp, 290 struct intel_dp *intel_dp,
291 struct edp_power_seq *out); 291 struct edp_power_seq *out);
292 292
293static void pps_lock(struct intel_dp *intel_dp)
294{
295 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
296 struct intel_encoder *encoder = &intel_dig_port->base;
297 struct drm_device *dev = encoder->base.dev;
298 struct drm_i915_private *dev_priv = dev->dev_private;
299 enum intel_display_power_domain power_domain;
300
301 /*
302 * See vlv_power_sequencer_reset() why we need
303 * a power domain reference here.
304 */
305 power_domain = intel_display_port_power_domain(encoder);
306 intel_display_power_get(dev_priv, power_domain);
307
308 mutex_lock(&dev_priv->pps_mutex);
309}
310
311static void pps_unlock(struct intel_dp *intel_dp)
312{
313 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
314 struct intel_encoder *encoder = &intel_dig_port->base;
315 struct drm_device *dev = encoder->base.dev;
316 struct drm_i915_private *dev_priv = dev->dev_private;
317 enum intel_display_power_domain power_domain;
318
319 mutex_unlock(&dev_priv->pps_mutex);
320
321 power_domain = intel_display_port_power_domain(encoder);
322 intel_display_power_put(dev_priv, power_domain);
323}
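
pps_lock()/pps_unlock() above take the power domain reference strictly outside pps_mutex; the comment in vlv_power_sequencer_reset() further down explains why taking it inside would invert the ordering against the power_domain mutex and deadlock. A runnable miniature of the wrapper pair:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int power_refcount;

    static void power_get(void) { power_refcount++; }
    static void power_put(void) { power_refcount--; }

    static void pps_lock(void)
    {
        power_get();                    /* 1: reference first ... */
        pthread_mutex_lock(&pps_mutex); /* 2: ... then the mutex */
    }

    static void pps_unlock(void)
    {
        pthread_mutex_unlock(&pps_mutex); /* reverse order on the way out */
        power_put();
    }

    int main(void)
    {
        pps_lock();
        /* ... touch panel power sequencer registers ... */
        pps_unlock();
        printf("refcount %d\n", power_refcount);  /* 0 */
        return 0;
    }
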
324
293static enum pipe 325static enum pipe
294vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 326vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
295{ 327{
296 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
297 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
298 struct drm_device *dev = intel_dig_port->base.base.dev; 329 struct drm_device *dev = intel_dig_port->base.base.dev;
299 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
300 enum port port = intel_dig_port->port; 331 struct intel_encoder *encoder;
301 enum pipe pipe; 332 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
333 struct edp_power_seq power_seq;
334
335 lockdep_assert_held(&dev_priv->pps_mutex);
336
337 if (intel_dp->pps_pipe != INVALID_PIPE)
338 return intel_dp->pps_pipe;
339
340 /*
341 * We don't have a power sequencer currently.
342 * Pick one that's not used by other ports.
343 */
344 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
345 base.head) {
346 struct intel_dp *tmp;
347
348 if (encoder->type != INTEL_OUTPUT_EDP)
349 continue;
350
351 tmp = enc_to_intel_dp(&encoder->base);
352
353 if (tmp->pps_pipe != INVALID_PIPE)
354 pipes &= ~(1 << tmp->pps_pipe);
355 }
356
357 /*
358 * Didn't find one. This should not happen since there
359 * are two power sequencers and up to two eDP ports.
360 */
361 if (WARN_ON(pipes == 0))
362 return PIPE_A;
363
364 intel_dp->pps_pipe = ffs(pipes) - 1;
365
366 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
367 pipe_name(intel_dp->pps_pipe),
368 port_name(intel_dig_port->port));
302 369
303 /* modeset should have pipe */ 370 /* init power sequencer on this pipe and port */
304 if (crtc) 371 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
305 return to_intel_crtc(crtc)->pipe; 372 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
373 &power_seq);
374
375 return intel_dp->pps_pipe;
376}
377
378typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
379 enum pipe pipe);
380
381static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
382 enum pipe pipe)
383{
384 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
385}
386
387static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
388 enum pipe pipe)
389{
390 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
391}
392
393static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
394 enum pipe pipe)
395{
396 return true;
397}
398
399static enum pipe
400vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
401 enum port port,
402 vlv_pipe_check pipe_check)
403{
404 enum pipe pipe;
306 405
307 /* init time, try to find a pipe with this port selected */
308 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 406 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
309 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & 407 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
310 PANEL_PORT_SELECT_MASK; 408 PANEL_PORT_SELECT_MASK;
311 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B) 409
312 return pipe; 410 if (port_sel != PANEL_PORT_SELECT_VLV(port))
313 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C) 411 continue;
314 return pipe; 412
413 if (!pipe_check(dev_priv, pipe))
414 continue;
415
416 return pipe;
417 }
418
419 return INVALID_PIPE;
420}
421
422static void
423vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
424{
425 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
426 struct drm_device *dev = intel_dig_port->base.base.dev;
427 struct drm_i915_private *dev_priv = dev->dev_private;
428 struct edp_power_seq power_seq;
429 enum port port = intel_dig_port->port;
430
431 lockdep_assert_held(&dev_priv->pps_mutex);
432
433 /* try to find a pipe with this port selected */
434 /* first pick one where the panel is on */
435 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
436 vlv_pipe_has_pp_on);
437 /* didn't find one? pick one where vdd is on */
438 if (intel_dp->pps_pipe == INVALID_PIPE)
439 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
440 vlv_pipe_has_vdd_on);
441 /* didn't find one? pick one with just the correct port */
442 if (intel_dp->pps_pipe == INVALID_PIPE)
443 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
444 vlv_pipe_any);
445
446 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
447 if (intel_dp->pps_pipe == INVALID_PIPE) {
448 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
449 port_name(port));
450 return;
315 } 451 }
316 452
317 /* shrug */ 453 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
318 return PIPE_A; 454 port_name(port), pipe_name(intel_dp->pps_pipe));
455
456 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
457 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
458 &power_seq);
459}
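
vlv_initial_power_sequencer_setup() above runs the same scan up to three times with progressively weaker vlv_pipe_check predicates: panel power on, then VDD forced on, then any pipe whose port select matches. The cascade, modeled standalone with toy predicates:

    #include <stdbool.h>
    #include <stdio.h>

    #define INVALID_PIPE -1

    typedef bool (*pipe_check)(int pipe);

    static bool has_pp_on(int pipe)  { (void)pipe; return false; }
    static bool has_vdd_on(int pipe) { return pipe == 1; }
    static bool any(int pipe)        { (void)pipe; return true; }

    /* One scan with one predicate; port-select matching elided here. */
    static int initial_pps_pipe(pipe_check check)
    {
        for (int pipe = 0; pipe <= 1; pipe++)
            if (check(pipe))
                return pipe;
        return INVALID_PIPE;
    }

    int main(void)
    {
        int pipe = initial_pps_pipe(has_pp_on);

        if (pipe == INVALID_PIPE)
            pipe = initial_pps_pipe(has_vdd_on);
        if (pipe == INVALID_PIPE)
            pipe = initial_pps_pipe(any);
        printf("initial pps pipe: %d\n", pipe);  /* 1, via the vdd check */
        return 0;
    }
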
460
461void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
462{
463 struct drm_device *dev = dev_priv->dev;
464 struct intel_encoder *encoder;
465
466 if (WARN_ON(!IS_VALLEYVIEW(dev)))
467 return;
468
469 /*
470 * We can't grab pps_mutex here due to deadlock with power_domain
471 * mutex when power_domain functions are called while holding pps_mutex.
472 * That also means that in order to use pps_pipe the code needs to
473 * hold both a power domain reference and pps_mutex, and the power domain
474 * reference get/put must be done while _not_ holding pps_mutex.
475 * pps_{lock,unlock}() do these steps in the correct order, so one
476 * should use them always.
477 */
478
479 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
480 struct intel_dp *intel_dp;
481
482 if (encoder->type != INTEL_OUTPUT_EDP)
483 continue;
484
485 intel_dp = enc_to_intel_dp(&encoder->base);
486 intel_dp->pps_pipe = INVALID_PIPE;
487 }
319} 488}
320 489
321static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 490static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
@@ -349,12 +518,15 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
349 struct drm_i915_private *dev_priv = dev->dev_private; 518 struct drm_i915_private *dev_priv = dev->dev_private;
350 u32 pp_div; 519 u32 pp_div;
351 u32 pp_ctrl_reg, pp_div_reg; 520 u32 pp_ctrl_reg, pp_div_reg;
352 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
353 521
354 if (!is_edp(intel_dp) || code != SYS_RESTART) 522 if (!is_edp(intel_dp) || code != SYS_RESTART)
355 return 0; 523 return 0;
356 524
525 pps_lock(intel_dp);
526
357 if (IS_VALLEYVIEW(dev)) { 527 if (IS_VALLEYVIEW(dev)) {
528 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
529
358 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 530 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
359 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 531 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
360 pp_div = I915_READ(pp_div_reg); 532 pp_div = I915_READ(pp_div_reg);
@@ -366,6 +538,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
366 msleep(intel_dp->panel_power_cycle_delay); 538 msleep(intel_dp->panel_power_cycle_delay);
367 } 539 }
368 540
541 pps_unlock(intel_dp);
542
369 return 0; 543 return 0;
370} 544}
371 545
@@ -374,6 +548,8 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
374 struct drm_device *dev = intel_dp_to_dev(intel_dp); 548 struct drm_device *dev = intel_dp_to_dev(intel_dp);
375 struct drm_i915_private *dev_priv = dev->dev_private; 549 struct drm_i915_private *dev_priv = dev->dev_private;
376 550
551 lockdep_assert_held(&dev_priv->pps_mutex);
552
377 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 553 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
378} 554}
379 555
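
The lockdep_assert_held(&dev_priv->pps_mutex) calls sprinkled through these helpers document, and with lockdep enabled enforce, the new rule that callers take pps_mutex first. Modeled here with a manual flag in place of lockdep's tracking:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int pps_mutex_held;  /* manual stand-in for lockdep */

    #define lockdep_assert_held_model() assert(pps_mutex_held)

    static int have_panel_power;

    static int edp_have_panel_power(void)
    {
        lockdep_assert_held_model();  /* caller must hold pps_mutex */
        return have_panel_power;
    }

    int main(void)
    {
        pthread_mutex_lock(&pps_mutex);
        pps_mutex_held = 1;
        printf("panel power: %d\n", edp_have_panel_power());
        pps_mutex_held = 0;
        pthread_mutex_unlock(&pps_mutex);
        return 0;
    }
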
@@ -381,13 +557,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
381{ 557{
382 struct drm_device *dev = intel_dp_to_dev(intel_dp); 558 struct drm_device *dev = intel_dp_to_dev(intel_dp);
383 struct drm_i915_private *dev_priv = dev->dev_private; 559 struct drm_i915_private *dev_priv = dev->dev_private;
384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
385 struct intel_encoder *intel_encoder = &intel_dig_port->base;
386 enum intel_display_power_domain power_domain;
387 560
388 power_domain = intel_display_port_power_domain(intel_encoder); 561 lockdep_assert_held(&dev_priv->pps_mutex);
389 return intel_display_power_enabled(dev_priv, power_domain) && 562
390 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 563 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
391} 564}
392 565
393static void 566static void
@@ -535,7 +708,15 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
535 bool has_aux_irq = HAS_AUX_IRQ(dev); 708 bool has_aux_irq = HAS_AUX_IRQ(dev);
536 bool vdd; 709 bool vdd;
537 710
538 vdd = _edp_panel_vdd_on(intel_dp); 711 pps_lock(intel_dp);
712
713 /*
714 * We will be called with VDD already enabled for dpcd/edid/oui reads.
715 * In such cases we want to leave VDD enabled and it's up to upper layers
716 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
717 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
718 */
719 vdd = edp_panel_vdd_on(intel_dp);
539 720
540 /* dp aux is extremely sensitive to irq latency, hence request the 721 /* dp aux is extremely sensitive to irq latency, hence request the
541 * lowest possible wakeup latency and so prevent the cpu from going into 722 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -644,6 +825,8 @@ out:
644 if (vdd) 825 if (vdd)
645 edp_panel_vdd_off(intel_dp, false); 826 edp_panel_vdd_off(intel_dp, false);
646 827
828 pps_unlock(intel_dp);
829
647 return ret; 830 return ret;
648} 831}
649 832
@@ -828,20 +1011,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
828 } 1011 }
829} 1012}
830 1013
831static void
832intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
833{
834 struct drm_device *dev = crtc->base.dev;
835 struct drm_i915_private *dev_priv = dev->dev_private;
836 enum transcoder transcoder = crtc->config.cpu_transcoder;
837
838 I915_WRITE(PIPE_DATA_M2(transcoder),
839 TU_SIZE(m_n->tu) | m_n->gmch_m);
840 I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
841 I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
842 I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
843}
844
845bool 1014bool
846intel_dp_compute_config(struct intel_encoder *encoder, 1015intel_dp_compute_config(struct intel_encoder *encoder,
847 struct intel_crtc_config *pipe_config) 1016 struct intel_crtc_config *pipe_config)
@@ -867,6 +1036,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
867 pipe_config->has_pch_encoder = true; 1036 pipe_config->has_pch_encoder = true;
868 1037
869 pipe_config->has_dp_encoder = true; 1038 pipe_config->has_dp_encoder = true;
1039 pipe_config->has_drrs = false;
870 pipe_config->has_audio = intel_dp->has_audio; 1040 pipe_config->has_audio = intel_dp->has_audio;
871 1041
872 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1042 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
@@ -898,23 +1068,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
898 bpp = dev_priv->vbt.edp_bpp; 1068 bpp = dev_priv->vbt.edp_bpp;
899 } 1069 }
900 1070
901 if (IS_BROADWELL(dev)) { 1071 /*
902 /* Yes, it's an ugly hack. */ 1072 * Use the maximum clock and number of lanes the eDP panel
903 min_lane_count = max_lane_count; 1073 * advertises being capable of. The panels are generally
904 DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", 1074 * designed to support only a single clock and lane
905 min_lane_count); 1075 * configuration, and typically these values correspond to the
906 } else if (dev_priv->vbt.edp_lanes) { 1076 * native resolution of the panel.
907 min_lane_count = min(dev_priv->vbt.edp_lanes, 1077 */
908 max_lane_count); 1078 min_lane_count = max_lane_count;
909 DRM_DEBUG_KMS("using min %u lanes per VBT\n", 1079 min_clock = max_clock;
910 min_lane_count);
911 }
912
913 if (dev_priv->vbt.edp_rate) {
914 min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
915 DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
916 bws[min_clock]);
917 }
918 } 1080 }
919 1081
920 for (; bpp >= 6*3; bpp -= 2*3) { 1082 for (; bpp >= 6*3; bpp -= 2*3) {
@@ -970,13 +1132,14 @@ found:
970 1132
971 if (intel_connector->panel.downclock_mode != NULL && 1133 if (intel_connector->panel.downclock_mode != NULL &&
972 intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) { 1134 intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
1135 pipe_config->has_drrs = true;
973 intel_link_compute_m_n(bpp, lane_count, 1136 intel_link_compute_m_n(bpp, lane_count,
974 intel_connector->panel.downclock_mode->clock, 1137 intel_connector->panel.downclock_mode->clock,
975 pipe_config->port_clock, 1138 pipe_config->port_clock,
976 &pipe_config->dp_m2_n2); 1139 &pipe_config->dp_m2_n2);
977 } 1140 }
978 1141
979 if (HAS_DDI(dev)) 1142 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
980 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); 1143 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
981 else 1144 else
982 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 1145 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
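
With the BDW hack and the VBT overrides gone, min_lane_count and min_clock are pinned to the maxima for eDP, so the surrounding search (the `for (; bpp >= 6*3; ...)` loop visible above) effectively only steps bpp down until the mode fits the link. A standalone sketch of that search; rates are DP symbol clocks in kHz (8 data bits per symbol after 8b/10b) and the numbers are illustrative:

    #include <stdio.h>

    static const int symbol_clock_khz[] = { 162000, 270000, 540000 };

    int main(void)
    {
        int pixel_clock = 148500;  /* kHz, e.g. 1080p60 */
        int max_clock = 2, max_lanes = 4;
        int min_clock = max_clock;  /* eDP: pinned to the maxima */
        int min_lanes = max_lanes;

        /* Highest bpp first, down to 18 (6 bpc), in 2-bpc steps. */
        for (int bpp = 30; bpp >= 6 * 3; bpp -= 2 * 3) {
            int needed = pixel_clock * bpp;  /* kbit/s */

            for (int clk = min_clock; clk <= max_clock; clk++)
                for (int l = min_lanes; l <= max_lanes; l++)
                    if (needed <= symbol_clock_khz[clk] * 8 * l) {
                        printf("bpp %d clock %d lanes %d\n",
                               bpp, clk, l);
                        return 0;
                    }
        }
        return 1;  /* no config fits */
    }
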
@@ -1110,6 +1273,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
1110 struct drm_i915_private *dev_priv = dev->dev_private; 1273 struct drm_i915_private *dev_priv = dev->dev_private;
1111 u32 pp_stat_reg, pp_ctrl_reg; 1274 u32 pp_stat_reg, pp_ctrl_reg;
1112 1275
1276 lockdep_assert_held(&dev_priv->pps_mutex);
1277
1113 pp_stat_reg = _pp_stat_reg(intel_dp); 1278 pp_stat_reg = _pp_stat_reg(intel_dp);
1114 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1279 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1115 1280
@@ -1173,13 +1338,20 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1173 struct drm_i915_private *dev_priv = dev->dev_private; 1338 struct drm_i915_private *dev_priv = dev->dev_private;
1174 u32 control; 1339 u32 control;
1175 1340
1341 lockdep_assert_held(&dev_priv->pps_mutex);
1342
1176 control = I915_READ(_pp_ctrl_reg(intel_dp)); 1343 control = I915_READ(_pp_ctrl_reg(intel_dp));
1177 control &= ~PANEL_UNLOCK_MASK; 1344 control &= ~PANEL_UNLOCK_MASK;
1178 control |= PANEL_UNLOCK_REGS; 1345 control |= PANEL_UNLOCK_REGS;
1179 return control; 1346 return control;
1180} 1347}
1181 1348
1182static bool _edp_panel_vdd_on(struct intel_dp *intel_dp) 1349/*
1350 * Must be paired with edp_panel_vdd_off().
1351 * Must hold pps_mutex around the whole on/off sequence.
1352 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1353 */
1354static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1183{ 1355{
1184 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1356 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1185 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1357 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -1190,6 +1362,8 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1190 u32 pp_stat_reg, pp_ctrl_reg; 1362 u32 pp_stat_reg, pp_ctrl_reg;
1191 bool need_to_disable = !intel_dp->want_panel_vdd; 1363 bool need_to_disable = !intel_dp->want_panel_vdd;
1192 1364
1365 lockdep_assert_held(&dev_priv->pps_mutex);
1366
1193 if (!is_edp(intel_dp)) 1367 if (!is_edp(intel_dp))
1194 return false; 1368 return false;
1195 1369
@@ -1227,62 +1401,76 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1227 return need_to_disable; 1401 return need_to_disable;
1228} 1402}
1229 1403
1404/*
1405 * Must be paired with intel_edp_panel_vdd_off() or
1406 * intel_edp_panel_off().
1407 * Nested calls to these functions are not allowed since
1408 * we drop the lock. Caller must use some higher level
1409 * locking to prevent nested calls from other threads.
1410 */
1230void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 1411void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1231{ 1412{
1232 if (is_edp(intel_dp)) { 1413 bool vdd;
1233 bool vdd = _edp_panel_vdd_on(intel_dp);
1234 1414
1235 WARN(!vdd, "eDP VDD already requested on\n"); 1415 if (!is_edp(intel_dp))
1236 } 1416 return;
1417
1418 pps_lock(intel_dp);
1419 vdd = edp_panel_vdd_on(intel_dp);
1420 pps_unlock(intel_dp);
1421
1422 WARN(!vdd, "eDP VDD already requested on\n");
1237} 1423}
1238 1424
1239static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 1425static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1240{ 1426{
1241 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1427 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1242 struct drm_i915_private *dev_priv = dev->dev_private; 1428 struct drm_i915_private *dev_priv = dev->dev_private;
1429 struct intel_digital_port *intel_dig_port =
1430 dp_to_dig_port(intel_dp);
1431 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1432 enum intel_display_power_domain power_domain;
1243 u32 pp; 1433 u32 pp;
1244 u32 pp_stat_reg, pp_ctrl_reg; 1434 u32 pp_stat_reg, pp_ctrl_reg;
1245 1435
1246 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 1436 lockdep_assert_held(&dev_priv->pps_mutex);
1247 1437
1248 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) { 1438 WARN_ON(intel_dp->want_panel_vdd);
1249 struct intel_digital_port *intel_dig_port = 1439
1250 dp_to_dig_port(intel_dp); 1440 if (!edp_have_panel_vdd(intel_dp))
1251 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1441 return;
1252 enum intel_display_power_domain power_domain;
1253 1442
1254 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1443 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1255 1444
1256 pp = ironlake_get_pp_control(intel_dp); 1445 pp = ironlake_get_pp_control(intel_dp);
1257 pp &= ~EDP_FORCE_VDD; 1446 pp &= ~EDP_FORCE_VDD;
1258 1447
1259 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1448 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1260 pp_stat_reg = _pp_stat_reg(intel_dp); 1449 pp_stat_reg = _pp_stat_reg(intel_dp);
1261 1450
1262 I915_WRITE(pp_ctrl_reg, pp); 1451 I915_WRITE(pp_ctrl_reg, pp);
1263 POSTING_READ(pp_ctrl_reg); 1452 POSTING_READ(pp_ctrl_reg);
1264 1453
1265 /* Make sure sequencer is idle before allowing subsequent activity */ 1454 /* Make sure sequencer is idle before allowing subsequent activity */
1266 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1455 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1267 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1456 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1268 1457
1269 if ((pp & POWER_TARGET_ON) == 0) 1458 if ((pp & POWER_TARGET_ON) == 0)
1270 intel_dp->last_power_cycle = jiffies; 1459 intel_dp->last_power_cycle = jiffies;
1271 1460
1272 power_domain = intel_display_port_power_domain(intel_encoder); 1461 power_domain = intel_display_port_power_domain(intel_encoder);
1273 intel_display_power_put(dev_priv, power_domain); 1462 intel_display_power_put(dev_priv, power_domain);
1274 }
1275} 1463}
1276 1464
1277static void edp_panel_vdd_work(struct work_struct *__work) 1465static void edp_panel_vdd_work(struct work_struct *__work)
1278{ 1466{
1279 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1467 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1280 struct intel_dp, panel_vdd_work); 1468 struct intel_dp, panel_vdd_work);
1281 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1282 1469
1283 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1470 pps_lock(intel_dp);
1284 edp_panel_vdd_off_sync(intel_dp); 1471 if (!intel_dp->want_panel_vdd)
1285 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1472 edp_panel_vdd_off_sync(intel_dp);
1473 pps_unlock(intel_dp);
1286} 1474}
1287 1475
1288static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 1476static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
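
edp_panel_vdd_work() above now takes pps_mutex itself and re-checks want_panel_vdd before calling edp_panel_vdd_off_sync(), closing the race where a new "on" request lands between scheduling the delayed work and its execution. A userspace model of that re-check:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int want_panel_vdd;
    static int vdd_forced_on = 1;

    static void vdd_off_sync(void) { vdd_forced_on = 0; }

    /* The delayed worker: only power VDD down if nobody wants it now. */
    static void *vdd_work(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&pps_mutex);
        if (!want_panel_vdd)
            vdd_off_sync();
        pthread_mutex_unlock(&pps_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        want_panel_vdd = 1;  /* request raced in before the worker ran */
        pthread_create(&t, NULL, vdd_work, NULL);
        pthread_join(t, NULL);
        printf("vdd still on: %d\n", vdd_forced_on);  /* 1 */
        return 0;
    }
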
@@ -1298,8 +1486,18 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1298 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 1486 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1299} 1487}
1300 1488
1489/*
1490 * Must be paired with edp_panel_vdd_on().
1491 * Must hold pps_mutex around the whole on/off sequence.
1492 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1493 */
1301static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1494static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1302{ 1495{
1496 struct drm_i915_private *dev_priv =
1497 intel_dp_to_dev(intel_dp)->dev_private;
1498
1499 lockdep_assert_held(&dev_priv->pps_mutex);
1500
1303 if (!is_edp(intel_dp)) 1501 if (!is_edp(intel_dp))
1304 return; 1502 return;
1305 1503
@@ -1313,6 +1511,22 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1313 edp_panel_vdd_schedule_off(intel_dp); 1511 edp_panel_vdd_schedule_off(intel_dp);
1314} 1512}
1315 1513
1514/*
1515 * Must be paired with intel_edp_panel_vdd_on().
1516 * Nested calls to these functions are not allowed since
1517 * we drop the lock. Caller must use some higher level
1518 * locking to prevent nested calls from other threads.
1519 */
1520static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1521{
1522 if (!is_edp(intel_dp))
1523 return;
1524
1525 pps_lock(intel_dp);
1526 edp_panel_vdd_off(intel_dp, sync);
1527 pps_unlock(intel_dp);
1528}
1529
1316void intel_edp_panel_on(struct intel_dp *intel_dp) 1530void intel_edp_panel_on(struct intel_dp *intel_dp)
1317{ 1531{
1318 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1532 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -1325,9 +1539,11 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
1325 1539
1326 DRM_DEBUG_KMS("Turn eDP power on\n"); 1540 DRM_DEBUG_KMS("Turn eDP power on\n");
1327 1541
1542 pps_lock(intel_dp);
1543
1328 if (edp_have_panel_power(intel_dp)) { 1544 if (edp_have_panel_power(intel_dp)) {
1329 DRM_DEBUG_KMS("eDP power already on\n"); 1545 DRM_DEBUG_KMS("eDP power already on\n");
1330 return; 1546 goto out;
1331 } 1547 }
1332 1548
1333 wait_panel_power_cycle(intel_dp); 1549 wait_panel_power_cycle(intel_dp);
@@ -1356,6 +1572,9 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
1356 I915_WRITE(pp_ctrl_reg, pp); 1572 I915_WRITE(pp_ctrl_reg, pp);
1357 POSTING_READ(pp_ctrl_reg); 1573 POSTING_READ(pp_ctrl_reg);
1358 } 1574 }
1575
1576 out:
1577 pps_unlock(intel_dp);
1359} 1578}
1360 1579
1361void intel_edp_panel_off(struct intel_dp *intel_dp) 1580void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -1373,6 +1592,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1373 1592
1374 DRM_DEBUG_KMS("Turn eDP power off\n"); 1593 DRM_DEBUG_KMS("Turn eDP power off\n");
1375 1594
1595 pps_lock(intel_dp);
1596
1376 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1597 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1377 1598
1378 pp = ironlake_get_pp_control(intel_dp); 1599 pp = ironlake_get_pp_control(intel_dp);
@@ -1394,9 +1615,12 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1394 /* We got a reference when we enabled the VDD. */ 1615 /* We got a reference when we enabled the VDD. */
1395 power_domain = intel_display_port_power_domain(intel_encoder); 1616 power_domain = intel_display_port_power_domain(intel_encoder);
1396 intel_display_power_put(dev_priv, power_domain); 1617 intel_display_power_put(dev_priv, power_domain);
1618
1619 pps_unlock(intel_dp);
1397} 1620}
1398 1621
1399void intel_edp_backlight_on(struct intel_dp *intel_dp) 1622/* Enable backlight in the panel power control. */
1623static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1400{ 1624{
1401 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1625 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1402 struct drm_device *dev = intel_dig_port->base.base.dev; 1626 struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1404,13 +1628,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1404 u32 pp; 1628 u32 pp;
1405 u32 pp_ctrl_reg; 1629 u32 pp_ctrl_reg;
1406 1630
1407 if (!is_edp(intel_dp))
1408 return;
1409
1410 DRM_DEBUG_KMS("\n");
1411
1412 intel_panel_enable_backlight(intel_dp->attached_connector);
1413
1414 /* 1631 /*
1415 * If we enable the backlight right away following a panel power 1632 * If we enable the backlight right away following a panel power
1416 * on, we may see slight flicker as the panel syncs with the eDP 1633 * on, we may see slight flicker as the panel syncs with the eDP
@@ -1418,6 +1635,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1418 * allowing it to appear. 1635 * allowing it to appear.
1419 */ 1636 */
1420 wait_backlight_on(intel_dp); 1637 wait_backlight_on(intel_dp);
1638
1639 pps_lock(intel_dp);
1640
1421 pp = ironlake_get_pp_control(intel_dp); 1641 pp = ironlake_get_pp_control(intel_dp);
1422 pp |= EDP_BLC_ENABLE; 1642 pp |= EDP_BLC_ENABLE;
1423 1643
@@ -1425,9 +1645,24 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1425 1645
1426 I915_WRITE(pp_ctrl_reg, pp); 1646 I915_WRITE(pp_ctrl_reg, pp);
1427 POSTING_READ(pp_ctrl_reg); 1647 POSTING_READ(pp_ctrl_reg);
1648
1649 pps_unlock(intel_dp);
1428} 1650}
1429 1651
1430void intel_edp_backlight_off(struct intel_dp *intel_dp) 1652/* Enable backlight PWM and backlight PP control. */
1653void intel_edp_backlight_on(struct intel_dp *intel_dp)
1654{
1655 if (!is_edp(intel_dp))
1656 return;
1657
1658 DRM_DEBUG_KMS("\n");
1659
1660 intel_panel_enable_backlight(intel_dp->attached_connector);
1661 _intel_edp_backlight_on(intel_dp);
1662}
1663
1664/* Disable backlight in the panel power control. */
1665static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1431{ 1666{
1432 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1667 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1433 struct drm_i915_private *dev_priv = dev->dev_private; 1668 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1437,7 +1672,8 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1437 if (!is_edp(intel_dp)) 1672 if (!is_edp(intel_dp))
1438 return; 1673 return;
1439 1674
1440 DRM_DEBUG_KMS("\n"); 1675 pps_lock(intel_dp);
1676
1441 pp = ironlake_get_pp_control(intel_dp); 1677 pp = ironlake_get_pp_control(intel_dp);
1442 pp &= ~EDP_BLC_ENABLE; 1678 pp &= ~EDP_BLC_ENABLE;
1443 1679
@@ -1445,13 +1681,51 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1445 1681
1446 I915_WRITE(pp_ctrl_reg, pp); 1682 I915_WRITE(pp_ctrl_reg, pp);
1447 POSTING_READ(pp_ctrl_reg); 1683 POSTING_READ(pp_ctrl_reg);
1448 intel_dp->last_backlight_off = jiffies;
1449 1684
1685 pps_unlock(intel_dp);
1686
1687 intel_dp->last_backlight_off = jiffies;
1450 edp_wait_backlight_off(intel_dp); 1688 edp_wait_backlight_off(intel_dp);
1689}
1690
1691/* Disable backlight PP control and backlight PWM. */
1692void intel_edp_backlight_off(struct intel_dp *intel_dp)
1693{
1694 if (!is_edp(intel_dp))
1695 return;
1696
1697 DRM_DEBUG_KMS("\n");
1451 1698
1699 _intel_edp_backlight_off(intel_dp);
1452 intel_panel_disable_backlight(intel_dp->attached_connector); 1700 intel_panel_disable_backlight(intel_dp->attached_connector);
1453} 1701}
1454 1702
1703/*
1704 * Hook for controlling the panel power control backlight through the bl_power
1705 * sysfs attribute. Take care to handle multiple calls.
1706 */
1707static void intel_edp_backlight_power(struct intel_connector *connector,
1708 bool enable)
1709{
1710 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
1711 bool is_enabled;
1712
1713 pps_lock(intel_dp);
1714 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1715 pps_unlock(intel_dp);
1716
1717 if (is_enabled == enable)
1718 return;
1719
1720 DRM_DEBUG_KMS("panel power control backlight %s\n",
1721 enable ? "enable" : "disable");
1722
1723 if (enable)
1724 _intel_edp_backlight_on(intel_dp);
1725 else
1726 _intel_edp_backlight_off(intel_dp);
1727}
1728
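The bl_power hook above is deliberately idempotent: it samples the current EDP_BLC_ENABLE state under pps_lock and bails out when userspace repeats a write, so only real transitions touch the hardware. A toy sketch of that read-compare-act shape, with a plain bool standing in for the PP_CONTROL bit and all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool blc_enabled; /* stands in for EDP_BLC_ENABLE in PP_CONTROL */

static void backlight_power(bool enable)
{
        bool is_enabled = blc_enabled; /* read under the PPS lock in the driver */

        if (is_enabled == enable)      /* sysfs may write the same value twice */
                return;

        printf("panel power control backlight %s\n",
               enable ? "enable" : "disable");
        blc_enabled = enable;          /* _intel_edp_backlight_{on,off}() here */
}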
1455static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1729static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1456{ 1730{
1457 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1731 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -1515,8 +1789,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1515 if (mode != DRM_MODE_DPMS_ON) { 1789 if (mode != DRM_MODE_DPMS_ON) {
1516 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 1790 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1517 DP_SET_POWER_D3); 1791 DP_SET_POWER_D3);
1518 if (ret != 1)
1519 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1520 } else { 1792 } else {
1521 /* 1793 /*
1522 * When turning on, we need to retry for 1ms to give the sink 1794 * When turning on, we need to retry for 1ms to give the sink
@@ -1530,6 +1802,10 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1530 msleep(1); 1802 msleep(1);
1531 } 1803 }
1532 } 1804 }
1805
1806 if (ret != 1)
1807 DRM_DEBUG_KMS("failed to %s sink power state\n",
1808 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
1533} 1809}
1534 1810
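Moving the error message after the retry loop means both the enable and disable paths now report a failure, and the enable path no longer logs attempts that later succeed. The shape of the idiom as a standalone sketch; write_sink_power() is a stand-in for the drm_dp_dpcd_writeb() call, which returns 1 when exactly one byte was written:

#include <stdio.h>

/* stand-in for drm_dp_dpcd_writeb(&aux, DP_SET_POWER, ...) */
static int write_sink_power(int on)
{
        (void)on;
        return 1; /* pretend the AUX write always lands */
}

static void sink_dpms(int on)
{
        int ret, i;

        if (!on) {
                ret = write_sink_power(0);
        } else {
                /* the sink may need up to 1 ms to wake, so retry
                 * rather than reporting the first failed write */
                for (i = 0; i < 3; i++) {
                        ret = write_sink_power(1);
                        if (ret == 1)
                                break;
                        /* msleep(1) in the kernel */
                }
        }

        if (ret != 1) /* one report now covers both branches */
                fprintf(stderr, "failed to %s sink power state\n",
                        on ? "enable" : "disable");
}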
1535static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1811static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
@@ -1576,7 +1852,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1576 return true; 1852 return true;
1577 } 1853 }
1578 1854
1579 for_each_pipe(i) { 1855 for_each_pipe(dev_priv, i) {
1580 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1856 trans_dp = I915_READ(TRANS_DP_CTL(i));
1581 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1857 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1582 *pipe = i; 1858 *pipe = i;
@@ -2036,7 +2312,6 @@ void intel_edp_psr_init(struct drm_device *dev)
2036static void intel_disable_dp(struct intel_encoder *encoder) 2312static void intel_disable_dp(struct intel_encoder *encoder)
2037{ 2313{
2038 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2314 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2039 enum port port = dp_to_dig_port(intel_dp)->port;
2040 struct drm_device *dev = encoder->base.dev; 2315 struct drm_device *dev = encoder->base.dev;
2041 2316
2042 /* Make sure the panel is off before trying to change the mode. But also 2317 /* Make sure the panel is off before trying to change the mode. But also
@@ -2046,21 +2321,19 @@ static void intel_disable_dp(struct intel_encoder *encoder)
2046 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2321 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2047 intel_edp_panel_off(intel_dp); 2322 intel_edp_panel_off(intel_dp);
2048 2323
2049 /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */ 2324 /* disable the port before the pipe on g4x */
2050 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 2325 if (INTEL_INFO(dev)->gen < 5)
2051 intel_dp_link_down(intel_dp); 2326 intel_dp_link_down(intel_dp);
2052} 2327}
2053 2328
2054static void g4x_post_disable_dp(struct intel_encoder *encoder) 2329static void ilk_post_disable_dp(struct intel_encoder *encoder)
2055{ 2330{
2056 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2331 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2057 enum port port = dp_to_dig_port(intel_dp)->port; 2332 enum port port = dp_to_dig_port(intel_dp)->port;
2058 2333
2059 if (port != PORT_A)
2060 return;
2061
2062 intel_dp_link_down(intel_dp); 2334 intel_dp_link_down(intel_dp);
2063 ironlake_edp_pll_off(intel_dp); 2335 if (port == PORT_A)
2336 ironlake_edp_pll_off(intel_dp);
2064} 2337}
2065 2338
2066static void vlv_post_disable_dp(struct intel_encoder *encoder) 2339static void vlv_post_disable_dp(struct intel_encoder *encoder)
@@ -2106,6 +2379,104 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
2106 mutex_unlock(&dev_priv->dpio_lock); 2379 mutex_unlock(&dev_priv->dpio_lock);
2107} 2380}
2108 2381
2382static void
2383_intel_dp_set_link_train(struct intel_dp *intel_dp,
2384 uint32_t *DP,
2385 uint8_t dp_train_pat)
2386{
2387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2388 struct drm_device *dev = intel_dig_port->base.base.dev;
2389 struct drm_i915_private *dev_priv = dev->dev_private;
2390 enum port port = intel_dig_port->port;
2391
2392 if (HAS_DDI(dev)) {
2393 uint32_t temp = I915_READ(DP_TP_CTL(port));
2394
2395 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2396 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2397 else
2398 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2399
2400 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2401 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2402 case DP_TRAINING_PATTERN_DISABLE:
2403 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2404
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2414 break;
2415 }
2416 I915_WRITE(DP_TP_CTL(port), temp);
2417
2418 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2419 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2420
2421 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2422 case DP_TRAINING_PATTERN_DISABLE:
2423 *DP |= DP_LINK_TRAIN_OFF_CPT;
2424 break;
2425 case DP_TRAINING_PATTERN_1:
2426 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2427 break;
2428 case DP_TRAINING_PATTERN_2:
2429 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2430 break;
2431 case DP_TRAINING_PATTERN_3:
2432 DRM_ERROR("DP training pattern 3 not supported\n");
2433 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2434 break;
2435 }
2436
2437 } else {
2438 if (IS_CHERRYVIEW(dev))
2439 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2440 else
2441 *DP &= ~DP_LINK_TRAIN_MASK;
2442
2443 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2444 case DP_TRAINING_PATTERN_DISABLE:
2445 *DP |= DP_LINK_TRAIN_OFF;
2446 break;
2447 case DP_TRAINING_PATTERN_1:
2448 *DP |= DP_LINK_TRAIN_PAT_1;
2449 break;
2450 case DP_TRAINING_PATTERN_2:
2451 *DP |= DP_LINK_TRAIN_PAT_2;
2452 break;
2453 case DP_TRAINING_PATTERN_3:
2454 if (IS_CHERRYVIEW(dev)) {
2455 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2456 } else {
2457 DRM_ERROR("DP training pattern 3 not supported\n");
2458 *DP |= DP_LINK_TRAIN_PAT_2;
2459 }
2460 break;
2461 }
2462 }
2463}
2464
2465static void intel_dp_enable_port(struct intel_dp *intel_dp)
2466{
2467 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2468 struct drm_i915_private *dev_priv = dev->dev_private;
2469
2470 intel_dp->DP |= DP_PORT_EN;
2471
2472 /* enable with pattern 1 (as per spec) */
2473 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2474 DP_TRAINING_PATTERN_1);
2475
2476 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2477 POSTING_READ(intel_dp->output_reg);
2478}
2479
2109static void intel_enable_dp(struct intel_encoder *encoder) 2480static void intel_enable_dp(struct intel_encoder *encoder)
2110{ 2481{
2111 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2482 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2116,11 +2487,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2116 if (WARN_ON(dp_reg & DP_PORT_EN)) 2487 if (WARN_ON(dp_reg & DP_PORT_EN))
2117 return; 2488 return;
2118 2489
2490 intel_dp_enable_port(intel_dp);
2119 intel_edp_panel_vdd_on(intel_dp); 2491 intel_edp_panel_vdd_on(intel_dp);
2492 intel_edp_panel_on(intel_dp);
2493 intel_edp_panel_vdd_off(intel_dp, true);
2120 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2494 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2121 intel_dp_start_link_train(intel_dp); 2495 intel_dp_start_link_train(intel_dp);
2122 intel_edp_panel_on(intel_dp);
2123 edp_panel_vdd_off(intel_dp, true);
2124 intel_dp_complete_link_train(intel_dp); 2496 intel_dp_complete_link_train(intel_dp);
2125 intel_dp_stop_link_train(intel_dp); 2497 intel_dp_stop_link_train(intel_dp);
2126} 2498}
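The reordered enable sequence is worth spelling out. Reading the hunk (the rationale below is inferred from the code, not quoted from a commit message): the port is now turned on first, already driving training pattern 1 as the spec asks, panel power is brought fully up before link training starts, and VDD can be dropped early because full panel power keeps the AUX channel usable. Annotated, the new order reads:

/* sketch of the new bring-up order, using this file's own functions */
static void enable_dp_annotated(struct intel_dp *intel_dp)
{
        intel_dp_enable_port(intel_dp);          /* port on, TPS1 per spec */
        intel_edp_panel_vdd_on(intel_dp);        /* VDD for the AUX traffic below */
        intel_edp_panel_on(intel_dp);            /* full panel power before training */
        intel_edp_panel_vdd_off(intel_dp, true); /* panel power now covers AUX */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
}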
@@ -2154,6 +2526,78 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2154 } 2526 }
2155} 2527}
2156 2528
2529static void vlv_steal_power_sequencer(struct drm_device *dev,
2530 enum pipe pipe)
2531{
2532 struct drm_i915_private *dev_priv = dev->dev_private;
2533 struct intel_encoder *encoder;
2534
2535 lockdep_assert_held(&dev_priv->pps_mutex);
2536
2537 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2538 base.head) {
2539 struct intel_dp *intel_dp;
2540 enum port port;
2541
2542 if (encoder->type != INTEL_OUTPUT_EDP)
2543 continue;
2544
2545 intel_dp = enc_to_intel_dp(&encoder->base);
2546 port = dp_to_dig_port(intel_dp)->port;
2547
2548 if (intel_dp->pps_pipe != pipe)
2549 continue;
2550
2551 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2552 pipe_name(pipe), port_name(port));
2553
2554 /* make sure vdd is off before we steal it */
2555 edp_panel_vdd_off_sync(intel_dp);
2556
2557 intel_dp->pps_pipe = INVALID_PIPE;
2558 }
2559}
2560
2561static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2562{
2563 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2564 struct intel_encoder *encoder = &intel_dig_port->base;
2565 struct drm_device *dev = encoder->base.dev;
2566 struct drm_i915_private *dev_priv = dev->dev_private;
2567 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2568 struct edp_power_seq power_seq;
2569
2570 lockdep_assert_held(&dev_priv->pps_mutex);
2571
2572 if (intel_dp->pps_pipe == crtc->pipe)
2573 return;
2574
2575 /*
2576 * If another power sequencer was being used on this
2577 * port previously make sure to turn off vdd there while
2578 * we still have control of it.
2579 */
2580 if (intel_dp->pps_pipe != INVALID_PIPE)
2581 edp_panel_vdd_off_sync(intel_dp);
2582
2583 /*
2584 * We may be stealing the power
2585 * sequencer from another port.
2586 */
2587 vlv_steal_power_sequencer(dev, crtc->pipe);
2588
2589 /* now it's all ours */
2590 intel_dp->pps_pipe = crtc->pipe;
2591
2592 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2593 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2594
2595 /* init power sequencer on this pipe and port */
2596 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2597 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2598 &power_seq);
2599}
2600
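vlv_steal_power_sequencer() and vlv_init_panel_power_sequencer() together enforce a simple ownership rule on VLV/CHV: each pipe's panel power sequencer has at most one owning eDP port, and VDD must be synchronously dropped on the old owner before the sequencer changes hands. A toy model of that rule, plain structs and no hardware, all names hypothetical:

#include <stddef.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, INVALID_PIPE = -1 };

struct edp_port {
        const char *name;
        enum pipe pps_pipe;   /* which pipe's sequencer this port owns */
};

static void vdd_off_sync(struct edp_port *p)
{
        /* drop VDD while the port still controls its sequencer */
        (void)p;
}

/* release pipe's sequencer from whichever port currently owns it */
static void steal_pps(struct edp_port ports[], size_t n, enum pipe pipe)
{
        for (size_t i = 0; i < n; i++) {
                if (ports[i].pps_pipe != pipe)
                        continue;
                vdd_off_sync(&ports[i]);  /* vdd off before we steal */
                ports[i].pps_pipe = INVALID_PIPE;
        }
}

static void init_pps(struct edp_port *p, struct edp_port ports[],
                     size_t n, enum pipe pipe)
{
        if (p->pps_pipe == pipe)
                return;                   /* already ours */
        if (p->pps_pipe != INVALID_PIPE)
                vdd_off_sync(p);          /* leaving our previous sequencer */
        steal_pps(ports, n, pipe);        /* it may belong to another port */
        p->pps_pipe = pipe;               /* now it's all ours; reinit registers */
}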
2157static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2601static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2158{ 2602{
2159 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2603 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2163,7 +2607,6 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2163 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 2607 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2164 enum dpio_channel port = vlv_dport_to_channel(dport); 2608 enum dpio_channel port = vlv_dport_to_channel(dport);
2165 int pipe = intel_crtc->pipe; 2609 int pipe = intel_crtc->pipe;
2166 struct edp_power_seq power_seq;
2167 u32 val; 2610 u32 val;
2168 2611
2169 mutex_lock(&dev_priv->dpio_lock); 2612 mutex_lock(&dev_priv->dpio_lock);
@@ -2182,10 +2625,9 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2182 mutex_unlock(&dev_priv->dpio_lock); 2625 mutex_unlock(&dev_priv->dpio_lock);
2183 2626
2184 if (is_edp(intel_dp)) { 2627 if (is_edp(intel_dp)) {
2185 /* init power sequencer on this pipe and port */ 2628 pps_lock(intel_dp);
2186 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2629 vlv_init_panel_power_sequencer(intel_dp);
2187 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2630 pps_unlock(intel_dp);
2188 &power_seq);
2189 } 2631 }
2190 2632
2191 intel_enable_dp(encoder); 2633 intel_enable_dp(encoder);
@@ -2229,7 +2671,6 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2229 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2671 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2230 struct drm_device *dev = encoder->base.dev; 2672 struct drm_device *dev = encoder->base.dev;
2231 struct drm_i915_private *dev_priv = dev->dev_private; 2673 struct drm_i915_private *dev_priv = dev->dev_private;
2232 struct edp_power_seq power_seq;
2233 struct intel_crtc *intel_crtc = 2674 struct intel_crtc *intel_crtc =
2234 to_intel_crtc(encoder->base.crtc); 2675 to_intel_crtc(encoder->base.crtc);
2235 enum dpio_channel ch = vlv_dport_to_channel(dport); 2676 enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -2275,10 +2716,9 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2275 mutex_unlock(&dev_priv->dpio_lock); 2716 mutex_unlock(&dev_priv->dpio_lock);
2276 2717
2277 if (is_edp(intel_dp)) { 2718 if (is_edp(intel_dp)) {
2278 /* init power sequencer on this pipe and port */ 2719 pps_lock(intel_dp);
2279 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2720 vlv_init_panel_power_sequencer(intel_dp);
2280 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2721 pps_unlock(intel_dp);
2281 &power_seq);
2282 } 2722 }
2283 2723
2284 intel_enable_dp(encoder); 2724 intel_enable_dp(encoder);
@@ -2297,6 +2737,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2297 enum pipe pipe = intel_crtc->pipe; 2737 enum pipe pipe = intel_crtc->pipe;
2298 u32 val; 2738 u32 val;
2299 2739
2740 intel_dp_prepare(encoder);
2741
2300 mutex_lock(&dev_priv->dpio_lock); 2742 mutex_lock(&dev_priv->dpio_lock);
2301 2743
2302 /* program left/right clock distribution */ 2744 /* program left/right clock distribution */
@@ -2395,13 +2837,13 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
2395 enum port port = dp_to_dig_port(intel_dp)->port; 2837 enum port port = dp_to_dig_port(intel_dp)->port;
2396 2838
2397 if (IS_VALLEYVIEW(dev)) 2839 if (IS_VALLEYVIEW(dev))
2398 return DP_TRAIN_VOLTAGE_SWING_1200; 2840 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2399 else if (IS_GEN7(dev) && port == PORT_A) 2841 else if (IS_GEN7(dev) && port == PORT_A)
2400 return DP_TRAIN_VOLTAGE_SWING_800; 2842 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2401 else if (HAS_PCH_CPT(dev) && port != PORT_A) 2843 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2402 return DP_TRAIN_VOLTAGE_SWING_1200; 2844 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2403 else 2845 else
2404 return DP_TRAIN_VOLTAGE_SWING_800; 2846 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2405} 2847}
2406 2848
2407static uint8_t 2849static uint8_t
@@ -2412,49 +2854,49 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2412 2854
2413 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2855 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2414 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2856 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2415 case DP_TRAIN_VOLTAGE_SWING_400: 2857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2416 return DP_TRAIN_PRE_EMPHASIS_9_5; 2858 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2417 case DP_TRAIN_VOLTAGE_SWING_600: 2859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2418 return DP_TRAIN_PRE_EMPHASIS_6; 2860 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2419 case DP_TRAIN_VOLTAGE_SWING_800: 2861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2420 return DP_TRAIN_PRE_EMPHASIS_3_5; 2862 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2421 case DP_TRAIN_VOLTAGE_SWING_1200: 2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2422 default: 2864 default:
2423 return DP_TRAIN_PRE_EMPHASIS_0; 2865 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2424 } 2866 }
2425 } else if (IS_VALLEYVIEW(dev)) { 2867 } else if (IS_VALLEYVIEW(dev)) {
2426 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2868 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2427 case DP_TRAIN_VOLTAGE_SWING_400: 2869 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2428 return DP_TRAIN_PRE_EMPHASIS_9_5; 2870 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2429 case DP_TRAIN_VOLTAGE_SWING_600: 2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2430 return DP_TRAIN_PRE_EMPHASIS_6; 2872 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2431 case DP_TRAIN_VOLTAGE_SWING_800: 2873 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2432 return DP_TRAIN_PRE_EMPHASIS_3_5; 2874 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2433 case DP_TRAIN_VOLTAGE_SWING_1200: 2875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2434 default: 2876 default:
2435 return DP_TRAIN_PRE_EMPHASIS_0; 2877 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2436 } 2878 }
2437 } else if (IS_GEN7(dev) && port == PORT_A) { 2879 } else if (IS_GEN7(dev) && port == PORT_A) {
2438 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2880 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2439 case DP_TRAIN_VOLTAGE_SWING_400: 2881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2440 return DP_TRAIN_PRE_EMPHASIS_6; 2882 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2441 case DP_TRAIN_VOLTAGE_SWING_600: 2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2442 case DP_TRAIN_VOLTAGE_SWING_800: 2884 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2443 return DP_TRAIN_PRE_EMPHASIS_3_5; 2885 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2444 default: 2886 default:
2445 return DP_TRAIN_PRE_EMPHASIS_0; 2887 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2446 } 2888 }
2447 } else { 2889 } else {
2448 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2890 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2449 case DP_TRAIN_VOLTAGE_SWING_400: 2891 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2450 return DP_TRAIN_PRE_EMPHASIS_6; 2892 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2451 case DP_TRAIN_VOLTAGE_SWING_600: 2893 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2452 return DP_TRAIN_PRE_EMPHASIS_6; 2894 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2453 case DP_TRAIN_VOLTAGE_SWING_800: 2895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2454 return DP_TRAIN_PRE_EMPHASIS_3_5; 2896 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2455 case DP_TRAIN_VOLTAGE_SWING_1200: 2897 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2456 default: 2898 default:
2457 return DP_TRAIN_PRE_EMPHASIS_0; 2899 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2458 } 2900 }
2459 } 2901 }
2460} 2902}
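The mass rename in this hunk swaps the millivolt/decibel macro names for the DP spec's generic level numbers; the correspondence, as the renames themselves show, is voltage swing LEVEL_0/1/2/3 = 400/600/800/1200 mV and pre-emphasis LEVEL_0/1/2/3 = 0/3.5/6/9.5 dB. Note also the pattern in intel_dp_pre_emphasis_max() above: on HSW/BDW and VLV the maximum pre-emphasis drops one level for each step of voltage swing, roughly:

#include <stdint.h>

/* hypothetical helper capturing the HSW/BDW and VLV branches above:
 * swing level 0 allows pre-emphasis up to level 3, 1 up to 2, etc. */
static uint8_t max_pre_emph_level(uint8_t swing_level)
{
        return swing_level >= 3 ? 0 : (uint8_t)(3 - swing_level);
}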
@@ -2473,22 +2915,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2473 int pipe = intel_crtc->pipe; 2915 int pipe = intel_crtc->pipe;
2474 2916
2475 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2917 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2476 case DP_TRAIN_PRE_EMPHASIS_0: 2918 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2477 preemph_reg_value = 0x0004000; 2919 preemph_reg_value = 0x0004000;
2478 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2920 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2479 case DP_TRAIN_VOLTAGE_SWING_400: 2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2480 demph_reg_value = 0x2B405555; 2922 demph_reg_value = 0x2B405555;
2481 uniqtranscale_reg_value = 0x552AB83A; 2923 uniqtranscale_reg_value = 0x552AB83A;
2482 break; 2924 break;
2483 case DP_TRAIN_VOLTAGE_SWING_600: 2925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2484 demph_reg_value = 0x2B404040; 2926 demph_reg_value = 0x2B404040;
2485 uniqtranscale_reg_value = 0x5548B83A; 2927 uniqtranscale_reg_value = 0x5548B83A;
2486 break; 2928 break;
2487 case DP_TRAIN_VOLTAGE_SWING_800: 2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2488 demph_reg_value = 0x2B245555; 2930 demph_reg_value = 0x2B245555;
2489 uniqtranscale_reg_value = 0x5560B83A; 2931 uniqtranscale_reg_value = 0x5560B83A;
2490 break; 2932 break;
2491 case DP_TRAIN_VOLTAGE_SWING_1200: 2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2492 demph_reg_value = 0x2B405555; 2934 demph_reg_value = 0x2B405555;
2493 uniqtranscale_reg_value = 0x5598DA3A; 2935 uniqtranscale_reg_value = 0x5598DA3A;
2494 break; 2936 break;
@@ -2496,18 +2938,18 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2496 return 0; 2938 return 0;
2497 } 2939 }
2498 break; 2940 break;
2499 case DP_TRAIN_PRE_EMPHASIS_3_5: 2941 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2500 preemph_reg_value = 0x0002000; 2942 preemph_reg_value = 0x0002000;
2501 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2943 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2502 case DP_TRAIN_VOLTAGE_SWING_400: 2944 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2503 demph_reg_value = 0x2B404040; 2945 demph_reg_value = 0x2B404040;
2504 uniqtranscale_reg_value = 0x5552B83A; 2946 uniqtranscale_reg_value = 0x5552B83A;
2505 break; 2947 break;
2506 case DP_TRAIN_VOLTAGE_SWING_600: 2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2507 demph_reg_value = 0x2B404848; 2949 demph_reg_value = 0x2B404848;
2508 uniqtranscale_reg_value = 0x5580B83A; 2950 uniqtranscale_reg_value = 0x5580B83A;
2509 break; 2951 break;
2510 case DP_TRAIN_VOLTAGE_SWING_800: 2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2511 demph_reg_value = 0x2B404040; 2953 demph_reg_value = 0x2B404040;
2512 uniqtranscale_reg_value = 0x55ADDA3A; 2954 uniqtranscale_reg_value = 0x55ADDA3A;
2513 break; 2955 break;
@@ -2515,14 +2957,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2515 return 0; 2957 return 0;
2516 } 2958 }
2517 break; 2959 break;
2518 case DP_TRAIN_PRE_EMPHASIS_6: 2960 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2519 preemph_reg_value = 0x0000000; 2961 preemph_reg_value = 0x0000000;
2520 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2962 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2521 case DP_TRAIN_VOLTAGE_SWING_400: 2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2522 demph_reg_value = 0x2B305555; 2964 demph_reg_value = 0x2B305555;
2523 uniqtranscale_reg_value = 0x5570B83A; 2965 uniqtranscale_reg_value = 0x5570B83A;
2524 break; 2966 break;
2525 case DP_TRAIN_VOLTAGE_SWING_600: 2967 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2526 demph_reg_value = 0x2B2B4040; 2968 demph_reg_value = 0x2B2B4040;
2527 uniqtranscale_reg_value = 0x55ADDA3A; 2969 uniqtranscale_reg_value = 0x55ADDA3A;
2528 break; 2970 break;
@@ -2530,10 +2972,10 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2530 return 0; 2972 return 0;
2531 } 2973 }
2532 break; 2974 break;
2533 case DP_TRAIN_PRE_EMPHASIS_9_5: 2975 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2534 preemph_reg_value = 0x0006000; 2976 preemph_reg_value = 0x0006000;
2535 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2977 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2536 case DP_TRAIN_VOLTAGE_SWING_400: 2978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2537 demph_reg_value = 0x1B405555; 2979 demph_reg_value = 0x1B405555;
2538 uniqtranscale_reg_value = 0x55ADDA3A; 2980 uniqtranscale_reg_value = 0x55ADDA3A;
2539 break; 2981 break;
@@ -2572,21 +3014,21 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2572 int i; 3014 int i;
2573 3015
2574 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3016 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2575 case DP_TRAIN_PRE_EMPHASIS_0: 3017 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2576 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3018 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2577 case DP_TRAIN_VOLTAGE_SWING_400: 3019 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2578 deemph_reg_value = 128; 3020 deemph_reg_value = 128;
2579 margin_reg_value = 52; 3021 margin_reg_value = 52;
2580 break; 3022 break;
2581 case DP_TRAIN_VOLTAGE_SWING_600: 3023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2582 deemph_reg_value = 128; 3024 deemph_reg_value = 128;
2583 margin_reg_value = 77; 3025 margin_reg_value = 77;
2584 break; 3026 break;
2585 case DP_TRAIN_VOLTAGE_SWING_800: 3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2586 deemph_reg_value = 128; 3028 deemph_reg_value = 128;
2587 margin_reg_value = 102; 3029 margin_reg_value = 102;
2588 break; 3030 break;
2589 case DP_TRAIN_VOLTAGE_SWING_1200: 3031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2590 deemph_reg_value = 128; 3032 deemph_reg_value = 128;
2591 margin_reg_value = 154; 3033 margin_reg_value = 154;
2592 /* FIXME extra to set for 1200 */ 3034 /* FIXME extra to set for 1200 */
@@ -2595,17 +3037,17 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2595 return 0; 3037 return 0;
2596 } 3038 }
2597 break; 3039 break;
2598 case DP_TRAIN_PRE_EMPHASIS_3_5: 3040 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2599 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3041 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2600 case DP_TRAIN_VOLTAGE_SWING_400: 3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2601 deemph_reg_value = 85; 3043 deemph_reg_value = 85;
2602 margin_reg_value = 78; 3044 margin_reg_value = 78;
2603 break; 3045 break;
2604 case DP_TRAIN_VOLTAGE_SWING_600: 3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2605 deemph_reg_value = 85; 3047 deemph_reg_value = 85;
2606 margin_reg_value = 116; 3048 margin_reg_value = 116;
2607 break; 3049 break;
2608 case DP_TRAIN_VOLTAGE_SWING_800: 3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2609 deemph_reg_value = 85; 3051 deemph_reg_value = 85;
2610 margin_reg_value = 154; 3052 margin_reg_value = 154;
2611 break; 3053 break;
@@ -2613,13 +3055,13 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2613 return 0; 3055 return 0;
2614 } 3056 }
2615 break; 3057 break;
2616 case DP_TRAIN_PRE_EMPHASIS_6: 3058 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2617 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3059 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2618 case DP_TRAIN_VOLTAGE_SWING_400: 3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2619 deemph_reg_value = 64; 3061 deemph_reg_value = 64;
2620 margin_reg_value = 104; 3062 margin_reg_value = 104;
2621 break; 3063 break;
2622 case DP_TRAIN_VOLTAGE_SWING_600: 3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2623 deemph_reg_value = 64; 3065 deemph_reg_value = 64;
2624 margin_reg_value = 154; 3066 margin_reg_value = 154;
2625 break; 3067 break;
@@ -2627,9 +3069,9 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2627 return 0; 3069 return 0;
2628 } 3070 }
2629 break; 3071 break;
2630 case DP_TRAIN_PRE_EMPHASIS_9_5: 3072 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2631 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2632 case DP_TRAIN_VOLTAGE_SWING_400: 3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2633 deemph_reg_value = 43; 3075 deemph_reg_value = 43;
2634 margin_reg_value = 154; 3076 margin_reg_value = 154;
2635 break; 3077 break;
@@ -2663,8 +3105,8 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2663 /* Program swing margin */ 3105 /* Program swing margin */
2664 for (i = 0; i < 4; i++) { 3106 for (i = 0; i < 4; i++) {
2665 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 3107 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2666 val &= ~DPIO_SWING_MARGIN_MASK; 3108 val &= ~DPIO_SWING_MARGIN000_MASK;
2667 val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT; 3109 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
2668 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 3110 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2669 } 3111 }
2670 3112
@@ -2676,9 +3118,9 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2676 } 3118 }
2677 3119
2678 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK) 3120 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2679 == DP_TRAIN_PRE_EMPHASIS_0) && 3121 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
2680 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK) 3122 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2681 == DP_TRAIN_VOLTAGE_SWING_1200)) { 3123 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
2682 3124
2683 /* 3125 /*
2684 * The document said it needs to set bit 27 for ch0 and bit 26 3126 * The document said it needs to set bit 27 for ch0 and bit 26
@@ -2757,32 +3199,32 @@ intel_gen4_signal_levels(uint8_t train_set)
2757 uint32_t signal_levels = 0; 3199 uint32_t signal_levels = 0;
2758 3200
2759 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3201 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2760 case DP_TRAIN_VOLTAGE_SWING_400: 3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2761 default: 3203 default:
2762 signal_levels |= DP_VOLTAGE_0_4; 3204 signal_levels |= DP_VOLTAGE_0_4;
2763 break; 3205 break;
2764 case DP_TRAIN_VOLTAGE_SWING_600: 3206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2765 signal_levels |= DP_VOLTAGE_0_6; 3207 signal_levels |= DP_VOLTAGE_0_6;
2766 break; 3208 break;
2767 case DP_TRAIN_VOLTAGE_SWING_800: 3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2768 signal_levels |= DP_VOLTAGE_0_8; 3210 signal_levels |= DP_VOLTAGE_0_8;
2769 break; 3211 break;
2770 case DP_TRAIN_VOLTAGE_SWING_1200: 3212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2771 signal_levels |= DP_VOLTAGE_1_2; 3213 signal_levels |= DP_VOLTAGE_1_2;
2772 break; 3214 break;
2773 } 3215 }
2774 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3216 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2775 case DP_TRAIN_PRE_EMPHASIS_0: 3217 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2776 default: 3218 default:
2777 signal_levels |= DP_PRE_EMPHASIS_0; 3219 signal_levels |= DP_PRE_EMPHASIS_0;
2778 break; 3220 break;
2779 case DP_TRAIN_PRE_EMPHASIS_3_5: 3221 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2780 signal_levels |= DP_PRE_EMPHASIS_3_5; 3222 signal_levels |= DP_PRE_EMPHASIS_3_5;
2781 break; 3223 break;
2782 case DP_TRAIN_PRE_EMPHASIS_6: 3224 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2783 signal_levels |= DP_PRE_EMPHASIS_6; 3225 signal_levels |= DP_PRE_EMPHASIS_6;
2784 break; 3226 break;
2785 case DP_TRAIN_PRE_EMPHASIS_9_5: 3227 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2786 signal_levels |= DP_PRE_EMPHASIS_9_5; 3228 signal_levels |= DP_PRE_EMPHASIS_9_5;
2787 break; 3229 break;
2788 } 3230 }
@@ -2796,19 +3238,19 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
2796 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3238 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2797 DP_TRAIN_PRE_EMPHASIS_MASK); 3239 DP_TRAIN_PRE_EMPHASIS_MASK);
2798 switch (signal_levels) { 3240 switch (signal_levels) {
2799 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2800 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 3242 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2801 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 3243 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2802 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 3244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2803 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 3245 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2804 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2805 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 3247 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2806 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 3248 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2807 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2808 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 3250 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2809 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 3251 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2810 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2811 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2812 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 3254 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2813 default: 3255 default:
2814 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3256 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
@@ -2824,21 +3266,21 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
2824 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3266 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2825 DP_TRAIN_PRE_EMPHASIS_MASK); 3267 DP_TRAIN_PRE_EMPHASIS_MASK);
2826 switch (signal_levels) { 3268 switch (signal_levels) {
2827 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2828 return EDP_LINK_TRAIN_400MV_0DB_IVB; 3270 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2829 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2830 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 3272 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2831 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 3273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2832 return EDP_LINK_TRAIN_400MV_6DB_IVB; 3274 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2833 3275
2834 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2835 return EDP_LINK_TRAIN_600MV_0DB_IVB; 3277 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2836 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2837 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 3279 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2838 3280
2839 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2840 return EDP_LINK_TRAIN_800MV_0DB_IVB; 3282 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2841 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2842 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 3284 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2843 3285
2844 default: 3286 default:
@@ -2855,30 +3297,30 @@ intel_hsw_signal_levels(uint8_t train_set)
2855 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3297 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2856 DP_TRAIN_PRE_EMPHASIS_MASK); 3298 DP_TRAIN_PRE_EMPHASIS_MASK);
2857 switch (signal_levels) { 3299 switch (signal_levels) {
2858 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2859 return DDI_BUF_EMP_400MV_0DB_HSW; 3301 return DDI_BUF_TRANS_SELECT(0);
2860 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2861 return DDI_BUF_EMP_400MV_3_5DB_HSW; 3303 return DDI_BUF_TRANS_SELECT(1);
2862 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2863 return DDI_BUF_EMP_400MV_6DB_HSW; 3305 return DDI_BUF_TRANS_SELECT(2);
2864 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
2865 return DDI_BUF_EMP_400MV_9_5DB_HSW; 3307 return DDI_BUF_TRANS_SELECT(3);
2866 3308
2867 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2868 return DDI_BUF_EMP_600MV_0DB_HSW; 3310 return DDI_BUF_TRANS_SELECT(4);
2869 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2870 return DDI_BUF_EMP_600MV_3_5DB_HSW; 3312 return DDI_BUF_TRANS_SELECT(5);
2871 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2872 return DDI_BUF_EMP_600MV_6DB_HSW; 3314 return DDI_BUF_TRANS_SELECT(6);
2873 3315
2874 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2875 return DDI_BUF_EMP_800MV_0DB_HSW; 3317 return DDI_BUF_TRANS_SELECT(7);
2876 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2877 return DDI_BUF_EMP_800MV_3_5DB_HSW; 3319 return DDI_BUF_TRANS_SELECT(8);
2878 default: 3320 default:
2879 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3321 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2880 "0x%x\n", signal_levels); 3322 "0x%x\n", signal_levels);
2881 return DDI_BUF_EMP_400MV_0DB_HSW; 3323 return DDI_BUF_TRANS_SELECT(0);
2882 } 3324 }
2883} 3325}
2884 3326
@@ -2925,74 +3367,10 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2925 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3367 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2926 struct drm_device *dev = intel_dig_port->base.base.dev; 3368 struct drm_device *dev = intel_dig_port->base.base.dev;
2927 struct drm_i915_private *dev_priv = dev->dev_private; 3369 struct drm_i915_private *dev_priv = dev->dev_private;
2928 enum port port = intel_dig_port->port;
2929 uint8_t buf[sizeof(intel_dp->train_set) + 1]; 3370 uint8_t buf[sizeof(intel_dp->train_set) + 1];
2930 int ret, len; 3371 int ret, len;
2931 3372
2932 if (HAS_DDI(dev)) { 3373 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2933 uint32_t temp = I915_READ(DP_TP_CTL(port));
2934
2935 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2936 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2937 else
2938 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2939
2940 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2941 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2942 case DP_TRAINING_PATTERN_DISABLE:
2943 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2944
2945 break;
2946 case DP_TRAINING_PATTERN_1:
2947 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2948 break;
2949 case DP_TRAINING_PATTERN_2:
2950 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2951 break;
2952 case DP_TRAINING_PATTERN_3:
2953 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2954 break;
2955 }
2956 I915_WRITE(DP_TP_CTL(port), temp);
2957
2958 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2959 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2960
2961 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2962 case DP_TRAINING_PATTERN_DISABLE:
2963 *DP |= DP_LINK_TRAIN_OFF_CPT;
2964 break;
2965 case DP_TRAINING_PATTERN_1:
2966 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2967 break;
2968 case DP_TRAINING_PATTERN_2:
2969 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2970 break;
2971 case DP_TRAINING_PATTERN_3:
2972 DRM_ERROR("DP training pattern 3 not supported\n");
2973 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2974 break;
2975 }
2976
2977 } else {
2978 *DP &= ~DP_LINK_TRAIN_MASK;
2979
2980 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2981 case DP_TRAINING_PATTERN_DISABLE:
2982 *DP |= DP_LINK_TRAIN_OFF;
2983 break;
2984 case DP_TRAINING_PATTERN_1:
2985 *DP |= DP_LINK_TRAIN_PAT_1;
2986 break;
2987 case DP_TRAINING_PATTERN_2:
2988 *DP |= DP_LINK_TRAIN_PAT_2;
2989 break;
2990 case DP_TRAINING_PATTERN_3:
2991 DRM_ERROR("DP training pattern 3 not supported\n");
2992 *DP |= DP_LINK_TRAIN_PAT_2;
2993 break;
2994 }
2995 }
2996 3374
2997 I915_WRITE(intel_dp->output_reg, *DP); 3375 I915_WRITE(intel_dp->output_reg, *DP);
2998 POSTING_READ(intel_dp->output_reg); 3376 POSTING_READ(intel_dp->output_reg);
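With the platform-specific switch ladders hoisted into _intel_dp_set_link_train(), this function shrinks to the DPCD write plus the register update, and intel_dp_enable_port() earlier in the patch reuses the same helper. A sketch of the calling convention, using the file's own names and macros:

/* sketch: how a caller drives the shared helper (cf. intel_dp_enable_port) */
static void program_pattern_1(struct intel_dp *intel_dp)
{
        uint32_t DP = intel_dp->DP;

        _intel_dp_set_link_train(intel_dp, &DP, DP_TRAINING_PATTERN_1);
        /* the helper updates the register image (and DP_TP_CTL on DDI);
         * the caller still latches the result to the port register */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);
}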
@@ -3276,7 +3654,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3276 DP &= ~DP_LINK_TRAIN_MASK_CPT; 3654 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3277 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 3655 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3278 } else { 3656 } else {
3279 DP &= ~DP_LINK_TRAIN_MASK; 3657 if (IS_CHERRYVIEW(dev))
3658 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3659 else
3660 DP &= ~DP_LINK_TRAIN_MASK;
3280 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 3661 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3281 } 3662 }
3282 POSTING_READ(intel_dp->output_reg); 3663 POSTING_READ(intel_dp->output_reg);
@@ -3322,15 +3703,11 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3322 struct drm_device *dev = dig_port->base.base.dev; 3703 struct drm_device *dev = dig_port->base.base.dev;
3323 struct drm_i915_private *dev_priv = dev->dev_private; 3704 struct drm_i915_private *dev_priv = dev->dev_private;
3324 3705
3325 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3326
3327 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, 3706 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3328 sizeof(intel_dp->dpcd)) < 0) 3707 sizeof(intel_dp->dpcd)) < 0)
3329 return false; /* aux transfer failed */ 3708 return false; /* aux transfer failed */
3330 3709
3331 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 3710 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3332 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
3333 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3334 3711
3335 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 3712 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3336 return false; /* DPCD not present */ 3713 return false; /* DPCD not present */
@@ -3351,7 +3728,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3351 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && 3728 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3352 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { 3729 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3353 intel_dp->use_tps3 = true; 3730 intel_dp->use_tps3 = true;
3354 DRM_DEBUG_KMS("Displayport TPS3 supported"); 3731 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3355 } else 3732 } else
3356 intel_dp->use_tps3 = false; 3733 intel_dp->use_tps3 = false;
3357 3734
@@ -3388,7 +3765,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
3388 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 3765 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3389 buf[0], buf[1], buf[2]); 3766 buf[0], buf[1], buf[2]);
3390 3767
3391 edp_panel_vdd_off(intel_dp, false); 3768 intel_edp_panel_vdd_off(intel_dp, false);
3392} 3769}
3393 3770
3394static bool 3771static bool
@@ -3402,7 +3779,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
3402 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 3779 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3403 return false; 3780 return false;
3404 3781
3405 _edp_panel_vdd_on(intel_dp); 3782 intel_edp_panel_vdd_on(intel_dp);
3406 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { 3783 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3407 if (buf[0] & DP_MST_CAP) { 3784 if (buf[0] & DP_MST_CAP) {
3408 DRM_DEBUG_KMS("Sink is MST capable\n"); 3785 DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3412,7 +3789,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
3412 intel_dp->is_mst = false; 3789 intel_dp->is_mst = false;
3413 } 3790 }
3414 } 3791 }
3415 edp_panel_vdd_off(intel_dp, false); 3792 intel_edp_panel_vdd_off(intel_dp, false);
3416 3793
3417 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 3794 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3418 return intel_dp->is_mst; 3795 return intel_dp->is_mst;
@@ -3427,21 +3804,21 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3427 u8 buf[1]; 3804 u8 buf[1];
3428 3805
3429 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) 3806 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
3430 return -EAGAIN; 3807 return -EIO;
3431 3808
3432 if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) 3809 if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
3433 return -ENOTTY; 3810 return -ENOTTY;
3434 3811
3435 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 3812 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3436 DP_TEST_SINK_START) < 0) 3813 DP_TEST_SINK_START) < 0)
3437 return -EAGAIN; 3814 return -EIO;
3438 3815
3439 /* Wait 2 vblanks to be sure we will have the correct CRC value */ 3816 /* Wait 2 vblanks to be sure we will have the correct CRC value */
3440 intel_wait_for_vblank(dev, intel_crtc->pipe); 3817 intel_wait_for_vblank(dev, intel_crtc->pipe);
3441 intel_wait_for_vblank(dev, intel_crtc->pipe); 3818 intel_wait_for_vblank(dev, intel_crtc->pipe);
3442 3819
3443 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) 3820 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3444 return -EAGAIN; 3821 return -EIO;
3445 3822
3446 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); 3823 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
3447 return 0; 3824 return 0;
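intel_dp_sink_crc() now returns -EIO for failed AUX transactions, rather than -EAGAIN, which wrongly suggested a retry could help, while keeping -ENOTTY for sinks that simply lack CRC test support, so callers can tell the two cases apart. A hypothetical caller, reusing only helpers visible in this file:

static int read_sink_crc(struct intel_dp *intel_dp)
{
        u8 crc[6];
        int ret;

        ret = intel_dp_sink_crc(intel_dp, crc);
        if (ret == -ENOTTY)  /* sink doesn't advertise DP_TEST_CRC_SUPPORTED */
                return ret;
        if (ret)             /* -EIO: the AUX access itself failed */
                return ret;

        DRM_DEBUG_KMS("sink CRC: %02x %02x %02x %02x %02x %02x\n",
                      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
        return 0;
}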
@@ -3644,20 +4021,24 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3644} 4021}
3645 4022
3646static enum drm_connector_status 4023static enum drm_connector_status
4024edp_detect(struct intel_dp *intel_dp)
4025{
4026 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4027 enum drm_connector_status status;
4028
4029 status = intel_panel_detect(dev);
4030 if (status == connector_status_unknown)
4031 status = connector_status_connected;
4032
4033 return status;
4034}
4035
4036static enum drm_connector_status
3647ironlake_dp_detect(struct intel_dp *intel_dp) 4037ironlake_dp_detect(struct intel_dp *intel_dp)
3648{ 4038{
3649 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4039 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3650 struct drm_i915_private *dev_priv = dev->dev_private; 4040 struct drm_i915_private *dev_priv = dev->dev_private;
3651 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4041 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3652 enum drm_connector_status status;
3653
3654 /* Can't disconnect eDP, but you can close the lid... */
3655 if (is_edp(intel_dp)) {
3656 status = intel_panel_detect(dev);
3657 if (status == connector_status_unknown)
3658 status = connector_status_connected;
3659 return status;
3660 }
3661 4042
3662 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 4043 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3663 return connector_status_disconnected; 4044 return connector_status_disconnected;
@@ -3733,9 +4114,9 @@ g4x_dp_detect(struct intel_dp *intel_dp)
3733} 4114}
3734 4115
3735static struct edid * 4116static struct edid *
3736intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 4117intel_dp_get_edid(struct intel_dp *intel_dp)
3737{ 4118{
3738 struct intel_connector *intel_connector = to_intel_connector(connector); 4119 struct intel_connector *intel_connector = intel_dp->attached_connector;
3739 4120
3740 /* use cached edid if we have one */ 4121 /* use cached edid if we have one */
3741 if (intel_connector->edid) { 4122 if (intel_connector->edid) {
@@ -3744,27 +4125,55 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3744 return NULL; 4125 return NULL;
3745 4126
3746 return drm_edid_duplicate(intel_connector->edid); 4127 return drm_edid_duplicate(intel_connector->edid);
3747 } 4128 } else
4129 return drm_get_edid(&intel_connector->base,
4130 &intel_dp->aux.ddc);
4131}
4132
4133static void
4134intel_dp_set_edid(struct intel_dp *intel_dp)
4135{
4136 struct intel_connector *intel_connector = intel_dp->attached_connector;
4137 struct edid *edid;
4138
4139 edid = intel_dp_get_edid(intel_dp);
4140 intel_connector->detect_edid = edid;
3748 4141
3749 return drm_get_edid(connector, adapter); 4142 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4143 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4144 else
4145 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3750} 4146}
3751 4147
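intel_dp_set_edid()/intel_dp_unset_edid() cache the probed EDID on the connector for the lifetime of a detect cycle, so later hooks read intel_connector->detect_edid instead of issuing another DDC/AUX transfer, and the audio decision is made exactly once. A sketch of what a consumer looks like after this change, mirroring the reworked intel_dp_detect_audio() further down:

/* sketch of a consumer after this change (cf. intel_dp_detect_audio) */
static bool connector_has_audio(struct drm_connector *connector)
{
        struct edid *edid = to_intel_connector(connector)->detect_edid;

        /* no DDC traffic and no kfree(): the cached EDID belongs to
         * the connector until intel_dp_unset_edid() runs */
        return edid && drm_detect_monitor_audio(edid);
}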
3752static int 4148static void
3753intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 4149intel_dp_unset_edid(struct intel_dp *intel_dp)
3754{ 4150{
3755 struct intel_connector *intel_connector = to_intel_connector(connector); 4151 struct intel_connector *intel_connector = intel_dp->attached_connector;
3756 4152
3757 /* use cached edid if we have one */ 4153 kfree(intel_connector->detect_edid);
3758 if (intel_connector->edid) { 4154 intel_connector->detect_edid = NULL;
3759 /* invalid edid */
3760 if (IS_ERR(intel_connector->edid))
3761 return 0;
3762 4155
3763 return intel_connector_update_modes(connector, 4156 intel_dp->has_audio = false;
3764 intel_connector->edid); 4157}
3765 } 4158
4159static enum intel_display_power_domain
4160intel_dp_power_get(struct intel_dp *dp)
4161{
4162 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4163 enum intel_display_power_domain power_domain;
4164
4165 power_domain = intel_display_port_power_domain(encoder);
4166 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4167
4168 return power_domain;
4169}
3766 4170
3767 return intel_ddc_get_modes(connector, adapter); 4171static void
4172intel_dp_power_put(struct intel_dp *dp,
4173 enum intel_display_power_domain power_domain)
4174{
4175 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4176 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
3768} 4177}
3769 4178
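intel_dp_power_get()/intel_dp_power_put() wrap the display power domain reference counting so the detect and force paths below stay balanced across their early returns. The intended call shape, as a sketch:

/* sketch of the intended shape (intel_dp_detect() below does exactly this) */
static void probe_with_power(struct intel_dp *intel_dp)
{
        enum intel_display_power_domain power_domain;

        power_domain = intel_dp_power_get(intel_dp);
        /* ... anything needing the port powered: HPD, DPCD, EDID ... */
        intel_dp_power_put(intel_dp, power_domain);
}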
3770static enum drm_connector_status 4179static enum drm_connector_status
@@ -3774,33 +4183,30 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3774 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4183 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3775 struct intel_encoder *intel_encoder = &intel_dig_port->base; 4184 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3776 struct drm_device *dev = connector->dev; 4185 struct drm_device *dev = connector->dev;
3777 struct drm_i915_private *dev_priv = dev->dev_private;
3778 enum drm_connector_status status; 4186 enum drm_connector_status status;
3779 enum intel_display_power_domain power_domain; 4187 enum intel_display_power_domain power_domain;
3780 struct edid *edid = NULL;
3781 bool ret; 4188 bool ret;
3782 4189
3783 power_domain = intel_display_port_power_domain(intel_encoder);
3784 intel_display_power_get(dev_priv, power_domain);
3785
3786 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4190 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3787 connector->base.id, connector->name); 4191 connector->base.id, connector->name);
4192 intel_dp_unset_edid(intel_dp);
3788 4193
3789 if (intel_dp->is_mst) { 4194 if (intel_dp->is_mst) {
3790 /* MST devices are disconnected from a monitor POV */ 4195 /* MST devices are disconnected from a monitor POV */
3791 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4196 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3792 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4197 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3793 status = connector_status_disconnected; 4198 return connector_status_disconnected;
3794 goto out;
3795 } 4199 }
3796 4200
3797 intel_dp->has_audio = false; 4201 power_domain = intel_dp_power_get(intel_dp);
3798 4202
3799 if (HAS_PCH_SPLIT(dev)) 4203 /* Can't disconnect eDP, but you can close the lid... */
4204 if (is_edp(intel_dp))
4205 status = edp_detect(intel_dp);
4206 else if (HAS_PCH_SPLIT(dev))
3800 status = ironlake_dp_detect(intel_dp); 4207 status = ironlake_dp_detect(intel_dp);
3801 else 4208 else
3802 status = g4x_dp_detect(intel_dp); 4209 status = g4x_dp_detect(intel_dp);
3803
3804 if (status != connector_status_connected) 4210 if (status != connector_status_connected)
3805 goto out; 4211 goto out;
3806 4212
@@ -3816,82 +4222,78 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3816 goto out; 4222 goto out;
3817 } 4223 }
3818 4224
3819 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 4225 intel_dp_set_edid(intel_dp);
3820 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3821 } else {
3822 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3823 if (edid) {
3824 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3825 kfree(edid);
3826 }
3827 }
3828 4226
3829 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4227 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3830 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4228 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3831 status = connector_status_connected; 4229 status = connector_status_connected;
3832 4230
3833out: 4231out:
3834 intel_display_power_put(dev_priv, power_domain); 4232 intel_dp_power_put(intel_dp, power_domain);
3835 return status; 4233 return status;
3836} 4234}
3837 4235
3838static int intel_dp_get_modes(struct drm_connector *connector) 4236static void
4237intel_dp_force(struct drm_connector *connector)
3839{ 4238{
3840 struct intel_dp *intel_dp = intel_attached_dp(connector); 4239 struct intel_dp *intel_dp = intel_attached_dp(connector);
3841 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4240 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3842 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3843 struct intel_connector *intel_connector = to_intel_connector(connector);
3844 struct drm_device *dev = connector->dev;
3845 struct drm_i915_private *dev_priv = dev->dev_private;
3846 enum intel_display_power_domain power_domain; 4241 enum intel_display_power_domain power_domain;
3847 int ret;
3848 4242
3849 /* We should parse the EDID data and find out if it has an audio sink 4243 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3850 */ 4244 connector->base.id, connector->name);
4245 intel_dp_unset_edid(intel_dp);
3851 4246
3852 power_domain = intel_display_port_power_domain(intel_encoder); 4247 if (connector->status != connector_status_connected)
3853 intel_display_power_get(dev_priv, power_domain); 4248 return;
3854 4249
3855 ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); 4250 power_domain = intel_dp_power_get(intel_dp);
3856 intel_display_power_put(dev_priv, power_domain); 4251
3857 if (ret) 4252 intel_dp_set_edid(intel_dp);
3858 return ret; 4253
4254 intel_dp_power_put(intel_dp, power_domain);
4255
4256 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4257 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4258}
4259
4260static int intel_dp_get_modes(struct drm_connector *connector)
4261{
4262 struct intel_connector *intel_connector = to_intel_connector(connector);
4263 struct edid *edid;
4264
4265 edid = intel_connector->detect_edid;
4266 if (edid) {
4267 int ret = intel_connector_update_modes(connector, edid);
4268 if (ret)
4269 return ret;
4270 }
3859 4271
3860 /* if eDP has no EDID, fall back to fixed mode */ 4272 /* if eDP has no EDID, fall back to fixed mode */
3861 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 4273 if (is_edp(intel_attached_dp(connector)) &&
4274 intel_connector->panel.fixed_mode) {
3862 struct drm_display_mode *mode; 4275 struct drm_display_mode *mode;
3863 mode = drm_mode_duplicate(dev, 4276
4277 mode = drm_mode_duplicate(connector->dev,
3864 intel_connector->panel.fixed_mode); 4278 intel_connector->panel.fixed_mode);
3865 if (mode) { 4279 if (mode) {
3866 drm_mode_probed_add(connector, mode); 4280 drm_mode_probed_add(connector, mode);
3867 return 1; 4281 return 1;
3868 } 4282 }
3869 } 4283 }
4284
3870 return 0; 4285 return 0;
3871} 4286}
3872 4287
3873static bool 4288static bool
3874intel_dp_detect_audio(struct drm_connector *connector) 4289intel_dp_detect_audio(struct drm_connector *connector)
3875{ 4290{
3876 struct intel_dp *intel_dp = intel_attached_dp(connector);
3877 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3878 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3879 struct drm_device *dev = connector->dev;
3880 struct drm_i915_private *dev_priv = dev->dev_private;
3881 enum intel_display_power_domain power_domain;
3882 struct edid *edid;
3883 bool has_audio = false; 4291 bool has_audio = false;
4292 struct edid *edid;
3884 4293
3885 power_domain = intel_display_port_power_domain(intel_encoder); 4294 edid = to_intel_connector(connector)->detect_edid;
3886 intel_display_power_get(dev_priv, power_domain); 4295 if (edid)
3887
3888 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3889 if (edid) {
3890 has_audio = drm_detect_monitor_audio(edid); 4296 has_audio = drm_detect_monitor_audio(edid);
3891 kfree(edid);
3892 }
3893
3894 intel_display_power_put(dev_priv, power_domain);
3895 4297
3896 return has_audio; 4298 return has_audio;
3897} 4299}
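
The hunks above move DP probing to a cache-once model: detect() drops any stale EDID, reads it from the sink exactly once, and stashes it in intel_connector->detect_edid; get_modes() and intel_dp_detect_audio() then consume the cache instead of re-driving the AUX/DDC bus, and the new intel_dp_force() refreshes it on user override. A minimal userspace C sketch of that pattern, assuming nothing about the real driver types (all names below are illustrative stand-ins):

#include <stdlib.h>
#include <string.h>

struct edid_blob { char data[128]; };

struct connector {
	struct edid_blob *detect_edid;	/* set by detect(), freed by unset_edid() */
};

/* stand-in for one expensive DDC/AUX transaction */
static struct edid_blob *probe_edid(void)
{
	struct edid_blob *e = malloc(sizeof(*e));

	if (e)
		memset(e->data, 0, sizeof(e->data));
	return e;
}

static void unset_edid(struct connector *c)
{
	free(c->detect_edid);
	c->detect_edid = NULL;
}

/* detect(): invalidate, then probe exactly once per detect cycle */
static int detect(struct connector *c)
{
	unset_edid(c);
	c->detect_edid = probe_edid();
	return c->detect_edid != NULL;
}

/* get_modes()/detect_audio(): consume the cache, no new bus traffic */
static int get_modes(const struct connector *c)
{
	return c->detect_edid ? 1 : 0;
}

int main(void)
{
	struct connector c = { 0 };

	if (detect(&c))
		get_modes(&c);
	unset_edid(&c);	/* what connector_destroy's kfree() does below */
	return 0;
}
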
@@ -3989,6 +4391,8 @@ intel_dp_connector_destroy(struct drm_connector *connector)
3989{ 4391{
3990 struct intel_connector *intel_connector = to_intel_connector(connector); 4392 struct intel_connector *intel_connector = to_intel_connector(connector);
3991 4393
4394 kfree(intel_connector->detect_edid);
4395
3992 if (!IS_ERR_OR_NULL(intel_connector->edid)) 4396 if (!IS_ERR_OR_NULL(intel_connector->edid))
3993 kfree(intel_connector->edid); 4397 kfree(intel_connector->edid);
3994 4398
@@ -4005,16 +4409,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4005{ 4409{
4006 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4410 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4007 struct intel_dp *intel_dp = &intel_dig_port->dp; 4411 struct intel_dp *intel_dp = &intel_dig_port->dp;
4008 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4009 4412
4010 drm_dp_aux_unregister(&intel_dp->aux); 4413 drm_dp_aux_unregister(&intel_dp->aux);
4011 intel_dp_mst_encoder_cleanup(intel_dig_port); 4414 intel_dp_mst_encoder_cleanup(intel_dig_port);
4012 drm_encoder_cleanup(encoder); 4415 drm_encoder_cleanup(encoder);
4013 if (is_edp(intel_dp)) { 4416 if (is_edp(intel_dp)) {
4014 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 4417 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4015 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 4418 /*
 4419 * vdd might still be enabled due to the delayed vdd off.
4420 * Make sure vdd is actually turned off here.
4421 */
4422 pps_lock(intel_dp);
4016 edp_panel_vdd_off_sync(intel_dp); 4423 edp_panel_vdd_off_sync(intel_dp);
4017 drm_modeset_unlock(&dev->mode_config.connection_mutex); 4424 pps_unlock(intel_dp);
4425
4018 if (intel_dp->edp_notifier.notifier_call) { 4426 if (intel_dp->edp_notifier.notifier_call) {
4019 unregister_reboot_notifier(&intel_dp->edp_notifier); 4427 unregister_reboot_notifier(&intel_dp->edp_notifier);
4020 intel_dp->edp_notifier.notifier_call = NULL; 4428 intel_dp->edp_notifier.notifier_call = NULL;
@@ -4030,7 +4438,13 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4030 if (!is_edp(intel_dp)) 4438 if (!is_edp(intel_dp))
4031 return; 4439 return;
4032 4440
4441 /*
 4442 * vdd might still be enabled due to the delayed vdd off.
4443 * Make sure vdd is actually turned off here.
4444 */
4445 pps_lock(intel_dp);
4033 edp_panel_vdd_off_sync(intel_dp); 4446 edp_panel_vdd_off_sync(intel_dp);
4447 pps_unlock(intel_dp);
4034} 4448}
4035 4449
4036static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4450static void intel_dp_encoder_reset(struct drm_encoder *encoder)
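
Both teardown paths above (encoder destroy and encoder suspend) now force panel VDD off under the dedicated PPS mutex instead of the modeset connection_mutex, since the delayed vdd-off work may still be pending. A compilable sketch of the pattern, with a pthread mutex standing in for pps_lock()/pps_unlock():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool vdd_enabled;

/* caller must hold pps_mutex, mirroring the lockdep_assert_held() style */
static void panel_vdd_off_sync_locked(void)
{
	vdd_enabled = false;
}

static void encoder_teardown(void)
{
	/*
	 * VDD might still be on because the delayed vdd-off work has
	 * not run yet; force it off under the sequencer lock.
	 */
	pthread_mutex_lock(&pps_mutex);
	panel_vdd_off_sync_locked();
	pthread_mutex_unlock(&pps_mutex);
}

int main(void)
{
	vdd_enabled = true;
	encoder_teardown();
	return vdd_enabled ? 1 : 0;
}
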
@@ -4041,6 +4455,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4041static const struct drm_connector_funcs intel_dp_connector_funcs = { 4455static const struct drm_connector_funcs intel_dp_connector_funcs = {
4042 .dpms = intel_connector_dpms, 4456 .dpms = intel_connector_dpms,
4043 .detect = intel_dp_detect, 4457 .detect = intel_dp_detect,
4458 .force = intel_dp_force,
4044 .fill_modes = drm_helper_probe_single_connector_modes, 4459 .fill_modes = drm_helper_probe_single_connector_modes,
4045 .set_property = intel_dp_set_property, 4460 .set_property = intel_dp_set_property,
4046 .destroy = intel_dp_connector_destroy, 4461 .destroy = intel_dp_connector_destroy,
@@ -4076,7 +4491,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4076 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) 4491 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4077 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; 4492 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4078 4493
4079 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, 4494 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4495 port_name(intel_dig_port->port),
4080 long_hpd ? "long" : "short"); 4496 long_hpd ? "long" : "short");
4081 4497
4082 power_domain = intel_display_port_power_domain(intel_encoder); 4498 power_domain = intel_display_port_power_domain(intel_encoder);
@@ -4216,6 +4632,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4216 u32 pp_on, pp_off, pp_div, pp; 4632 u32 pp_on, pp_off, pp_div, pp;
4217 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; 4633 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4218 4634
4635 lockdep_assert_held(&dev_priv->pps_mutex);
4636
4219 if (HAS_PCH_SPLIT(dev)) { 4637 if (HAS_PCH_SPLIT(dev)) {
4220 pp_ctrl_reg = PCH_PP_CONTROL; 4638 pp_ctrl_reg = PCH_PP_CONTROL;
4221 pp_on_reg = PCH_PP_ON_DELAYS; 4639 pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4315,6 +4733,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4315 u32 pp_on, pp_off, pp_div, port_sel = 0; 4733 u32 pp_on, pp_off, pp_div, port_sel = 0;
4316 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); 4734 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4317 int pp_on_reg, pp_off_reg, pp_div_reg; 4735 int pp_on_reg, pp_off_reg, pp_div_reg;
4736 enum port port = dp_to_dig_port(intel_dp)->port;
4737
4738 lockdep_assert_held(&dev_priv->pps_mutex);
4318 4739
4319 if (HAS_PCH_SPLIT(dev)) { 4740 if (HAS_PCH_SPLIT(dev)) {
4320 pp_on_reg = PCH_PP_ON_DELAYS; 4741 pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4349,12 +4770,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4349 /* Haswell doesn't have any port selection bits for the panel 4770 /* Haswell doesn't have any port selection bits for the panel
4350 * power sequencer any more. */ 4771 * power sequencer any more. */
4351 if (IS_VALLEYVIEW(dev)) { 4772 if (IS_VALLEYVIEW(dev)) {
4352 if (dp_to_dig_port(intel_dp)->port == PORT_B) 4773 port_sel = PANEL_PORT_SELECT_VLV(port);
4353 port_sel = PANEL_PORT_SELECT_DPB_VLV;
4354 else
4355 port_sel = PANEL_PORT_SELECT_DPC_VLV;
4356 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 4774 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4357 if (dp_to_dig_port(intel_dp)->port == PORT_A) 4775 if (port == PORT_A)
4358 port_sel = PANEL_PORT_SELECT_DPA; 4776 port_sel = PANEL_PORT_SELECT_DPA;
4359 else 4777 else
4360 port_sel = PANEL_PORT_SELECT_DPD; 4778 port_sel = PANEL_PORT_SELECT_DPD;
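
The VLV branch above collapses a per-port if/else into a single macro parameterized by the port. The macro body is not visible in this hunk, so the encoding below is only an assumed placeholder to show the shape:

#include <stdio.h>

/* hypothetical encoding: port select packed into the top register bits */
#define PANEL_PORT_SELECT_VLV(port)	((unsigned int)(port) << 30)

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

int main(void)
{
	/* one expression now covers every VLV port */
	printf("%#x\n", PANEL_PORT_SELECT_VLV(PORT_B));
	return 0;
}
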
@@ -4438,7 +4856,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4438 val = I915_READ(reg); 4856 val = I915_READ(reg);
4439 if (index > DRRS_HIGH_RR) { 4857 if (index > DRRS_HIGH_RR) {
4440 val |= PIPECONF_EDP_RR_MODE_SWITCH; 4858 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4441 intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2); 4859 intel_dp_set_m_n(intel_crtc);
4442 } else { 4860 } else {
4443 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 4861 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4444 } 4862 }
@@ -4478,7 +4896,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4478 } 4896 }
4479 4897
4480 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 4898 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4481 DRM_INFO("VBT doesn't support DRRS\n"); 4899 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4482 return NULL; 4900 return NULL;
4483 } 4901 }
4484 4902
@@ -4486,7 +4904,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4486 (dev, fixed_mode, connector); 4904 (dev, fixed_mode, connector);
4487 4905
4488 if (!downclock_mode) { 4906 if (!downclock_mode) {
4489 DRM_INFO("DRRS not supported\n"); 4907 DRM_DEBUG_KMS("DRRS not supported\n");
4490 return NULL; 4908 return NULL;
4491 } 4909 }
4492 4910
@@ -4497,7 +4915,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4497 intel_dp->drrs_state.type = dev_priv->vbt.drrs_type; 4915 intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4498 4916
4499 intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR; 4917 intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4500 DRM_INFO("seamless DRRS supported for eDP panel.\n"); 4918 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4501 return downclock_mode; 4919 return downclock_mode;
4502} 4920}
4503 4921
@@ -4512,8 +4930,11 @@ void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4512 return; 4930 return;
4513 4931
4514 intel_dp = enc_to_intel_dp(&intel_encoder->base); 4932 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4933
4934 pps_lock(intel_dp);
4935
4515 if (!edp_have_panel_vdd(intel_dp)) 4936 if (!edp_have_panel_vdd(intel_dp))
4516 return; 4937 goto out;
4517 /* 4938 /*
4518 * The VDD bit needs a power domain reference, so if the bit is 4939 * The VDD bit needs a power domain reference, so if the bit is
4519 * already enabled when we boot or resume, grab this reference and 4940 * already enabled when we boot or resume, grab this reference and
@@ -4525,6 +4946,8 @@ void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4525 intel_display_power_get(dev_priv, power_domain); 4946 intel_display_power_get(dev_priv, power_domain);
4526 4947
4527 edp_panel_vdd_schedule_off(intel_dp); 4948 edp_panel_vdd_schedule_off(intel_dp);
4949 out:
4950 pps_unlock(intel_dp);
4528} 4951}
4529 4952
4530static bool intel_edp_init_connector(struct intel_dp *intel_dp, 4953static bool intel_edp_init_connector(struct intel_dp *intel_dp,
@@ -4552,7 +4975,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4552 /* Cache DPCD and EDID for edp. */ 4975 /* Cache DPCD and EDID for edp. */
4553 intel_edp_panel_vdd_on(intel_dp); 4976 intel_edp_panel_vdd_on(intel_dp);
4554 has_dpcd = intel_dp_get_dpcd(intel_dp); 4977 has_dpcd = intel_dp_get_dpcd(intel_dp);
4555 edp_panel_vdd_off(intel_dp, false); 4978 intel_edp_panel_vdd_off(intel_dp, false);
4556 4979
4557 if (has_dpcd) { 4980 if (has_dpcd) {
4558 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 4981 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -4566,7 +4989,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4566 } 4989 }
4567 4990
4568 /* We now know it's not a ghost, init power sequence regs. */ 4991 /* We now know it's not a ghost, init power sequence regs. */
4992 pps_lock(intel_dp);
4569 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); 4993 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
4994 pps_unlock(intel_dp);
4570 4995
4571 mutex_lock(&dev->mode_config.mutex); 4996 mutex_lock(&dev->mode_config.mutex);
4572 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 4997 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
@@ -4610,6 +5035,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4610 } 5035 }
4611 5036
4612 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 5037 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5038 intel_connector->panel.backlight_power = intel_edp_backlight_power;
4613 intel_panel_setup_backlight(connector); 5039 intel_panel_setup_backlight(connector);
4614 5040
4615 return true; 5041 return true;
@@ -4628,6 +5054,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4628 struct edp_power_seq power_seq = { 0 }; 5054 struct edp_power_seq power_seq = { 0 };
4629 int type; 5055 int type;
4630 5056
5057 intel_dp->pps_pipe = INVALID_PIPE;
5058
4631 /* intel_dp vfuncs */ 5059 /* intel_dp vfuncs */
4632 if (IS_VALLEYVIEW(dev)) 5060 if (IS_VALLEYVIEW(dev))
4633 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; 5061 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
@@ -4698,8 +5126,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4698 } 5126 }
4699 5127
4700 if (is_edp(intel_dp)) { 5128 if (is_edp(intel_dp)) {
4701 intel_dp_init_panel_power_timestamps(intel_dp); 5129 pps_lock(intel_dp);
4702 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 5130 if (IS_VALLEYVIEW(dev)) {
5131 vlv_initial_power_sequencer_setup(intel_dp);
5132 } else {
5133 intel_dp_init_panel_power_timestamps(intel_dp);
5134 intel_dp_init_panel_power_sequencer(dev, intel_dp,
5135 &power_seq);
5136 }
5137 pps_unlock(intel_dp);
4703 } 5138 }
4704 5139
4705 intel_dp_aux_init(intel_dp, intel_connector); 5140 intel_dp_aux_init(intel_dp, intel_connector);
@@ -4707,7 +5142,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4707 /* init MST on ports that can support it */ 5142 /* init MST on ports that can support it */
4708 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 5143 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4709 if (port == PORT_B || port == PORT_C || port == PORT_D) { 5144 if (port == PORT_B || port == PORT_C || port == PORT_D) {
4710 intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); 5145 intel_dp_mst_encoder_init(intel_dig_port,
5146 intel_connector->base.base.id);
4711 } 5147 }
4712 } 5148 }
4713 5149
@@ -4715,9 +5151,13 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4715 drm_dp_aux_unregister(&intel_dp->aux); 5151 drm_dp_aux_unregister(&intel_dp->aux);
4716 if (is_edp(intel_dp)) { 5152 if (is_edp(intel_dp)) {
4717 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5153 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4718 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 5154 /*
 5155 * vdd might still be enabled due to the delayed vdd off.
5156 * Make sure vdd is actually turned off here.
5157 */
5158 pps_lock(intel_dp);
4719 edp_panel_vdd_off_sync(intel_dp); 5159 edp_panel_vdd_off_sync(intel_dp);
4720 drm_modeset_unlock(&dev->mode_config.connection_mutex); 5160 pps_unlock(intel_dp);
4721 } 5161 }
4722 drm_connector_unregister(connector); 5162 drm_connector_unregister(connector);
4723 drm_connector_cleanup(connector); 5163 drm_connector_cleanup(connector);
@@ -4781,7 +5221,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4781 } else { 5221 } else {
4782 intel_encoder->pre_enable = g4x_pre_enable_dp; 5222 intel_encoder->pre_enable = g4x_pre_enable_dp;
4783 intel_encoder->enable = g4x_enable_dp; 5223 intel_encoder->enable = g4x_enable_dp;
4784 intel_encoder->post_disable = g4x_post_disable_dp; 5224 if (INTEL_INFO(dev)->gen >= 5)
5225 intel_encoder->post_disable = ilk_post_disable_dp;
4785 } 5226 }
4786 5227
4787 intel_dig_port->port = port; 5228 intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b8c8bbd8e5f9..07ce04683c30 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -25,6 +25,7 @@
25#ifndef __INTEL_DRV_H__ 25#ifndef __INTEL_DRV_H__
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/async.h>
28#include <linux/i2c.h> 29#include <linux/i2c.h>
29#include <linux/hdmi.h> 30#include <linux/hdmi.h>
30#include <drm/i915_drm.h> 31#include <drm/i915_drm.h>
@@ -179,6 +180,8 @@ struct intel_panel {
179 bool active_low_pwm; 180 bool active_low_pwm;
180 struct backlight_device *device; 181 struct backlight_device *device;
181 } backlight; 182 } backlight;
183
184 void (*backlight_power)(struct intel_connector *, bool enable);
182}; 185};
183 186
184struct intel_connector { 187struct intel_connector {
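
struct intel_panel gains an optional backlight_power callback, which the eDP init hunk earlier wires to intel_edp_backlight_power so panel code can delegate power control per connector. A small sketch of the optional-hook idiom, using stand-in types rather than the driver's own:

#include <stdbool.h>
#include <stdio.h>

struct connector_stub { const char *name; };

struct panel {
	/* optional hook; left NULL on platforms without one */
	void (*backlight_power)(struct connector_stub *, bool enable);
};

static void edp_backlight_power(struct connector_stub *c, bool enable)
{
	printf("%s backlight %s\n", c->name, enable ? "on" : "off");
}

int main(void)
{
	struct connector_stub conn = { .name = "eDP-1" };
	struct panel panel = { .backlight_power = edp_backlight_power };

	/* generic code only calls through the hook when it is installed */
	if (panel.backlight_power)
		panel.backlight_power(&conn, true);
	return 0;
}
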
@@ -211,6 +214,7 @@ struct intel_connector {
211 214
212 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ 215 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
213 struct edid *edid; 216 struct edid *edid;
217 struct edid *detect_edid;
214 218
215 /* since POLL and HPD connectors may use the same HPD line keep the native 219 /* since POLL and HPD connectors may use the same HPD line keep the native
216 state of connector->polled in case hotplug storm detection changes it */ 220 state of connector->polled in case hotplug storm detection changes it */
@@ -330,6 +334,7 @@ struct intel_crtc_config {
330 334
331 /* m2_n2 for eDP downclock */ 335 /* m2_n2 for eDP downclock */
332 struct intel_link_m_n dp_m2_n2; 336 struct intel_link_m_n dp_m2_n2;
337 bool has_drrs;
333 338
334 /* 339 /*
 335 * Frequency the dpll for the port should run at. Differs from the 340 * Frequency the dpll for the port should run at. Differs from the
@@ -410,6 +415,7 @@ struct intel_crtc {
410 uint32_t cursor_addr; 415 uint32_t cursor_addr;
411 int16_t cursor_width, cursor_height; 416 int16_t cursor_width, cursor_height;
412 uint32_t cursor_cntl; 417 uint32_t cursor_cntl;
418 uint32_t cursor_size;
413 uint32_t cursor_base; 419 uint32_t cursor_base;
414 420
415 struct intel_plane_config plane_config; 421 struct intel_plane_config plane_config;
@@ -430,8 +436,6 @@ struct intel_crtc {
430 struct intel_pipe_wm active; 436 struct intel_pipe_wm active;
431 } wm; 437 } wm;
432 438
433 wait_queue_head_t vbl_wait;
434
435 int scanline_offset; 439 int scanline_offset;
436 struct intel_mmio_flip mmio_flip; 440 struct intel_mmio_flip mmio_flip;
437}; 441};
@@ -455,6 +459,7 @@ struct intel_plane {
455 unsigned int crtc_w, crtc_h; 459 unsigned int crtc_w, crtc_h;
456 uint32_t src_x, src_y; 460 uint32_t src_x, src_y;
457 uint32_t src_w, src_h; 461 uint32_t src_w, src_h;
462 unsigned int rotation;
458 463
459 /* Since we need to change the watermarks before/after 464 /* Since we need to change the watermarks before/after
460 * enabling/disabling the planes, we need to store the parameters here 465 * enabling/disabling the planes, we need to store the parameters here
@@ -565,6 +570,12 @@ struct intel_dp {
565 570
566 struct notifier_block edp_notifier; 571 struct notifier_block edp_notifier;
567 572
573 /*
574 * Pipe whose power sequencer is currently locked into
575 * this port. Only relevant on VLV/CHV.
576 */
577 enum pipe pps_pipe;
578
568 bool use_tps3; 579 bool use_tps3;
569 bool can_mst; /* this port supports mst */ 580 bool can_mst; /* this port supports mst */
570 bool is_mst; 581 bool is_mst;
@@ -663,6 +674,10 @@ struct intel_unpin_work {
663#define INTEL_FLIP_COMPLETE 2 674#define INTEL_FLIP_COMPLETE 2
664 u32 flip_count; 675 u32 flip_count;
665 u32 gtt_offset; 676 u32 gtt_offset;
677 struct intel_engine_cs *flip_queued_ring;
678 u32 flip_queued_seqno;
679 int flip_queued_vblank;
680 int flip_ready_vblank;
666 bool enable_stall_check; 681 bool enable_stall_check;
667}; 682};
668 683
@@ -827,7 +842,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
827enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 842enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
828 enum pipe pipe); 843 enum pipe pipe);
829void intel_wait_for_vblank(struct drm_device *dev, int pipe); 844void intel_wait_for_vblank(struct drm_device *dev, int pipe);
830void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
831int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 845int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
832void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 846void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
833 struct intel_digital_port *dport); 847 struct intel_digital_port *dport);
@@ -848,6 +862,7 @@ __intel_framebuffer_create(struct drm_device *dev,
848void intel_prepare_page_flip(struct drm_device *dev, int plane); 862void intel_prepare_page_flip(struct drm_device *dev, int plane);
849void intel_finish_page_flip(struct drm_device *dev, int pipe); 863void intel_finish_page_flip(struct drm_device *dev, int pipe);
850void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 864void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
865void intel_check_page_flip(struct drm_device *dev, int pipe);
851 866
852/* shared dpll functions */ 867/* shared dpll functions */
853struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); 868struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -882,6 +897,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
882void hsw_disable_pc8(struct drm_i915_private *dev_priv); 897void hsw_disable_pc8(struct drm_i915_private *dev_priv);
883void intel_dp_get_m_n(struct intel_crtc *crtc, 898void intel_dp_get_m_n(struct intel_crtc *crtc,
884 struct intel_crtc_config *pipe_config); 899 struct intel_crtc_config *pipe_config);
900void intel_dp_set_m_n(struct intel_crtc *crtc);
885int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 901int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
886void 902void
887ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, 903ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
@@ -896,7 +912,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
896 struct intel_crtc_config *pipe_config); 912 struct intel_crtc_config *pipe_config);
897int intel_format_to_fourcc(int format); 913int intel_format_to_fourcc(int format);
898void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); 914void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
899 915void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
900 916
901/* intel_dp.c */ 917/* intel_dp.c */
902void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 918void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -935,6 +951,7 @@ void intel_dp_mst_suspend(struct drm_device *dev);
935void intel_dp_mst_resume(struct drm_device *dev); 951void intel_dp_mst_resume(struct drm_device *dev);
936int intel_dp_max_link_bw(struct intel_dp *intel_dp); 952int intel_dp_max_link_bw(struct intel_dp *intel_dp);
937void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 953void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
954void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
938/* intel_dp_mst.c */ 955/* intel_dp_mst.c */
939int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 956int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
940void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 957void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -949,9 +966,9 @@ void intel_dvo_init(struct drm_device *dev);
949/* legacy fbdev emulation in intel_fbdev.c */ 966/* legacy fbdev emulation in intel_fbdev.c */
950#ifdef CONFIG_DRM_I915_FBDEV 967#ifdef CONFIG_DRM_I915_FBDEV
951extern int intel_fbdev_init(struct drm_device *dev); 968extern int intel_fbdev_init(struct drm_device *dev);
952extern void intel_fbdev_initial_config(struct drm_device *dev); 969extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
953extern void intel_fbdev_fini(struct drm_device *dev); 970extern void intel_fbdev_fini(struct drm_device *dev);
954extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); 971extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
955extern void intel_fbdev_output_poll_changed(struct drm_device *dev); 972extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
956extern void intel_fbdev_restore_mode(struct drm_device *dev); 973extern void intel_fbdev_restore_mode(struct drm_device *dev);
957#else 974#else
@@ -960,7 +977,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
960 return 0; 977 return 0;
961} 978}
962 979
963static inline void intel_fbdev_initial_config(struct drm_device *dev) 980static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
964{ 981{
965} 982}
966 983
@@ -968,7 +985,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev)
968{ 985{
969} 986}
970 987
971static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) 988static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
972{ 989{
973} 990}
974 991
@@ -1091,7 +1108,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
1091int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); 1108int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
1092void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 1109void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1093 enum plane plane); 1110 enum plane plane);
1094void intel_plane_restore(struct drm_plane *plane); 1111int intel_plane_set_property(struct drm_plane *plane,
1112 struct drm_property *prop,
1113 uint64_t val);
1114int intel_plane_restore(struct drm_plane *plane);
1095void intel_plane_disable(struct drm_plane *plane); 1115void intel_plane_disable(struct drm_plane *plane);
1096int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1116int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1097 struct drm_file *file_priv); 1117 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 670c29a7b5dd..5bd9e09ad3c5 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -184,7 +184,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
184 184
185 /* update the hw state for DPLL */ 185 /* update the hw state for DPLL */
186 intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV | 186 intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
187 DPLL_REFA_CLK_ENABLE_VLV; 187 DPLL_REFA_CLK_ENABLE_VLV;
188 188
189 tmp = I915_READ(DSPCLK_GATE_D); 189 tmp = I915_READ(DSPCLK_GATE_D);
190 tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 190 tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -259,8 +259,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
259 temp = I915_READ(MIPI_CTRL(pipe)); 259 temp = I915_READ(MIPI_CTRL(pipe));
260 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; 260 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
261 I915_WRITE(MIPI_CTRL(pipe), temp | 261 I915_WRITE(MIPI_CTRL(pipe), temp |
262 intel_dsi->escape_clk_div << 262 intel_dsi->escape_clk_div <<
263 ESCAPE_CLOCK_DIVIDER_SHIFT); 263 ESCAPE_CLOCK_DIVIDER_SHIFT);
264 264
265 I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP); 265 I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
266 266
@@ -297,7 +297,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
297 usleep_range(2000, 2500); 297 usleep_range(2000, 2500);
298 298
299 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) 299 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
300 == 0x00000), 30)) 300 == 0x00000), 30))
301 DRM_ERROR("DSI LP not going Low\n"); 301 DRM_ERROR("DSI LP not going Low\n");
302 302
303 val = I915_READ(MIPI_PORT_CTRL(pipe)); 303 val = I915_READ(MIPI_PORT_CTRL(pipe));
@@ -423,9 +423,11 @@ static u16 txclkesc(u32 divider, unsigned int us)
423} 423}
424 424
425/* return pixels in terms of txbyteclkhs */ 425/* return pixels in terms of txbyteclkhs */
426static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count) 426static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
427 u16 burst_mode_ratio)
427{ 428{
428 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count); 429 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
430 8 * 100), lane_count);
429} 431}
430 432
431static void set_dsi_timings(struct drm_encoder *encoder, 433static void set_dsi_timings(struct drm_encoder *encoder,
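
The reworked txbyteclkhs() above scales pixels by bpp and by burst_mode_ratio, a percentage where 100 means no burst overhead, before the usual divide by 8 bits-per-byte and by lane count. A standalone worked example with made-up but plausible numbers:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* pixels -> high-speed byte clocks; burst_mode_ratio is in percent */
static uint16_t txbyteclkhs(uint16_t pixels, int bpp, int lane_count,
			    uint16_t burst_mode_ratio)
{
	return DIV_ROUND_UP(DIV_ROUND_UP((uint32_t)pixels * bpp *
					 burst_mode_ratio, 8 * 100),
			    lane_count);
}

int main(void)
{
	/* 1920 active pixels, RGB888, 4 lanes, 1.25x burst ratio (125%) */
	printf("%u\n", txbyteclkhs(1920, 24, 4, 125));	/* 1800 byteclks */
	return 0;
}

With ratio 100 the expression degenerates to the old DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count), so non-burst panels are unaffected.
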
@@ -451,10 +453,12 @@ static void set_dsi_timings(struct drm_encoder *encoder,
451 vbp = mode->vtotal - mode->vsync_end; 453 vbp = mode->vtotal - mode->vsync_end;
452 454
453 /* horizontal values are in terms of high speed byte clock */ 455 /* horizontal values are in terms of high speed byte clock */
454 hactive = txbyteclkhs(hactive, bpp, lane_count); 456 hactive = txbyteclkhs(hactive, bpp, lane_count,
455 hfp = txbyteclkhs(hfp, bpp, lane_count); 457 intel_dsi->burst_mode_ratio);
456 hsync = txbyteclkhs(hsync, bpp, lane_count); 458 hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
457 hbp = txbyteclkhs(hbp, bpp, lane_count); 459 hsync = txbyteclkhs(hsync, bpp, lane_count,
460 intel_dsi->burst_mode_ratio);
461 hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
458 462
459 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive); 463 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
460 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp); 464 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
@@ -541,12 +545,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
541 intel_dsi->video_mode_format == VIDEO_MODE_BURST) { 545 intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
542 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), 546 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
543 txbyteclkhs(adjusted_mode->htotal, bpp, 547 txbyteclkhs(adjusted_mode->htotal, bpp,
544 intel_dsi->lane_count) + 1); 548 intel_dsi->lane_count,
549 intel_dsi->burst_mode_ratio) + 1);
545 } else { 550 } else {
546 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), 551 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
547 txbyteclkhs(adjusted_mode->vtotal * 552 txbyteclkhs(adjusted_mode->vtotal *
548 adjusted_mode->htotal, 553 adjusted_mode->htotal,
549 bpp, intel_dsi->lane_count) + 1); 554 bpp, intel_dsi->lane_count,
555 intel_dsi->burst_mode_ratio) + 1);
550 } 556 }
551 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout); 557 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
552 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val); 558 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
@@ -576,7 +582,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
576 * XXX: write MIPI_STOP_STATE_STALL? 582 * XXX: write MIPI_STOP_STATE_STALL?
577 */ 583 */
578 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 584 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
579 intel_dsi->hs_to_lp_count); 585 intel_dsi->hs_to_lp_count);
580 586
581 /* XXX: low power clock equivalence in terms of byte clock. the number 587 /* XXX: low power clock equivalence in terms of byte clock. the number
582 * of byte clocks occupied in one low power clock. based on txbyteclkhs 588 * of byte clocks occupied in one low power clock. based on txbyteclkhs
@@ -601,10 +607,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
601 * 64 like 1366 x 768. Enable RANDOM resolution support for such 607 * 64 like 1366 x 768. Enable RANDOM resolution support for such
602 * panels by default */ 608 * panels by default */
603 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), 609 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
604 intel_dsi->video_frmt_cfg_bits | 610 intel_dsi->video_frmt_cfg_bits |
605 intel_dsi->video_mode_format | 611 intel_dsi->video_mode_format |
606 IP_TG_CONFIG | 612 IP_TG_CONFIG |
607 RANDOM_DPI_DISPLAY_RESOLUTION); 613 RANDOM_DPI_DISPLAY_RESOLUTION);
608} 614}
609 615
610static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) 616static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index fd51867fd0d3..657eb5c1b9d8 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -116,6 +116,8 @@ struct intel_dsi {
116 u16 clk_hs_to_lp_count; 116 u16 clk_hs_to_lp_count;
117 117
118 u16 init_count; 118 u16 init_count;
119 u32 pclk;
120 u16 burst_mode_ratio;
119 121
120 /* all delays in ms */ 122 /* all delays in ms */
121 u16 backlight_off_delay; 123 u16 backlight_off_delay;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 7f1430ac8543..f4767fd2ebeb 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -430,7 +430,7 @@ void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
430 u32 mask; 430 u32 mask;
431 431
432 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | 432 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
433 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; 433 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
434 434
435 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100)) 435 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
436 DRM_ERROR("DPI FIFOs are not empty\n"); 436 DRM_ERROR("DPI FIFOs are not empty\n");
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 47c7584a4aa0..f6bdd44069ce 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -271,6 +271,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
271 u32 ths_prepare_ns, tclk_trail_ns; 271 u32 ths_prepare_ns, tclk_trail_ns;
272 u32 tclk_prepare_clkzero, ths_prepare_hszero; 272 u32 tclk_prepare_clkzero, ths_prepare_hszero;
273 u32 lp_to_hs_switch, hs_to_lp_switch; 273 u32 lp_to_hs_switch, hs_to_lp_switch;
274 u32 pclk, computed_ddr;
275 u16 burst_mode_ratio;
274 276
275 DRM_DEBUG_KMS("\n"); 277 DRM_DEBUG_KMS("\n");
276 278
@@ -284,8 +286,6 @@ static bool generic_init(struct intel_dsi_device *dsi)
284 else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565) 286 else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
285 bits_per_pixel = 16; 287 bits_per_pixel = 16;
286 288
287 bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
288
289 intel_dsi->operation_mode = mipi_config->is_cmd_mode; 289 intel_dsi->operation_mode = mipi_config->is_cmd_mode;
290 intel_dsi->video_mode_format = mipi_config->video_transfer_mode; 290 intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
291 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; 291 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@@ -297,6 +297,40 @@ static bool generic_init(struct intel_dsi_device *dsi)
297 intel_dsi->video_frmt_cfg_bits = 297 intel_dsi->video_frmt_cfg_bits =
298 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; 298 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
299 299
300 pclk = mode->clock;
301
302 /* Burst Mode Ratio
303 * Target ddr frequency from VBT / non burst ddr freq
304 * multiply by 100 to preserve remainder
305 */
306 if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
307 if (mipi_config->target_burst_mode_freq) {
308 computed_ddr =
309 (pclk * bits_per_pixel) / intel_dsi->lane_count;
310
311 if (mipi_config->target_burst_mode_freq <
312 computed_ddr) {
313 DRM_ERROR("Burst mode freq is less than computed\n");
314 return false;
315 }
316
317 burst_mode_ratio = DIV_ROUND_UP(
318 mipi_config->target_burst_mode_freq * 100,
319 computed_ddr);
320
321 pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
322 } else {
323 DRM_ERROR("Burst mode target is not set\n");
324 return false;
325 }
326 } else
327 burst_mode_ratio = 100;
328
329 intel_dsi->burst_mode_ratio = burst_mode_ratio;
330 intel_dsi->pclk = pclk;
331
332 bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
333
300 switch (intel_dsi->escape_clk_div) { 334 switch (intel_dsi->escape_clk_div) {
301 case 0: 335 case 0:
302 tlpx_ns = 50; 336 tlpx_ns = 50;
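
The burst-mode block above derives the ratio as the VBT's target burst DDR frequency over the DDR frequency the mode itself needs, multiplied by 100 so two digits of remainder survive integer division, then inflates pclk by the same ratio. A standalone arithmetic check (the VBT target below is an invented example value):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t pclk = 100000;		/* kHz, from the mode */
	uint32_t target_ddr = 750000;	/* kHz, as if read from the VBT */
	int bpp = 24, lanes = 4;

	/* DDR rate a non-burst link would need for this mode */
	uint32_t computed_ddr = pclk * bpp / lanes;		/* 600000 */

	/* percentage ratio; must be >= 100 or generic_init() errors out */
	uint16_t ratio = DIV_ROUND_UP(target_ddr * 100, computed_ddr);

	/* pixel clock inflated so timings match the faster burst link */
	pclk = DIV_ROUND_UP(pclk * ratio, 100);

	printf("ratio=%u pclk=%u\n", ratio, pclk);	/* ratio=125 pclk=125000 */
	return 0;
}
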
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d8bb1ea2f0da..fa7a6ca34cd6 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -134,8 +134,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
134#else 134#else
135 135
136/* Get DSI clock from pixel clock */ 136/* Get DSI clock from pixel clock */
137static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode, 137static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
138 int pixel_format, int lane_count)
139{ 138{
140 u32 dsi_clk_khz; 139 u32 dsi_clk_khz;
141 u32 bpp; 140 u32 bpp;
@@ -156,7 +155,7 @@ static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
156 155
157 /* DSI data rate = pixel clock * bits per pixel / lane count 156 /* DSI data rate = pixel clock * bits per pixel / lane count
158 pixel clock is converted from KHz to Hz */ 157 pixel clock is converted from KHz to Hz */
159 dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count); 158 dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
160 159
161 return dsi_clk_khz; 160 return dsi_clk_khz;
162} 161}
@@ -191,7 +190,7 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
191 for (m = 62; m <= 92; m++) { 190 for (m = 62; m <= 92; m++) {
192 for (p = 2; p <= 6; p++) { 191 for (p = 2; p <= 6; p++) {
193 /* Find the optimal m and p divisors 192 /* Find the optimal m and p divisors
194 with minimal error +/- the required clock */ 193 with minimal error +/- the required clock */
195 calc_dsi_clk = (m * ref_clk) / p; 194 calc_dsi_clk = (m * ref_clk) / p;
196 if (calc_dsi_clk == target_dsi_clk) { 195 if (calc_dsi_clk == target_dsi_clk) {
197 calc_m = m; 196 calc_m = m;
@@ -228,15 +227,13 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
228static void vlv_configure_dsi_pll(struct intel_encoder *encoder) 227static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
229{ 228{
230 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 229 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
231 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
232 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
233 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 230 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
234 int ret; 231 int ret;
235 struct dsi_mnp dsi_mnp; 232 struct dsi_mnp dsi_mnp;
236 u32 dsi_clk; 233 u32 dsi_clk;
237 234
238 dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format, 235 dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
239 intel_dsi->lane_count); 236 intel_dsi->lane_count);
240 237
241 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); 238 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
242 if (ret) { 239 if (ret) {
@@ -318,8 +315,8 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
318 } 315 }
319 316
320 WARN(bpp != pipe_bpp, 317 WARN(bpp != pipe_bpp,
321 "bpp match assertion failure (expected %d, current %d)\n", 318 "bpp match assertion failure (expected %d, current %d)\n",
322 bpp, pipe_bpp); 319 bpp, pipe_bpp);
323} 320}
324 321
325u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) 322u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 56b47d2ffaf7..e40e3df33517 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -85,7 +85,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
85 { 85 {
86 .type = INTEL_DVO_CHIP_TMDS, 86 .type = INTEL_DVO_CHIP_TMDS,
87 .name = "ns2501", 87 .name = "ns2501",
88 .dvo_reg = DVOC, 88 .dvo_reg = DVOB,
89 .slave_addr = NS2501_ADDR, 89 .slave_addr = NS2501_ADDR,
90 .dev_ops = &ns2501_ops, 90 .dev_ops = &ns2501_ops,
91 } 91 }
@@ -185,12 +185,13 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
185 u32 dvo_reg = intel_dvo->dev.dvo_reg; 185 u32 dvo_reg = intel_dvo->dev.dvo_reg;
186 u32 temp = I915_READ(dvo_reg); 186 u32 temp = I915_READ(dvo_reg);
187 187
188 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
189 I915_READ(dvo_reg);
190 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, 188 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
191 &crtc->config.requested_mode, 189 &crtc->config.requested_mode,
192 &crtc->config.adjusted_mode); 190 &crtc->config.adjusted_mode);
193 191
192 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
193 I915_READ(dvo_reg);
194
194 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 195 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
195} 196}
196 197
@@ -226,10 +227,6 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
226 227
227 intel_crtc_update_dpms(crtc); 228 intel_crtc_update_dpms(crtc);
228 229
229 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
230 &config->requested_mode,
231 &config->adjusted_mode);
232
233 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 230 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
234 } else { 231 } else {
235 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 232 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f475414671d8..9b584f3fbb99 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -24,8 +24,10 @@
24 * David Airlie 24 * David Airlie
25 */ 25 */
26 26
27#include <linux/async.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/console.h>
29#include <linux/errno.h> 31#include <linux/errno.h>
30#include <linux/string.h> 32#include <linux/string.h>
31#include <linux/mm.h> 33#include <linux/mm.h>
@@ -331,24 +333,6 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
331 int num_connectors_enabled = 0; 333 int num_connectors_enabled = 0;
332 int num_connectors_detected = 0; 334 int num_connectors_detected = 0;
333 335
334 /*
335 * If the user specified any force options, just bail here
336 * and use that config.
337 */
338 for (i = 0; i < fb_helper->connector_count; i++) {
339 struct drm_fb_helper_connector *fb_conn;
340 struct drm_connector *connector;
341
342 fb_conn = fb_helper->connector_info[i];
343 connector = fb_conn->connector;
344
345 if (!enabled[i])
346 continue;
347
348 if (connector->force != DRM_FORCE_UNSPECIFIED)
349 return false;
350 }
351
352 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), 336 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
353 GFP_KERNEL); 337 GFP_KERNEL);
354 if (!save_enabled) 338 if (!save_enabled)
@@ -374,8 +358,18 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
374 continue; 358 continue;
375 } 359 }
376 360
361 if (connector->force == DRM_FORCE_OFF) {
362 DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
363 connector->name);
364 enabled[i] = false;
365 continue;
366 }
367
377 encoder = connector->encoder; 368 encoder = connector->encoder;
378 if (!encoder || WARN_ON(!encoder->crtc)) { 369 if (!encoder || WARN_ON(!encoder->crtc)) {
370 if (connector->force > DRM_FORCE_OFF)
371 goto bail;
372
379 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 373 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
380 connector->name); 374 connector->name);
381 enabled[i] = false; 375 enabled[i] = false;
@@ -394,8 +388,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
394 for (j = 0; j < fb_helper->connector_count; j++) { 388 for (j = 0; j < fb_helper->connector_count; j++) {
395 if (crtcs[j] == new_crtc) { 389 if (crtcs[j] == new_crtc) {
396 DRM_DEBUG_KMS("fallback: cloned configuration\n"); 390 DRM_DEBUG_KMS("fallback: cloned configuration\n");
397 fallback = true; 391 goto bail;
398 goto out;
399 } 392 }
400 } 393 }
401 394
@@ -466,8 +459,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
466 fallback = true; 459 fallback = true;
467 } 460 }
468 461
469out:
470 if (fallback) { 462 if (fallback) {
463bail:
471 DRM_DEBUG_KMS("Not using firmware configuration\n"); 464 DRM_DEBUG_KMS("Not using firmware configuration\n");
472 memcpy(enabled, save_enabled, dev->mode_config.num_connector); 465 memcpy(enabled, save_enabled, dev->mode_config.num_connector);
473 kfree(save_enabled); 466 kfree(save_enabled);
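
The initial-config rework above replaces the early "any force option set, so bail" scan with inline handling: a connector forced off is simply skipped, while a connector forced on that lacks a usable encoder/CRTC (or would clone one) jumps to the new bail label, which restores the saved state and gives up on reusing the firmware-programmed configuration. A simplified model of that decision, with an enum mirroring the drm_connector force ordering:

#include <stdbool.h>
#include <stdio.h>

/* mirrors DRM_FORCE_UNSPECIFIED < DRM_FORCE_OFF < DRM_FORCE_ON */
enum force { FORCE_UNSPECIFIED, FORCE_OFF, FORCE_ON };

struct conn { enum force force; bool has_crtc; };

/* false means "bail": give up on the firmware config entirely */
static bool apply_initial_config(const struct conn *conns, bool *enabled,
				 int n)
{
	for (int i = 0; i < n; i++) {
		if (conns[i].force == FORCE_OFF) {
			enabled[i] = false;	/* user said off: skip it */
			continue;
		}
		if (!conns[i].has_crtc && conns[i].force > FORCE_OFF)
			return false;		/* forced on but unusable */
	}
	return true;
}

int main(void)
{
	const struct conn conns[2] = {
		{ FORCE_OFF, true },
		{ FORCE_UNSPECIFIED, true },
	};
	bool enabled[2] = { true, true };

	printf("%s\n", apply_initial_config(conns, enabled, 2) ?
	       "firmware config usable" : "fall back to generic pass");
	return 0;
}
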
@@ -636,6 +629,15 @@ out:
636 return false; 629 return false;
637} 630}
638 631
632static void intel_fbdev_suspend_worker(struct work_struct *work)
633{
634 intel_fbdev_set_suspend(container_of(work,
635 struct drm_i915_private,
636 fbdev_suspend_work)->dev,
637 FBINFO_STATE_RUNNING,
638 true);
639}
640
639int intel_fbdev_init(struct drm_device *dev) 641int intel_fbdev_init(struct drm_device *dev)
640{ 642{
641 struct intel_fbdev *ifbdev; 643 struct intel_fbdev *ifbdev;
@@ -662,14 +664,16 @@ int intel_fbdev_init(struct drm_device *dev)
662 } 664 }
663 665
664 dev_priv->fbdev = ifbdev; 666 dev_priv->fbdev = ifbdev;
667 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
668
665 drm_fb_helper_single_add_all_connectors(&ifbdev->helper); 669 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
666 670
667 return 0; 671 return 0;
668} 672}
669 673
670void intel_fbdev_initial_config(struct drm_device *dev) 674void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
671{ 675{
672 struct drm_i915_private *dev_priv = dev->dev_private; 676 struct drm_i915_private *dev_priv = data;
673 struct intel_fbdev *ifbdev = dev_priv->fbdev; 677 struct intel_fbdev *ifbdev = dev_priv->fbdev;
674 678
675 /* Due to peculiar init order wrt to hpd handling this is separate. */ 679 /* Due to peculiar init order wrt to hpd handling this is separate. */
@@ -682,12 +686,15 @@ void intel_fbdev_fini(struct drm_device *dev)
682 if (!dev_priv->fbdev) 686 if (!dev_priv->fbdev)
683 return; 687 return;
684 688
689 flush_work(&dev_priv->fbdev_suspend_work);
690
691 async_synchronize_full();
685 intel_fbdev_destroy(dev, dev_priv->fbdev); 692 intel_fbdev_destroy(dev, dev_priv->fbdev);
686 kfree(dev_priv->fbdev); 693 kfree(dev_priv->fbdev);
687 dev_priv->fbdev = NULL; 694 dev_priv->fbdev = NULL;
688} 695}
689 696
690void intel_fbdev_set_suspend(struct drm_device *dev, int state) 697void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
691{ 698{
692 struct drm_i915_private *dev_priv = dev->dev_private; 699 struct drm_i915_private *dev_priv = dev->dev_private;
693 struct intel_fbdev *ifbdev = dev_priv->fbdev; 700 struct intel_fbdev *ifbdev = dev_priv->fbdev;
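
intel_fbdev_initial_config() now matches the kernel's async_func_t signature (void *data, async_cookie_t cookie), so driver load can punt the slow fbdev setup off the probe path, and intel_fbdev_fini() waits for it with async_synchronize_full() before tearing anything down. The scheduling call site is not part of this diff; the pairing presumably looks like async_schedule(intel_fbdev_initial_config, dev_priv). A pthread stand-in for the same schedule/synchronize contract:

#include <pthread.h>
#include <stdio.h>

typedef unsigned long async_cookie_t;	/* stand-in for the kernel type */

/* same shape as async_func_t: void (*)(void *, async_cookie_t) */
static void fbdev_initial_config(void *data, async_cookie_t cookie)
{
	(void)cookie;
	printf("configuring fbdev for %s\n", (const char *)data);
}

static void *shim(void *data)
{
	fbdev_initial_config(data, 0);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* driver load: kick the config off the probe path */
	pthread_create(&worker, NULL, shim, "card0");

	/* driver unload: like async_synchronize_full(), wait first */
	pthread_join(&worker, NULL);
	return 0;
}
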
@@ -698,6 +705,33 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
698 705
699 info = ifbdev->helper.fbdev; 706 info = ifbdev->helper.fbdev;
700 707
708 if (synchronous) {
709 /* Flush any pending work to turn the console on, and then
710 * wait to turn it off. It must be synchronous as we are
711 * about to suspend or unload the driver.
712 *
713 * Note that from within the work-handler, we cannot flush
714 * ourselves, so only flush outstanding work upon suspend!
715 */
716 if (state != FBINFO_STATE_RUNNING)
717 flush_work(&dev_priv->fbdev_suspend_work);
718 console_lock();
719 } else {
720 /*
 721 * The console lock can be pretty contended on resume due
722 * to all the printk activity. Try to keep it out of the hot
723 * path of resume if possible.
724 */
725 WARN_ON(state != FBINFO_STATE_RUNNING);
726 if (!console_trylock()) {
727 /* Don't block our own workqueue as this can
728 * be run in parallel with other i915.ko tasks.
729 */
730 schedule_work(&dev_priv->fbdev_suspend_work);
731 return;
732 }
733 }
734
701 /* On resume from hibernation: If the object is shmemfs backed, it has 735 /* On resume from hibernation: If the object is shmemfs backed, it has
702 * been restored from swap. If the object is stolen however, it will be 736 * been restored from swap. If the object is stolen however, it will be
703 * full of whatever garbage was left in there. 737 * full of whatever garbage was left in there.
@@ -706,6 +740,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
706 memset_io(info->screen_base, 0, info->screen_size); 740 memset_io(info->screen_base, 0, info->screen_size);
707 741
708 fb_set_suspend(info, state); 742 fb_set_suspend(info, state);
743 console_unlock();
709} 744}
710 745
711void intel_fbdev_output_poll_changed(struct drm_device *dev) 746void intel_fbdev_output_poll_changed(struct drm_device *dev)
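
The body above splits two cases: the synchronous suspend/unload path flushes pending work and takes console_lock() outright, while the resume path only tries the lock and, when printk traffic holds it, defers the whole operation to fbdev_suspend_work rather than stalling resume. A compilable analogue using pthread_mutex_trylock():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_deferred_work(void)
{
	/* stand-in for schedule_work(&dev_priv->fbdev_suspend_work) */
	printf("deferred to worker\n");
}

static void set_suspend(bool synchronous)
{
	if (synchronous) {
		/* suspend/unload: must finish now, block if needed */
		pthread_mutex_lock(&console_lock);
	} else if (pthread_mutex_trylock(&console_lock) != 0) {
		/* resume hot path: lock contended, punt to the worker */
		queue_deferred_work();
		return;
	}

	/* ... the fb_set_suspend() equivalent runs here ... */

	pthread_mutex_unlock(&console_lock);
}

int main(void)
{
	set_suspend(true);	/* suspend: always synchronous */
	set_suspend(false);	/* resume: uncontended in this toy run */
	return 0;
}
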
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5a9de21637b7..29ec1535992d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -869,10 +869,15 @@ static enum drm_mode_status
869intel_hdmi_mode_valid(struct drm_connector *connector, 869intel_hdmi_mode_valid(struct drm_connector *connector,
870 struct drm_display_mode *mode) 870 struct drm_display_mode *mode)
871{ 871{
872 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), 872 int clock = mode->clock;
873 true)) 873
874 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
875 clock *= 2;
876
877 if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
878 true))
874 return MODE_CLOCK_HIGH; 879 return MODE_CLOCK_HIGH;
875 if (mode->clock < 20000) 880 if (clock < 20000)
876 return MODE_CLOCK_LOW; 881 return MODE_CLOCK_LOW;
877 882
878 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 883 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -890,7 +895,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
890 if (HAS_GMCH_DISPLAY(dev)) 895 if (HAS_GMCH_DISPLAY(dev))
891 return false; 896 return false;
892 897
893 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 898 for_each_intel_encoder(dev, encoder) {
894 if (encoder->new_crtc != crtc) 899 if (encoder->new_crtc != crtc)
895 continue; 900 continue;
896 901
@@ -926,6 +931,10 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
926 intel_hdmi->color_range = 0; 931 intel_hdmi->color_range = 0;
927 } 932 }
928 933
934 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
935 pipe_config->pixel_multiplier = 2;
936 }
937
929 if (intel_hdmi->color_range) 938 if (intel_hdmi->color_range)
930 pipe_config->limited_color_range = true; 939 pipe_config->limited_color_range = true;
931 940
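
mode_valid() above doubles the clock for DRM_MODE_FLAG_DBLCLK (pixel-repeated) modes before range-checking it, and compute_config sets pixel_multiplier = 2 so the port really runs at twice the pixel clock. A standalone check of the arithmetic (flag value as in drm_mode.h; the 225000 kHz cap is just an assumed port limit):

#include <stdio.h>

#define DRM_MODE_FLAG_DBLCLK	(1 << 12)

struct mode { int clock; unsigned int flags; };	/* clock in kHz */

static const char *mode_valid(const struct mode *m, int port_limit_khz)
{
	int clock = m->clock;

	/* pixel-repeated modes drive the port at twice the pixel clock */
	if (m->flags & DRM_MODE_FLAG_DBLCLK)
		clock *= 2;

	if (clock > port_limit_khz)
		return "MODE_CLOCK_HIGH";
	if (clock < 20000)
		return "MODE_CLOCK_LOW";
	return "MODE_OK";
}

int main(void)
{
	/* 480i: 13500 kHz pixel clock, 27000 kHz on the wire */
	struct mode m480i = { .clock = 13500, .flags = DRM_MODE_FLAG_DBLCLK };

	printf("%s\n", mode_valid(&m480i, 225000));	/* MODE_OK */
	return 0;
}

Without the doubling, 13500 kHz would have been rejected as MODE_CLOCK_LOW even though the port actually runs at 27000 kHz.
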
@@ -967,104 +976,117 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
967 return true; 976 return true;
968} 977}
969 978
970static enum drm_connector_status 979static void
971intel_hdmi_detect(struct drm_connector *connector, bool force) 980intel_hdmi_unset_edid(struct drm_connector *connector)
972{ 981{
973 struct drm_device *dev = connector->dev;
974 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 982 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
975 struct intel_digital_port *intel_dig_port =
976 hdmi_to_dig_port(intel_hdmi);
977 struct intel_encoder *intel_encoder = &intel_dig_port->base;
978 struct drm_i915_private *dev_priv = dev->dev_private;
979 struct edid *edid;
980 enum intel_display_power_domain power_domain;
981 enum drm_connector_status status = connector_status_disconnected;
982 983
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
+	intel_hdmi->has_hdmi_sink = false;
+	intel_hdmi->has_audio = false;
+	intel_hdmi->rgb_quant_range_selectable = false;
+
+	kfree(to_intel_connector(connector)->detect_edid);
+	to_intel_connector(connector)->detect_edid = NULL;
+}
+
+static bool
+intel_hdmi_set_edid(struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_encoder *intel_encoder =
+		&hdmi_to_dig_port(intel_hdmi)->base;
+	enum intel_display_power_domain power_domain;
+	struct edid *edid;
+	bool connected = false;
 
 	power_domain = intel_display_port_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
-	intel_hdmi->has_hdmi_sink = false;
-	intel_hdmi->has_audio = false;
-	intel_hdmi->rgb_quant_range_selectable = false;
 	edid = drm_get_edid(connector,
 			    intel_gmbus_get_adapter(dev_priv,
 						    intel_hdmi->ddc_bus));
 
-	if (edid) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
-			status = connector_status_connected;
-			if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
-				intel_hdmi->has_hdmi_sink =
-					drm_detect_hdmi_monitor(edid);
-			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
-			intel_hdmi->rgb_quant_range_selectable =
-				drm_rgb_quant_range_selectable(edid);
-		}
-		kfree(edid);
-	}
+	intel_display_power_put(dev_priv, power_domain);
+
+	to_intel_connector(connector)->detect_edid = edid;
+	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+		intel_hdmi->rgb_quant_range_selectable =
+			drm_rgb_quant_range_selectable(edid);
 
-	if (status == connector_status_connected) {
-		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
-			intel_hdmi->has_audio =
-				(intel_hdmi->force_audio == HDMI_AUDIO_ON);
-		intel_encoder->type = INTEL_OUTPUT_HDMI;
+		intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+			intel_hdmi->has_audio =
+				intel_hdmi->force_audio == HDMI_AUDIO_ON;
+
+		if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+			intel_hdmi->has_hdmi_sink =
+				drm_detect_hdmi_monitor(edid);
+
+		connected = true;
 	}
 
-	intel_display_power_put(dev_priv, power_domain);
+	return connected;
+}
+
+static enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+	enum drm_connector_status status;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	intel_hdmi_unset_edid(connector);
+
+	if (intel_hdmi_set_edid(connector)) {
+		struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+
+		hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+		status = connector_status_connected;
+	} else
+		status = connector_status_disconnected;
 
 	return status;
 }
 
-static int intel_hdmi_get_modes(struct drm_connector *connector)
+static void
+intel_hdmi_force(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	enum intel_display_power_domain power_domain;
-	int ret;
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
-	/* We should parse the EDID data and find out if it's an HDMI sink so
-	 * we can send audio to it.
-	 */
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
-	intel_display_power_get(dev_priv, power_domain);
+	intel_hdmi_unset_edid(connector);
 
-	ret = intel_ddc_get_modes(connector,
-				  intel_gmbus_get_adapter(dev_priv,
-							  intel_hdmi->ddc_bus));
+	if (connector->status != connector_status_connected)
+		return;
 
-	intel_display_power_put(dev_priv, power_domain);
+	intel_hdmi_set_edid(connector);
+	hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+}
 
-	return ret;
+static int intel_hdmi_get_modes(struct drm_connector *connector)
+{
+	struct edid *edid;
+
+	edid = to_intel_connector(connector)->detect_edid;
+	if (edid == NULL)
+		return 0;
+
+	return intel_connector_update_modes(connector, edid);
 }
 
 static bool
 intel_hdmi_detect_audio(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	enum intel_display_power_domain power_domain;
-	struct edid *edid;
 	bool has_audio = false;
+	struct edid *edid;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
-	intel_display_power_get(dev_priv, power_domain);
-
-	edid = drm_get_edid(connector,
-			    intel_gmbus_get_adapter(dev_priv,
-						    intel_hdmi->ddc_bus));
-	if (edid) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL)
-			has_audio = drm_detect_monitor_audio(edid);
-		kfree(edid);
-	}
-
-	intel_display_power_put(dev_priv, power_domain);
+	edid = to_intel_connector(connector)->detect_edid;
+	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
 
 	return has_audio;
 }
@@ -1265,6 +1287,8 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 	enum pipe pipe = intel_crtc->pipe;
 	u32 val;
 
+	intel_hdmi_prepare(encoder);
+
 	mutex_lock(&dev_priv->dpio_lock);
 
 	/* program left/right clock distribution */
@@ -1434,8 +1458,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 	for (i = 0; i < 4; i++) {
 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-		val &= ~DPIO_SWING_MARGIN_MASK;
-		val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+		val &= ~DPIO_SWING_MARGIN000_MASK;
+		val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
 	}
 
@@ -1482,6 +1506,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
+	kfree(to_intel_connector(connector)->detect_edid);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1489,6 +1514,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 	.dpms = intel_connector_dpms,
 	.detect = intel_hdmi_detect,
+	.force = intel_hdmi_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_hdmi_set_property,
 	.destroy = intel_hdmi_destroy,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
new file mode 100644
index 000000000000..bafd38b5703e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -0,0 +1,1766 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
28 *
29 */
30
31/**
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
33 *
34 * Motivation:
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
38 *
39 * One of the main differences with the legacy HW contexts is that logical
 40 * ring contexts incorporate many more things into the context's state, like
41 * PDPs or ringbuffer control registers:
42 *
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 45 * contained there means you don't need to do a ppgtt->switch_mm yourself,
46 * instead, the GPU will do it for you on the context switch.
47 *
 48 * But what about the ringbuffer control registers (head, tail, etc.)?
 49 * Shouldn't we just need a set of those per engine command streamer? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
55 *
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 57 * to a context is via a context execution list, ergo "Execlists".
58 *
59 * LRC implementation:
60 * Regarding the creation of contexts, we have:
61 *
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
65 *
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
68 * like before) we need:
69 *
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
72 *
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use them. To handle this, we have implemented a deferred creation of LR
77 * contexts:
78 *
 79 * The local context starts its life as a hollow or blank holder that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object and
83 * so on.
84 *
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
88 *
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
93 *
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but instead, kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the
101 * context itself.
102 *
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 107 * globally unique 20-bit submission ID.
108 *
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
114 *
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
 120 * we use a NULL second context) or the first two requests have unique IDs.
 120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
122 *
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
132 *
133 */
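To make the head-of-queue coalescing rule described above concrete, here is a minimal standalone sketch (illustrative only, not part of the file; all names in it are hypothetical, and the driver's real logic lives in execlists_context_unqueue() below):

/* Illustrative sketch of picking an ELSP pair: collapse the run of
 * same-context requests at the head (only the newest tail matters),
 * then take the first request with a different context, if any. */
struct fake_req {
	int ctx_id;		/* would come from intel_execlists_ctx_id() */
	unsigned int tail;
	struct fake_req *next;
};

static void pick_elsp_pair(struct fake_req *queue,
			   struct fake_req **req0, struct fake_req **req1)
{
	struct fake_req *cur;

	*req0 = *req1 = NULL;
	for (cur = queue; cur; cur = cur->next) {
		if (!*req0 || (*req0)->ctx_id == cur->ctx_id)
			*req0 = cur;	/* same context: keep only the newest */
		else {
			*req1 = cur;	/* second, distinct context */
			break;
		}
	}
}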
134
135#include <drm/drmP.h>
136#include <drm/i915_drm.h>
137#include "i915_drv.h"
138
139#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
140#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
141
142#define GEN8_LR_CONTEXT_ALIGN 4096
143
144#define RING_EXECLIST_QFULL (1 << 0x2)
145#define RING_EXECLIST1_VALID (1 << 0x3)
146#define RING_EXECLIST0_VALID (1 << 0x4)
147#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
148#define RING_EXECLIST1_ACTIVE (1 << 0x11)
149#define RING_EXECLIST0_ACTIVE (1 << 0x12)
150
151#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
152#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
153#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
154#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
155#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
156#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
157
158#define CTX_LRI_HEADER_0 0x01
159#define CTX_CONTEXT_CONTROL 0x02
160#define CTX_RING_HEAD 0x04
161#define CTX_RING_TAIL 0x06
162#define CTX_RING_BUFFER_START 0x08
163#define CTX_RING_BUFFER_CONTROL 0x0a
164#define CTX_BB_HEAD_U 0x0c
165#define CTX_BB_HEAD_L 0x0e
166#define CTX_BB_STATE 0x10
167#define CTX_SECOND_BB_HEAD_U 0x12
168#define CTX_SECOND_BB_HEAD_L 0x14
169#define CTX_SECOND_BB_STATE 0x16
170#define CTX_BB_PER_CTX_PTR 0x18
171#define CTX_RCS_INDIRECT_CTX 0x1a
172#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
173#define CTX_LRI_HEADER_1 0x21
174#define CTX_CTX_TIMESTAMP 0x22
175#define CTX_PDP3_UDW 0x24
176#define CTX_PDP3_LDW 0x26
177#define CTX_PDP2_UDW 0x28
178#define CTX_PDP2_LDW 0x2a
179#define CTX_PDP1_UDW 0x2c
180#define CTX_PDP1_LDW 0x2e
181#define CTX_PDP0_UDW 0x30
182#define CTX_PDP0_LDW 0x32
183#define CTX_LRI_HEADER_2 0x41
184#define CTX_R_PWR_CLK_STATE 0x42
185#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
186
187#define GEN8_CTX_VALID (1<<0)
188#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
189#define GEN8_CTX_FORCE_RESTORE (1<<2)
190#define GEN8_CTX_L3LLC_COHERENT (1<<5)
191#define GEN8_CTX_PRIVILEGE (1<<8)
192enum {
193 ADVANCED_CONTEXT = 0,
194 LEGACY_CONTEXT,
195 ADVANCED_AD_CONTEXT,
196 LEGACY_64B_CONTEXT
197};
198#define GEN8_CTX_MODE_SHIFT 3
199enum {
200 FAULT_AND_HANG = 0,
201 FAULT_AND_HALT, /* Debug only */
202 FAULT_AND_STREAM,
203 FAULT_AND_CONTINUE /* Unsupported */
204};
205#define GEN8_CTX_ID_SHIFT 32
206
207/**
208 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
209 * @dev: DRM device.
210 * @enable_execlists: value of i915.enable_execlists module parameter.
211 *
212 * Only certain platforms support Execlists (the prerequisites being
213 * support for Logical Ring Contexts and Aliasing PPGTT or better),
214 * and only when enabled via module parameter.
215 *
 216 * Return: 1 if Execlists is supported and should be enabled.
217 */
218int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
219{
220 WARN_ON(i915.enable_ppgtt == -1);
221
222 if (enable_execlists == 0)
223 return 0;
224
225 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
226 i915.use_mmio_flip >= 0)
227 return 1;
228
229 return 0;
230}
231
232/**
233 * intel_execlists_ctx_id() - get the Execlists Context ID
234 * @ctx_obj: Logical Ring Context backing object.
235 *
236 * Do not confuse with ctx->id! Unfortunately we have a name overload
 237 * here: the old context ID we pass to userspace as a handle so that
238 * they can refer to a context, and the new context ID we pass to the
239 * ELSP so that the GPU can inform us of the context status via
240 * interrupts.
241 *
 242 * Return: 20-bit globally unique context ID.
243 */
244u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
245{
246 u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
247
 248	/* LRCA is required to be 4K aligned so the most significant 20 bits
249 * are globally unique */
250 return lrca >> 12;
251}
252
253static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
254{
255 uint64_t desc;
256 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
257
258 WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
259
260 desc = GEN8_CTX_VALID;
261 desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
262 desc |= GEN8_CTX_L3LLC_COHERENT;
263 desc |= GEN8_CTX_PRIVILEGE;
264 desc |= lrca;
265 desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
266
267 /* TODO: WaDisableLiteRestore when we start using semaphore
268 * signalling between Command Streamers */
269 /* desc |= GEN8_CTX_FORCE_RESTORE; */
270
271 return desc;
272}
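A worked example of the resulting descriptor layout may help (values are illustrative, derived purely from the macros above):

/* Illustrative only: for a context object pinned at GGTT offset 0x12000
 * (4K aligned):
 *
 *   lrca   = 0x00012000
 *   ctx_id = lrca >> 12 = 0x12                     (upper 20 bits of the LRCA)
 *   desc   = GEN8_CTX_VALID                        (1 << 0)
 *          | LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT (1 << 3)
 *          | GEN8_CTX_L3LLC_COHERENT               (1 << 5)
 *          | GEN8_CTX_PRIVILEGE                    (1 << 8)
 *          | lrca                                  (bits 31:12)
 *          | (u64)ctx_id << GEN8_CTX_ID_SHIFT      (bits 51:32)
 *          = 0x0000001200012129
 */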
273
274static void execlists_elsp_write(struct intel_engine_cs *ring,
275 struct drm_i915_gem_object *ctx_obj0,
276 struct drm_i915_gem_object *ctx_obj1)
277{
278 struct drm_i915_private *dev_priv = ring->dev->dev_private;
279 uint64_t temp = 0;
280 uint32_t desc[4];
281 unsigned long flags;
282
283 /* XXX: You must always write both descriptors in the order below. */
284 if (ctx_obj1)
285 temp = execlists_ctx_descriptor(ctx_obj1);
286 else
287 temp = 0;
288 desc[1] = (u32)(temp >> 32);
289 desc[0] = (u32)temp;
290
291 temp = execlists_ctx_descriptor(ctx_obj0);
292 desc[3] = (u32)(temp >> 32);
293 desc[2] = (u32)temp;
294
295 /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
296 * are in progress.
297 *
298 * The other problem is that we can't just call gen6_gt_force_wake_get()
299 * because that function calls intel_runtime_pm_get(), which might sleep.
300 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
301 */
302 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
303 if (IS_CHERRYVIEW(dev_priv->dev)) {
304 if (dev_priv->uncore.fw_rendercount++ == 0)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv,
306 FORCEWAKE_RENDER);
307 if (dev_priv->uncore.fw_mediacount++ == 0)
308 dev_priv->uncore.funcs.force_wake_get(dev_priv,
309 FORCEWAKE_MEDIA);
310 } else {
311 if (dev_priv->uncore.forcewake_count++ == 0)
312 dev_priv->uncore.funcs.force_wake_get(dev_priv,
313 FORCEWAKE_ALL);
314 }
315 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
316
317 I915_WRITE(RING_ELSP(ring), desc[1]);
318 I915_WRITE(RING_ELSP(ring), desc[0]);
319 I915_WRITE(RING_ELSP(ring), desc[3]);
320 /* The context is automatically loaded after the following */
321 I915_WRITE(RING_ELSP(ring), desc[2]);
322
 323	/* ELSP is a write-only register, so use another nearby reg for the posting read instead */
324 POSTING_READ(RING_EXECLIST_STATUS(ring));
325
326 /* Release Force Wakeup (see the big comment above). */
327 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
328 if (IS_CHERRYVIEW(dev_priv->dev)) {
329 if (--dev_priv->uncore.fw_rendercount == 0)
330 dev_priv->uncore.funcs.force_wake_put(dev_priv,
331 FORCEWAKE_RENDER);
332 if (--dev_priv->uncore.fw_mediacount == 0)
333 dev_priv->uncore.funcs.force_wake_put(dev_priv,
334 FORCEWAKE_MEDIA);
335 } else {
336 if (--dev_priv->uncore.forcewake_count == 0)
337 dev_priv->uncore.funcs.force_wake_put(dev_priv,
338 FORCEWAKE_ALL);
339 }
340
341 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
342}
343
344static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
345{
346 struct page *page;
347 uint32_t *reg_state;
348
349 page = i915_gem_object_get_page(ctx_obj, 1);
350 reg_state = kmap_atomic(page);
351
352 reg_state[CTX_RING_TAIL+1] = tail;
353
354 kunmap_atomic(reg_state);
355
356 return 0;
357}
358
359static int execlists_submit_context(struct intel_engine_cs *ring,
360 struct intel_context *to0, u32 tail0,
361 struct intel_context *to1, u32 tail1)
362{
363 struct drm_i915_gem_object *ctx_obj0;
364 struct drm_i915_gem_object *ctx_obj1 = NULL;
365
366 ctx_obj0 = to0->engine[ring->id].state;
367 BUG_ON(!ctx_obj0);
368 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
369
370 execlists_ctx_write_tail(ctx_obj0, tail0);
371
372 if (to1) {
373 ctx_obj1 = to1->engine[ring->id].state;
374 BUG_ON(!ctx_obj1);
375 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
376
377 execlists_ctx_write_tail(ctx_obj1, tail1);
378 }
379
380 execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
381
382 return 0;
383}
384
385static void execlists_context_unqueue(struct intel_engine_cs *ring)
386{
387 struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
388 struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
389 struct drm_i915_private *dev_priv = ring->dev->dev_private;
390
391 assert_spin_locked(&ring->execlist_lock);
392
393 if (list_empty(&ring->execlist_queue))
394 return;
395
396 /* Try to read in pairs */
397 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
398 execlist_link) {
399 if (!req0) {
400 req0 = cursor;
401 } else if (req0->ctx == cursor->ctx) {
402 /* Same ctx: ignore first request, as second request
403 * will update tail past first request's workload */
404 cursor->elsp_submitted = req0->elsp_submitted;
405 list_del(&req0->execlist_link);
406 queue_work(dev_priv->wq, &req0->work);
407 req0 = cursor;
408 } else {
409 req1 = cursor;
410 break;
411 }
412 }
413
414 WARN_ON(req1 && req1->elsp_submitted);
415
416 WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
417 req1 ? req1->ctx : NULL,
418 req1 ? req1->tail : 0));
419
420 req0->elsp_submitted++;
421 if (req1)
422 req1->elsp_submitted++;
423}
424
425static bool execlists_check_remove_request(struct intel_engine_cs *ring,
426 u32 request_id)
427{
428 struct drm_i915_private *dev_priv = ring->dev->dev_private;
429 struct intel_ctx_submit_request *head_req;
430
431 assert_spin_locked(&ring->execlist_lock);
432
433 head_req = list_first_entry_or_null(&ring->execlist_queue,
434 struct intel_ctx_submit_request,
435 execlist_link);
436
437 if (head_req != NULL) {
438 struct drm_i915_gem_object *ctx_obj =
439 head_req->ctx->engine[ring->id].state;
440 if (intel_execlists_ctx_id(ctx_obj) == request_id) {
441 WARN(head_req->elsp_submitted == 0,
442 "Never submitted head request\n");
443
444 if (--head_req->elsp_submitted <= 0) {
445 list_del(&head_req->execlist_link);
446 queue_work(dev_priv->wq, &head_req->work);
447 return true;
448 }
449 }
450 }
451
452 return false;
453}
454
455/**
456 * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
457 * @ring: Engine Command Streamer to handle.
458 *
459 * Check the unread Context Status Buffers and manage the submission of new
460 * contexts to the ELSP accordingly.
461 */
462void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
463{
464 struct drm_i915_private *dev_priv = ring->dev->dev_private;
465 u32 status_pointer;
466 u8 read_pointer;
467 u8 write_pointer;
468 u32 status;
469 u32 status_id;
470 u32 submit_contexts = 0;
471
472 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
473
474 read_pointer = ring->next_context_status_buffer;
475 write_pointer = status_pointer & 0x07;
476 if (read_pointer > write_pointer)
477 write_pointer += 6;
478
479 spin_lock(&ring->execlist_lock);
480
481 while (read_pointer < write_pointer) {
482 read_pointer++;
483 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
484 (read_pointer % 6) * 8);
485 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
486 (read_pointer % 6) * 8 + 4);
487
488 if (status & GEN8_CTX_STATUS_PREEMPTED) {
489 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
490 if (execlists_check_remove_request(ring, status_id))
491 WARN(1, "Lite Restored request removed from queue\n");
492 } else
493 WARN(1, "Preemption without Lite Restore\n");
494 }
495
496 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
497 (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
498 if (execlists_check_remove_request(ring, status_id))
499 submit_contexts++;
500 }
501 }
502
503 if (submit_contexts != 0)
504 execlists_context_unqueue(ring);
505
506 spin_unlock(&ring->execlist_lock);
507
508 WARN(submit_contexts > 2, "More than two context complete events?\n");
509 ring->next_context_status_buffer = write_pointer % 6;
510
511 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
512 ((u32)ring->next_context_status_buffer & 0x07) << 8);
513}
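The read/write pointer arithmetic above wraps over the six-entry context status buffer; a short worked example (numbers chosen for illustration):

/* Illustrative only: if next_context_status_buffer (read_pointer) is 5 and
 * the hardware reports write_pointer == 1, then read_pointer > write_pointer
 * and write_pointer becomes 7. The loop visits entries (6 % 6) == 0 and
 * (7 % 6) == 1, i.e. exactly the two CSB entries written since the last
 * interrupt, and next_context_status_buffer ends up as 7 % 6 == 1, back in
 * sync with the hardware. */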
514
515static void execlists_free_request_task(struct work_struct *work)
516{
517 struct intel_ctx_submit_request *req =
518 container_of(work, struct intel_ctx_submit_request, work);
519 struct drm_device *dev = req->ring->dev;
520 struct drm_i915_private *dev_priv = dev->dev_private;
521
522 intel_runtime_pm_put(dev_priv);
523
524 mutex_lock(&dev->struct_mutex);
525 i915_gem_context_unreference(req->ctx);
526 mutex_unlock(&dev->struct_mutex);
527
528 kfree(req);
529}
530
531static int execlists_context_queue(struct intel_engine_cs *ring,
532 struct intel_context *to,
533 u32 tail)
534{
535 struct intel_ctx_submit_request *req = NULL, *cursor;
536 struct drm_i915_private *dev_priv = ring->dev->dev_private;
537 unsigned long flags;
538 int num_elements = 0;
539
540 req = kzalloc(sizeof(*req), GFP_KERNEL);
541 if (req == NULL)
542 return -ENOMEM;
543 req->ctx = to;
544 i915_gem_context_reference(req->ctx);
545 req->ring = ring;
546 req->tail = tail;
547 INIT_WORK(&req->work, execlists_free_request_task);
548
549 intel_runtime_pm_get(dev_priv);
550
551 spin_lock_irqsave(&ring->execlist_lock, flags);
552
553 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
554 if (++num_elements > 2)
555 break;
556
557 if (num_elements > 2) {
558 struct intel_ctx_submit_request *tail_req;
559
560 tail_req = list_last_entry(&ring->execlist_queue,
561 struct intel_ctx_submit_request,
562 execlist_link);
563
564 if (to == tail_req->ctx) {
565 WARN(tail_req->elsp_submitted != 0,
566 "More than 2 already-submitted reqs queued\n");
567 list_del(&tail_req->execlist_link);
568 queue_work(dev_priv->wq, &tail_req->work);
569 }
570 }
571
572 list_add_tail(&req->execlist_link, &ring->execlist_queue);
573 if (num_elements == 0)
574 execlists_context_unqueue(ring);
575
576 spin_unlock_irqrestore(&ring->execlist_lock, flags);
577
578 return 0;
579}
580
581static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
582{
583 struct intel_engine_cs *ring = ringbuf->ring;
584 uint32_t flush_domains;
585 int ret;
586
587 flush_domains = 0;
588 if (ring->gpu_caches_dirty)
589 flush_domains = I915_GEM_GPU_DOMAINS;
590
591 ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
592 if (ret)
593 return ret;
594
595 ring->gpu_caches_dirty = false;
596 return 0;
597}
598
599static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
600 struct list_head *vmas)
601{
602 struct intel_engine_cs *ring = ringbuf->ring;
603 struct i915_vma *vma;
604 uint32_t flush_domains = 0;
605 bool flush_chipset = false;
606 int ret;
607
608 list_for_each_entry(vma, vmas, exec_list) {
609 struct drm_i915_gem_object *obj = vma->obj;
610
611 ret = i915_gem_object_sync(obj, ring);
612 if (ret)
613 return ret;
614
615 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
616 flush_chipset |= i915_gem_clflush_object(obj, false);
617
618 flush_domains |= obj->base.write_domain;
619 }
620
621 if (flush_domains & I915_GEM_DOMAIN_GTT)
622 wmb();
623
624 /* Unconditionally invalidate gpu caches and ensure that we do flush
625 * any residual writes from the previous batch.
626 */
627 return logical_ring_invalidate_all_caches(ringbuf);
628}
629
630/**
631 * execlists_submission() - submit a batchbuffer for execution, Execlists style
632 * @dev: DRM device.
633 * @file: DRM file.
634 * @ring: Engine Command Streamer to submit to.
635 * @ctx: Context to employ for this submission.
636 * @args: execbuffer call arguments.
637 * @vmas: list of vmas.
638 * @batch_obj: the batchbuffer to submit.
639 * @exec_start: batchbuffer start virtual address pointer.
640 * @flags: translated execbuffer call flags.
641 *
642 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
643 * away the submission details of the execbuffer ioctl call.
644 *
645 * Return: non-zero if the submission fails.
646 */
647int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
648 struct intel_engine_cs *ring,
649 struct intel_context *ctx,
650 struct drm_i915_gem_execbuffer2 *args,
651 struct list_head *vmas,
652 struct drm_i915_gem_object *batch_obj,
653 u64 exec_start, u32 flags)
654{
655 struct drm_i915_private *dev_priv = dev->dev_private;
656 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
657 int instp_mode;
658 u32 instp_mask;
659 int ret;
660
661 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
662 instp_mask = I915_EXEC_CONSTANTS_MASK;
663 switch (instp_mode) {
664 case I915_EXEC_CONSTANTS_REL_GENERAL:
665 case I915_EXEC_CONSTANTS_ABSOLUTE:
666 case I915_EXEC_CONSTANTS_REL_SURFACE:
667 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
668 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
669 return -EINVAL;
670 }
671
672 if (instp_mode != dev_priv->relative_constants_mode) {
673 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
674 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
675 return -EINVAL;
676 }
677
678 /* The HW changed the meaning on this bit on gen6 */
679 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
680 }
681 break;
682 default:
683 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
684 return -EINVAL;
685 }
686
687 if (args->num_cliprects != 0) {
688 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
689 return -EINVAL;
690 } else {
691 if (args->DR4 == 0xffffffff) {
692 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
693 args->DR4 = 0;
694 }
695
696 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
697 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
698 return -EINVAL;
699 }
700 }
701
702 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
703 DRM_DEBUG("sol reset is gen7 only\n");
704 return -EINVAL;
705 }
706
707 ret = execlists_move_to_gpu(ringbuf, vmas);
708 if (ret)
709 return ret;
710
711 if (ring == &dev_priv->ring[RCS] &&
712 instp_mode != dev_priv->relative_constants_mode) {
713 ret = intel_logical_ring_begin(ringbuf, 4);
714 if (ret)
715 return ret;
716
717 intel_logical_ring_emit(ringbuf, MI_NOOP);
718 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
719 intel_logical_ring_emit(ringbuf, INSTPM);
720 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
721 intel_logical_ring_advance(ringbuf);
722
723 dev_priv->relative_constants_mode = instp_mode;
724 }
725
726 ret = ring->emit_bb_start(ringbuf, exec_start, flags);
727 if (ret)
728 return ret;
729
730 i915_gem_execbuffer_move_to_active(vmas, ring);
731 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
732
733 return 0;
734}
735
736void intel_logical_ring_stop(struct intel_engine_cs *ring)
737{
738 struct drm_i915_private *dev_priv = ring->dev->dev_private;
739 int ret;
740
741 if (!intel_ring_initialized(ring))
742 return;
743
744 ret = intel_ring_idle(ring);
745 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
746 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
747 ring->name, ret);
748
749 /* TODO: Is this correct with Execlists enabled? */
750 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
751 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
 752		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
753 return;
754 }
755 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
756}
757
758int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
759{
760 struct intel_engine_cs *ring = ringbuf->ring;
761 int ret;
762
763 if (!ring->gpu_caches_dirty)
764 return 0;
765
766 ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
767 if (ret)
768 return ret;
769
770 ring->gpu_caches_dirty = false;
771 return 0;
772}
773
774/**
775 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
776 * @ringbuf: Logical Ringbuffer to advance.
777 *
778 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
779 * really happens during submission is that the context and current tail will be placed
780 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
781 * point, the tail *inside* the context is updated and the ELSP written to.
782 */
783void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
784{
785 struct intel_engine_cs *ring = ringbuf->ring;
786 struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
787
788 intel_logical_ring_advance(ringbuf);
789
790 if (intel_ring_stopped(ring))
791 return;
792
793 execlists_context_queue(ring, ctx, ringbuf->tail);
794}
795
796static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
797 struct intel_context *ctx)
798{
799 if (ring->outstanding_lazy_seqno)
800 return 0;
801
802 if (ring->preallocated_lazy_request == NULL) {
803 struct drm_i915_gem_request *request;
804
805 request = kmalloc(sizeof(*request), GFP_KERNEL);
806 if (request == NULL)
807 return -ENOMEM;
808
809 /* Hold a reference to the context this request belongs to
810 * (we will need it when the time comes to emit/retire the
811 * request).
812 */
813 request->ctx = ctx;
814 i915_gem_context_reference(request->ctx);
815
816 ring->preallocated_lazy_request = request;
817 }
818
819 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
820}
821
822static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
823 int bytes)
824{
825 struct intel_engine_cs *ring = ringbuf->ring;
826 struct drm_i915_gem_request *request;
827 u32 seqno = 0;
828 int ret;
829
830 if (ringbuf->last_retired_head != -1) {
831 ringbuf->head = ringbuf->last_retired_head;
832 ringbuf->last_retired_head = -1;
833
834 ringbuf->space = intel_ring_space(ringbuf);
835 if (ringbuf->space >= bytes)
836 return 0;
837 }
838
839 list_for_each_entry(request, &ring->request_list, list) {
840 if (__intel_ring_space(request->tail, ringbuf->tail,
841 ringbuf->size) >= bytes) {
842 seqno = request->seqno;
843 break;
844 }
845 }
846
847 if (seqno == 0)
848 return -ENOSPC;
849
850 ret = i915_wait_seqno(ring, seqno);
851 if (ret)
852 return ret;
853
854 i915_gem_retire_requests_ring(ring);
855 ringbuf->head = ringbuf->last_retired_head;
856 ringbuf->last_retired_head = -1;
857
858 ringbuf->space = intel_ring_space(ringbuf);
859 return 0;
860}
861
862static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
863 int bytes)
864{
865 struct intel_engine_cs *ring = ringbuf->ring;
866 struct drm_device *dev = ring->dev;
867 struct drm_i915_private *dev_priv = dev->dev_private;
868 unsigned long end;
869 int ret;
870
871 ret = logical_ring_wait_request(ringbuf, bytes);
872 if (ret != -ENOSPC)
873 return ret;
874
875 /* Force the context submission in case we have been skipping it */
876 intel_logical_ring_advance_and_submit(ringbuf);
877
878 /* With GEM the hangcheck timer should kick us out of the loop,
879 * leaving it early runs the risk of corrupting GEM state (due
880 * to running on almost untested codepaths). But on resume
881 * timers don't work yet, so prevent a complete hang in that
882 * case by choosing an insanely large timeout. */
883 end = jiffies + 60 * HZ;
884
885 do {
886 ringbuf->head = I915_READ_HEAD(ring);
887 ringbuf->space = intel_ring_space(ringbuf);
888 if (ringbuf->space >= bytes) {
889 ret = 0;
890 break;
891 }
892
893 msleep(1);
894
895 if (dev_priv->mm.interruptible && signal_pending(current)) {
896 ret = -ERESTARTSYS;
897 break;
898 }
899
900 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
901 dev_priv->mm.interruptible);
902 if (ret)
903 break;
904
905 if (time_after(jiffies, end)) {
906 ret = -EBUSY;
907 break;
908 }
909 } while (1);
910
911 return ret;
912}
913
914static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
915{
916 uint32_t __iomem *virt;
917 int rem = ringbuf->size - ringbuf->tail;
918
919 if (ringbuf->space < rem) {
920 int ret = logical_ring_wait_for_space(ringbuf, rem);
921
922 if (ret)
923 return ret;
924 }
925
926 virt = ringbuf->virtual_start + ringbuf->tail;
927 rem /= 4;
928 while (rem--)
929 iowrite32(MI_NOOP, virt++);
930
931 ringbuf->tail = 0;
932 ringbuf->space = intel_ring_space(ringbuf);
933
934 return 0;
935}
936
937static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
938{
939 int ret;
940
941 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
942 ret = logical_ring_wrap_buffer(ringbuf);
943 if (unlikely(ret))
944 return ret;
945 }
946
947 if (unlikely(ringbuf->space < bytes)) {
948 ret = logical_ring_wait_for_space(ringbuf, bytes);
949 if (unlikely(ret))
950 return ret;
951 }
952
953 return 0;
954}
955
956/**
957 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
958 *
959 * @ringbuf: Logical ringbuffer.
960 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
961 *
962 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
963 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
964 * and also preallocates a request (every workload submission is still mediated through
965 * requests, same as it did with legacy ringbuffer submission).
966 *
967 * Return: non-zero if the ringbuffer is not ready to be written to.
968 */
969int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
970{
971 struct intel_engine_cs *ring = ringbuf->ring;
972 struct drm_device *dev = ring->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private;
974 int ret;
975
976 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
977 dev_priv->mm.interruptible);
978 if (ret)
979 return ret;
980
981 ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
982 if (ret)
983 return ret;
984
985 /* Preallocate the olr before touching the ring */
986 ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
987 if (ret)
988 return ret;
989
990 ringbuf->space -= num_dwords * sizeof(uint32_t);
991 return 0;
992}
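As a usage sketch (mirroring the INSTPM emission in intel_execlists_submission() above), a caller reserves DWORDs, emits them and advances; the wrapper function name and NOOP payload here are made up for illustration:

/* Illustrative only: emit two no-ops through a logical ringbuffer. */
static int example_emit_noops(struct intel_ringbuffer *ringbuf)
{
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}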
993
994static int gen8_init_common_ring(struct intel_engine_cs *ring)
995{
996 struct drm_device *dev = ring->dev;
997 struct drm_i915_private *dev_priv = dev->dev_private;
998
999 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1000 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1001
1002 I915_WRITE(RING_MODE_GEN7(ring),
1003 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1004 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1005 POSTING_READ(RING_MODE_GEN7(ring));
1006 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
1007
1008 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
1009
1010 return 0;
1011}
1012
1013static int gen8_init_render_ring(struct intel_engine_cs *ring)
1014{
1015 struct drm_device *dev = ring->dev;
1016 struct drm_i915_private *dev_priv = dev->dev_private;
1017 int ret;
1018
1019 ret = gen8_init_common_ring(ring);
1020 if (ret)
1021 return ret;
1022
1023 /* We need to disable the AsyncFlip performance optimisations in order
1024 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1025 * programmed to '1' on all products.
1026 *
1027 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1028 */
1029 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1030
1031 ret = intel_init_pipe_control(ring);
1032 if (ret)
1033 return ret;
1034
1035 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1036
1037 return ret;
1038}
1039
1040static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
1041 u64 offset, unsigned flags)
1042{
1043 bool ppgtt = !(flags & I915_DISPATCH_SECURE);
1044 int ret;
1045
1046 ret = intel_logical_ring_begin(ringbuf, 4);
1047 if (ret)
1048 return ret;
1049
1050 /* FIXME(BDW): Address space and security selectors. */
1051 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1052 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1053 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1054 intel_logical_ring_emit(ringbuf, MI_NOOP);
1055 intel_logical_ring_advance(ringbuf);
1056
1057 return 0;
1058}
1059
1060static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1061{
1062 struct drm_device *dev = ring->dev;
1063 struct drm_i915_private *dev_priv = dev->dev_private;
1064 unsigned long flags;
1065
1066 if (!dev->irq_enabled)
1067 return false;
1068
1069 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1070 if (ring->irq_refcount++ == 0) {
1071 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1072 POSTING_READ(RING_IMR(ring->mmio_base));
1073 }
1074 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1075
1076 return true;
1077}
1078
1079static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
1080{
1081 struct drm_device *dev = ring->dev;
1082 struct drm_i915_private *dev_priv = dev->dev_private;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1086 if (--ring->irq_refcount == 0) {
1087 I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
1088 POSTING_READ(RING_IMR(ring->mmio_base));
1089 }
1090 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1091}
1092
1093static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
1094 u32 invalidate_domains,
1095 u32 unused)
1096{
1097 struct intel_engine_cs *ring = ringbuf->ring;
1098 struct drm_device *dev = ring->dev;
1099 struct drm_i915_private *dev_priv = dev->dev_private;
1100 uint32_t cmd;
1101 int ret;
1102
1103 ret = intel_logical_ring_begin(ringbuf, 4);
1104 if (ret)
1105 return ret;
1106
1107 cmd = MI_FLUSH_DW + 1;
1108
1109 if (ring == &dev_priv->ring[VCS]) {
1110 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
1111 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1112 MI_FLUSH_DW_STORE_INDEX |
1113 MI_FLUSH_DW_OP_STOREDW;
1114 } else {
1115 if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
1116 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1117 MI_FLUSH_DW_OP_STOREDW;
1118 }
1119
1120 intel_logical_ring_emit(ringbuf, cmd);
1121 intel_logical_ring_emit(ringbuf,
1122 I915_GEM_HWS_SCRATCH_ADDR |
1123 MI_FLUSH_DW_USE_GTT);
1124 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1125 intel_logical_ring_emit(ringbuf, 0); /* value */
1126 intel_logical_ring_advance(ringbuf);
1127
1128 return 0;
1129}
1130
1131static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
1132 u32 invalidate_domains,
1133 u32 flush_domains)
1134{
1135 struct intel_engine_cs *ring = ringbuf->ring;
1136 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1137 u32 flags = 0;
1138 int ret;
1139
1140 flags |= PIPE_CONTROL_CS_STALL;
1141
1142 if (flush_domains) {
1143 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1144 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1145 }
1146
1147 if (invalidate_domains) {
1148 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1149 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1150 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1151 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1152 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1153 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1154 flags |= PIPE_CONTROL_QW_WRITE;
1155 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1156 }
1157
1158 ret = intel_logical_ring_begin(ringbuf, 6);
1159 if (ret)
1160 return ret;
1161
1162 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1163 intel_logical_ring_emit(ringbuf, flags);
1164 intel_logical_ring_emit(ringbuf, scratch_addr);
1165 intel_logical_ring_emit(ringbuf, 0);
1166 intel_logical_ring_emit(ringbuf, 0);
1167 intel_logical_ring_emit(ringbuf, 0);
1168 intel_logical_ring_advance(ringbuf);
1169
1170 return 0;
1171}
1172
1173static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1174{
1175 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1176}
1177
1178static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1179{
1180 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1181}
1182
1183static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
1184{
1185 struct intel_engine_cs *ring = ringbuf->ring;
1186 u32 cmd;
1187 int ret;
1188
1189 ret = intel_logical_ring_begin(ringbuf, 6);
1190 if (ret)
1191 return ret;
1192
1193 cmd = MI_STORE_DWORD_IMM_GEN8;
1194 cmd |= MI_GLOBAL_GTT;
1195
1196 intel_logical_ring_emit(ringbuf, cmd);
1197 intel_logical_ring_emit(ringbuf,
1198 (ring->status_page.gfx_addr +
1199 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
1200 intel_logical_ring_emit(ringbuf, 0);
1201 intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
1202 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1203 intel_logical_ring_emit(ringbuf, MI_NOOP);
1204 intel_logical_ring_advance_and_submit(ringbuf);
1205
1206 return 0;
1207}
1208
1209/**
1210 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1211 *
1212 * @ring: Engine Command Streamer.
1213 *
1214 */
1215void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
1216{
1217 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1218
1219 if (!intel_ring_initialized(ring))
1220 return;
1221
1222 intel_logical_ring_stop(ring);
1223 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1224 ring->preallocated_lazy_request = NULL;
1225 ring->outstanding_lazy_seqno = 0;
1226
1227 if (ring->cleanup)
1228 ring->cleanup(ring);
1229
1230 i915_cmd_parser_fini_ring(ring);
1231
1232 if (ring->status_page.obj) {
1233 kunmap(sg_page(ring->status_page.obj->pages->sgl));
1234 ring->status_page.obj = NULL;
1235 }
1236}
1237
1238static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
1239{
1240 int ret;
1241
1242 /* Intentionally left blank. */
1243 ring->buffer = NULL;
1244
1245 ring->dev = dev;
1246 INIT_LIST_HEAD(&ring->active_list);
1247 INIT_LIST_HEAD(&ring->request_list);
1248 init_waitqueue_head(&ring->irq_queue);
1249
1250 INIT_LIST_HEAD(&ring->execlist_queue);
1251 spin_lock_init(&ring->execlist_lock);
1252 ring->next_context_status_buffer = 0;
1253
1254 ret = i915_cmd_parser_init_ring(ring);
1255 if (ret)
1256 return ret;
1257
1258 if (ring->init) {
1259 ret = ring->init(ring);
1260 if (ret)
1261 return ret;
1262 }
1263
1264 ret = intel_lr_context_deferred_create(ring->default_context, ring);
1265
1266 return ret;
1267}
1268
1269static int logical_render_ring_init(struct drm_device *dev)
1270{
1271 struct drm_i915_private *dev_priv = dev->dev_private;
1272 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1273
1274 ring->name = "render ring";
1275 ring->id = RCS;
1276 ring->mmio_base = RENDER_RING_BASE;
1277 ring->irq_enable_mask =
1278 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1279 ring->irq_keep_mask =
1280 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1281 if (HAS_L3_DPF(dev))
1282 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1283
1284 ring->init = gen8_init_render_ring;
1285 ring->cleanup = intel_fini_pipe_control;
1286 ring->get_seqno = gen8_get_seqno;
1287 ring->set_seqno = gen8_set_seqno;
1288 ring->emit_request = gen8_emit_request;
1289 ring->emit_flush = gen8_emit_flush_render;
1290 ring->irq_get = gen8_logical_ring_get_irq;
1291 ring->irq_put = gen8_logical_ring_put_irq;
1292 ring->emit_bb_start = gen8_emit_bb_start;
1293
1294 return logical_ring_init(dev, ring);
1295}
1296
1297static int logical_bsd_ring_init(struct drm_device *dev)
1298{
1299 struct drm_i915_private *dev_priv = dev->dev_private;
1300 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
1301
1302 ring->name = "bsd ring";
1303 ring->id = VCS;
1304 ring->mmio_base = GEN6_BSD_RING_BASE;
1305 ring->irq_enable_mask =
1306 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1307 ring->irq_keep_mask =
1308 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1309
1310 ring->init = gen8_init_common_ring;
1311 ring->get_seqno = gen8_get_seqno;
1312 ring->set_seqno = gen8_set_seqno;
1313 ring->emit_request = gen8_emit_request;
1314 ring->emit_flush = gen8_emit_flush;
1315 ring->irq_get = gen8_logical_ring_get_irq;
1316 ring->irq_put = gen8_logical_ring_put_irq;
1317 ring->emit_bb_start = gen8_emit_bb_start;
1318
1319 return logical_ring_init(dev, ring);
1320}
1321
1322static int logical_bsd2_ring_init(struct drm_device *dev)
1323{
1324 struct drm_i915_private *dev_priv = dev->dev_private;
1325 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
1326
 1327	ring->name = "bsd2 ring";
1328 ring->id = VCS2;
1329 ring->mmio_base = GEN8_BSD2_RING_BASE;
1330 ring->irq_enable_mask =
1331 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1332 ring->irq_keep_mask =
1333 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1334
1335 ring->init = gen8_init_common_ring;
1336 ring->get_seqno = gen8_get_seqno;
1337 ring->set_seqno = gen8_set_seqno;
1338 ring->emit_request = gen8_emit_request;
1339 ring->emit_flush = gen8_emit_flush;
1340 ring->irq_get = gen8_logical_ring_get_irq;
1341 ring->irq_put = gen8_logical_ring_put_irq;
1342 ring->emit_bb_start = gen8_emit_bb_start;
1343
1344 return logical_ring_init(dev, ring);
1345}
1346
1347static int logical_blt_ring_init(struct drm_device *dev)
1348{
1349 struct drm_i915_private *dev_priv = dev->dev_private;
1350 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
1351
1352 ring->name = "blitter ring";
1353 ring->id = BCS;
1354 ring->mmio_base = BLT_RING_BASE;
1355 ring->irq_enable_mask =
1356 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1357 ring->irq_keep_mask =
1358 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1359
1360 ring->init = gen8_init_common_ring;
1361 ring->get_seqno = gen8_get_seqno;
1362 ring->set_seqno = gen8_set_seqno;
1363 ring->emit_request = gen8_emit_request;
1364 ring->emit_flush = gen8_emit_flush;
1365 ring->irq_get = gen8_logical_ring_get_irq;
1366 ring->irq_put = gen8_logical_ring_put_irq;
1367 ring->emit_bb_start = gen8_emit_bb_start;
1368
1369 return logical_ring_init(dev, ring);
1370}
1371
1372static int logical_vebox_ring_init(struct drm_device *dev)
1373{
1374 struct drm_i915_private *dev_priv = dev->dev_private;
1375 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
1376
1377 ring->name = "video enhancement ring";
1378 ring->id = VECS;
1379 ring->mmio_base = VEBOX_RING_BASE;
1380 ring->irq_enable_mask =
1381 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1382 ring->irq_keep_mask =
1383 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1384
1385 ring->init = gen8_init_common_ring;
1386 ring->get_seqno = gen8_get_seqno;
1387 ring->set_seqno = gen8_set_seqno;
1388 ring->emit_request = gen8_emit_request;
1389 ring->emit_flush = gen8_emit_flush;
1390 ring->irq_get = gen8_logical_ring_get_irq;
1391 ring->irq_put = gen8_logical_ring_put_irq;
1392 ring->emit_bb_start = gen8_emit_bb_start;
1393
1394 return logical_ring_init(dev, ring);
1395}
1396
1397/**
1398 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
1399 * @dev: DRM device.
1400 *
1401 * This function inits the engines for an Execlists submission style (the equivalent in the
1402 * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
1403 * those engines that are present in the hardware.
1404 *
1405 * Return: non-zero if the initialization failed.
1406 */
1407int intel_logical_rings_init(struct drm_device *dev)
1408{
1409 struct drm_i915_private *dev_priv = dev->dev_private;
1410 int ret;
1411
1412 ret = logical_render_ring_init(dev);
1413 if (ret)
1414 return ret;
1415
1416 if (HAS_BSD(dev)) {
1417 ret = logical_bsd_ring_init(dev);
1418 if (ret)
1419 goto cleanup_render_ring;
1420 }
1421
1422 if (HAS_BLT(dev)) {
1423 ret = logical_blt_ring_init(dev);
1424 if (ret)
1425 goto cleanup_bsd_ring;
1426 }
1427
1428 if (HAS_VEBOX(dev)) {
1429 ret = logical_vebox_ring_init(dev);
1430 if (ret)
1431 goto cleanup_blt_ring;
1432 }
1433
1434 if (HAS_BSD2(dev)) {
1435 ret = logical_bsd2_ring_init(dev);
1436 if (ret)
1437 goto cleanup_vebox_ring;
1438 }
1439
1440 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
1441 if (ret)
1442 goto cleanup_bsd2_ring;
1443
1444 return 0;
1445
1446cleanup_bsd2_ring:
1447 intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
1448cleanup_vebox_ring:
1449 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
1450cleanup_blt_ring:
1451 intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
1452cleanup_bsd_ring:
1453 intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
1454cleanup_render_ring:
1455 intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
1456
1457 return ret;
1458}
1459
1460int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
1461 struct intel_context *ctx)
1462{
1463 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
1464 struct render_state so;
1465 struct drm_i915_file_private *file_priv = ctx->file_priv;
1466 struct drm_file *file = file_priv ? file_priv->file : NULL;
1467 int ret;
1468
1469 ret = i915_gem_render_state_prepare(ring, &so);
1470 if (ret)
1471 return ret;
1472
1473 if (so.rodata == NULL)
1474 return 0;
1475
1476 ret = ring->emit_bb_start(ringbuf,
1477 so.ggtt_offset,
1478 I915_DISPATCH_SECURE);
1479 if (ret)
1480 goto out;
1481
1482 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
1483
1484 ret = __i915_add_request(ring, file, so.obj, NULL);
1485 /* intel_logical_ring_add_request moves object to inactive if it
1486 * fails */
1487out:
1488 i915_gem_render_state_fini(&so);
1489 return ret;
1490}
1491
1492static int
1493populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
1494 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
1495{
1496 struct drm_device *dev = ring->dev;
1497 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct drm_i915_gem_object *ring_obj = ringbuf->obj;
1499 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
1500 struct page *page;
1501 uint32_t *reg_state;
1502 int ret;
1503
1504 if (!ppgtt)
1505 ppgtt = dev_priv->mm.aliasing_ppgtt;
1506
1507 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
1508 if (ret) {
1509 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
1510 return ret;
1511 }
1512
1513 ret = i915_gem_object_get_pages(ctx_obj);
1514 if (ret) {
1515 DRM_DEBUG_DRIVER("Could not get object pages\n");
1516 return ret;
1517 }
1518
1519 i915_gem_object_pin_pages(ctx_obj);
1520
1521 /* The second page of the context object contains some fields which must
1522 * be set up prior to the first execution. */
1523 page = i915_gem_object_get_page(ctx_obj, 1);
1524 reg_state = kmap_atomic(page);
1525
1526 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1527 * commands followed by (reg, value) pairs. The values we are setting here are
1528 * only for the first context restore: on a subsequent save, the GPU will
1529 * recreate this batchbuffer with new values (including all the missing
1530 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
1531 if (ring->id == RCS)
1532 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
1533 else
1534 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
1535 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
1536 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
1537 reg_state[CTX_CONTEXT_CONTROL+1] =
1538 _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
1539 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
1540 reg_state[CTX_RING_HEAD+1] = 0;
1541 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
1542 reg_state[CTX_RING_TAIL+1] = 0;
1543 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
1544 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
1545 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
1546 reg_state[CTX_RING_BUFFER_CONTROL+1] =
1547 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
1548 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
1549 reg_state[CTX_BB_HEAD_U+1] = 0;
1550 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
1551 reg_state[CTX_BB_HEAD_L+1] = 0;
1552 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
1553 reg_state[CTX_BB_STATE+1] = (1<<5);
1554 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
1555 reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
1556 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
1557 reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
1558 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
1559 reg_state[CTX_SECOND_BB_STATE+1] = 0;
1560 if (ring->id == RCS) {
1561 /* TODO: according to BSpec, the register state context
1562 * for CHV does not have these. OTOH, these registers do
1563 * exist in CHV. I'm waiting for a clarification */
1564 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
1565 reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
1566 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
1567 reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
1568 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
1569 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
1570 }
1571 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
1572 reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
1573 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
1574 reg_state[CTX_CTX_TIMESTAMP+1] = 0;
1575 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
1576 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
1577 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
1578 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
1579 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
1580 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
1581 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
1582 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
1583 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
1584 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
1585 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
1586 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
1587 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
1588 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
1589 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
1590 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
1591 if (ring->id == RCS) {
1592 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1593 reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
1594 reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
1595 }
1596
1597 kunmap_atomic(reg_state);
1598
1599 ctx_obj->dirty = 1;
1600 set_page_dirty(page);
1601 i915_gem_object_unpin_pages(ctx_obj);
1602
1603 return 0;
1604}
1605
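(For illustration: a minimal userspace sketch of the MI_LOAD_REGISTER_IMM layout that populate_lr_context() fills in above. The header encoding follows the MI_INSTR(0x22, 2*n-1) form; the register offsets below are placeholders, not real Gen8 offsets.)

#include <stdint.h>
#include <stdio.h>

/* Same shape as MI_LOAD_REGISTER_IMM(n): opcode 0x22 in the high bits,
 * payload-dword-count-minus-one in the low bits (n pairs = 2n dwords). */
#define SKETCH_LRI(n) ((0x22u << 23) | (2u * (n) - 1))

static int emit_lri(uint32_t *state, int pos,
		    const uint32_t (*pairs)[2], int n)
{
	int i;

	state[pos++] = SKETCH_LRI(n);		/* LRI header */
	for (i = 0; i < n; i++) {
		state[pos++] = pairs[i][0];	/* register offset */
		state[pos++] = pairs[i][1];	/* immediate value */
	}
	return pos;
}

int main(void)
{
	uint32_t state[8];
	const uint32_t pairs[2][2] = {
		{ 0x2034, 0 },	/* placeholder "ring head" offset */
		{ 0x2030, 0 },	/* placeholder "ring tail" offset */
	};
	int len = emit_lri(state, 0, pairs, 2);

	printf("header 0x%08x, %d dwords total\n", state[0], len);
	return 0;
}
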
1606/**
1607 * intel_lr_context_free() - free the LRC specific bits of a context
1608 * @ctx: the LR context to free.
1609 *
1610 * The real context freeing is done in i915_gem_context_free; this only
1611 * takes care of the bits that are LRC related: the per-engine backing
1612 * objects and the logical ringbuffer.
1613 */
1614void intel_lr_context_free(struct intel_context *ctx)
1615{
1616 int i;
1617
1618 for (i = 0; i < I915_NUM_RINGS; i++) {
1619 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1620 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
1621
1622 if (ctx_obj) {
1623 intel_destroy_ringbuffer_obj(ringbuf);
1624 kfree(ringbuf);
1625 i915_gem_object_ggtt_unpin(ctx_obj);
1626 drm_gem_object_unreference(&ctx_obj->base);
1627 }
1628 }
1629}
1630
1631static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1632{
1633 int ret = 0;
1634
1635 WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
1636
1637 switch (ring->id) {
1638 case RCS:
1639 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
1640 break;
1641 case VCS:
1642 case BCS:
1643 case VECS:
1644 case VCS2:
1645 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
1646 break;
1647 }
1648
1649 return ret;
1650}
1651
1652/**
1653 * intel_lr_context_deferred_create() - create the LRC specific bits of a context
1654 * @ctx: LR context to create.
1655 * @ring: engine to be used with the context.
1656 *
1657 * This function can be called more than once, with different engines, if we plan
1658 * to use the context with them. The context backing objects and the ringbuffers
1659 * (especially the ringbuffer backing objects) take up a lot of memory, which is why
1660 * creation is deferred: it's better to make sure first that we actually need to use
1661 * a given ring with the context.
1662 *
1663 * Return: non-zero on error.
1664 */
1665int intel_lr_context_deferred_create(struct intel_context *ctx,
1666 struct intel_engine_cs *ring)
1667{
1668 struct drm_device *dev = ring->dev;
1669 struct drm_i915_gem_object *ctx_obj;
1670 uint32_t context_size;
1671 struct intel_ringbuffer *ringbuf;
1672 int ret;
1673
1674 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
1675 if (ctx->engine[ring->id].state)
1676 return 0;
1677
1678 context_size = round_up(get_lr_context_size(ring), 4096);
1679
1680 ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
1681 if (IS_ERR(ctx_obj)) {
1682 ret = PTR_ERR(ctx_obj);
1683 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
1684 return ret;
1685 }
1686
1687 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
1688 if (ret) {
1689 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
1690 drm_gem_object_unreference(&ctx_obj->base);
1691 return ret;
1692 }
1693
1694 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1695 if (!ringbuf) {
1696 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
1697 ring->name);
1698 i915_gem_object_ggtt_unpin(ctx_obj);
1699 drm_gem_object_unreference(&ctx_obj->base);
1700 ret = -ENOMEM;
1701 return ret;
1702 }
1703
1704 ringbuf->ring = ring;
1705 ringbuf->FIXME_lrc_ctx = ctx;
1706
1707 ringbuf->size = 32 * PAGE_SIZE;
1708 ringbuf->effective_size = ringbuf->size;
1709 ringbuf->head = 0;
1710 ringbuf->tail = 0;
1711 ringbuf->space = ringbuf->size;
1712 ringbuf->last_retired_head = -1;
1713
1714 /* TODO: For now we put this in the mappable region so that we can reuse
1715 * the existing ringbuffer code which ioremaps it. When we start
1716 * creating many contexts, this will no longer work and we must switch
1717 * to a kmapish interface.
1718 */
1719 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1720 if (ret) {
1721 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
1722 ring->name, ret);
1723 goto error;
1724 }
1725
1726 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
1727 if (ret) {
1728 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
1729 intel_destroy_ringbuffer_obj(ringbuf);
1730 goto error;
1731 }
1732
1733 ctx->engine[ring->id].ringbuf = ringbuf;
1734 ctx->engine[ring->id].state = ctx_obj;
1735
1736 if (ctx == ring->default_context) {
1737 /* The status page is offset 0 from the default context object
1738 * in LRC mode. */
1739 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
1740 ring->status_page.page_addr =
1741 kmap(sg_page(ctx_obj->pages->sgl));
1742 if (ring->status_page.page_addr == NULL)
1743 return -ENOMEM;
1744 ring->status_page.obj = ctx_obj;
1745 }
1746
1747 if (ring->id == RCS && !ctx->rcs_initialized) {
1748 ret = intel_lr_context_render_state_init(ring, ctx);
1749 if (ret) {
1750 DRM_ERROR("Init render state failed: %d\n", ret);
1751 ctx->engine[ring->id].ringbuf = NULL;
1752 ctx->engine[ring->id].state = NULL;
1753 intel_destroy_ringbuffer_obj(ringbuf);
1754 goto error;
1755 }
1756 ctx->rcs_initialized = true;
1757 }
1758
1759 return 0;
1760
1761error:
1762 kfree(ringbuf);
1763 i915_gem_object_ggtt_unpin(ctx_obj);
1764 drm_gem_object_unreference(&ctx_obj->base);
1765 return ret;
1766}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
new file mode 100644
index 000000000000..33c3b4bf28c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _INTEL_LRC_H_
25#define _INTEL_LRC_H_
26
27/* Execlists regs */
28#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
29#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
30#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
31#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
32#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
33
34/* Logical Rings */
35void intel_logical_ring_stop(struct intel_engine_cs *ring);
36void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
37int intel_logical_rings_init(struct drm_device *dev);
38
39int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
40void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
41/**
42 * intel_logical_ring_advance() - advance the ringbuffer tail
43 * @ringbuf: Ringbuffer to advance.
44 *
45 * The tail is only updated in our logical ringbuffer struct.
46 */
47static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
48{
49 ringbuf->tail &= ringbuf->size - 1;
50}
51/**
52 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
53 * @ringbuf: Ringbuffer to write to.
54 * @data: DWORD to write.
55 */
56static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
57 u32 data)
58{
59 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
60 ringbuf->tail += 4;
61}
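
(Side note: intel_logical_ring_advance() and intel_logical_ring_emit() above rely on the ring size being a power of two, so that masking with size - 1 performs the wrap. A self-contained sketch of that invariant, with iowrite32() replaced by a plain array store:)

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u			/* must be a power of two */

struct sketch_ring {
	uint32_t buf[RING_SIZE / 4];
	uint32_t tail;			/* byte offset, like ringbuf->tail */
};

static void sketch_emit(struct sketch_ring *ring, uint32_t data)
{
	ring->buf[ring->tail / 4] = data;	/* stand-in for iowrite32() */
	ring->tail += 4;
}

static void sketch_advance(struct sketch_ring *ring)
{
	ring->tail &= RING_SIZE - 1;	/* cheap modulo, valid only for 2^n sizes */
}

int main(void)
{
	struct sketch_ring ring = { .tail = RING_SIZE - 4 };

	sketch_emit(&ring, 0xdeadbeef);		/* fills the last dword slot */
	sketch_advance(&ring);
	printf("tail wrapped to %u\n", (unsigned)ring.tail);	/* prints 0 */
	return 0;
}
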
62int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
63
64/* Logical Ring Contexts */
65int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
66 struct intel_context *ctx);
67void intel_lr_context_free(struct intel_context *ctx);
68int intel_lr_context_deferred_create(struct intel_context *ctx,
69 struct intel_engine_cs *ring);
70
71/* Execlists */
72int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
73int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
74 struct intel_engine_cs *ring,
75 struct intel_context *ctx,
76 struct drm_i915_gem_execbuffer2 *args,
77 struct list_head *vmas,
78 struct drm_i915_gem_object *batch_obj,
79 u64 exec_start, u32 flags);
80u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
81
82/**
83 * struct intel_ctx_submit_request - queued context submission request
84 * @ctx: Context to submit to the ELSP.
85 * @ring: Engine to submit it to.
 86 * @tail: how far into the context's ringbuffer this request extends.
87 * @execlist_link: link in the submission queue.
88 * @work: workqueue for processing this request in a bottom half.
89 * @elsp_submitted: no. of times this request has been sent to the ELSP.
90 *
91 * The ELSP only accepts two elements at a time, so we queue context/tail
92 * pairs on a given queue (ring->execlist_queue) until the hardware is
93 * available. The queue serves a double purpose: we also use it to keep track
94 * of the up to 2 contexts currently in the hardware (usually one in execution
 95 * and the other queued up by the GPU). We only remove elements from the head
96 * of the queue when the hardware informs us that an element has been
97 * completed.
98 *
99 * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
100 */
101struct intel_ctx_submit_request {
102 struct intel_context *ctx;
103 struct intel_engine_cs *ring;
104 u32 tail;
105
106 struct list_head execlist_link;
107 struct work_struct work;
108
109 int elsp_submitted;
110};
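
(A rough userspace model of the discipline documented above, assuming a plain array in place of the driver's list_head and spinlock: at most two elements are handed to the "hardware", and the head is only retired on a completion event. The real resubmission rules are more subtle; this only shows the bookkeeping.)

#include <stdio.h>

#define QDEPTH		8
#define ELSP_SLOTS	2	/* the ELSP only accepts two elements */

struct sketch_req {
	int ctx_id;
	int elsp_submitted;	/* times handed to the "hardware" */
};

struct sketch_queue {
	struct sketch_req req[QDEPTH];
	int head, count;	/* head = oldest element still in flight */
};

/* Hand the (up to two) head elements to the hardware ports. */
static void sketch_submit_pending(struct sketch_queue *q)
{
	int i, n = q->count < ELSP_SLOTS ? q->count : ELSP_SLOTS;

	for (i = 0; i < n; i++)
		q->req[(q->head + i) % QDEPTH].elsp_submitted++;
}

/* Completion event: only now may the head be retired. */
static void sketch_on_complete(struct sketch_queue *q)
{
	if (!q->count)
		return;
	printf("retire ctx %d (submitted %d time(s))\n",
	       q->req[q->head].ctx_id, q->req[q->head].elsp_submitted);
	q->head = (q->head + 1) % QDEPTH;
	q->count--;
	sketch_submit_pending(q);	/* refill the freed slot */
}

int main(void)
{
	struct sketch_queue q = { .count = 3 };
	int i;

	for (i = 0; i < 3; i++)
		q.req[i].ctx_id = 100 + i;

	sketch_submit_pending(&q);	/* ctx 100 and 101 go in */
	sketch_on_complete(&q);		/* 100 retires; slot refilled */
	sketch_on_complete(&q);
	return 0;
}
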
111
112void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
113
114#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index fdf40267249c..a6bd1422e38f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -823,8 +823,7 @@ bool intel_is_dual_link_lvds(struct drm_device *dev)
823 struct intel_encoder *encoder; 823 struct intel_encoder *encoder;
824 struct intel_lvds_encoder *lvds_encoder; 824 struct intel_lvds_encoder *lvds_encoder;
825 825
826 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 826 for_each_intel_encoder(dev, encoder) {
827 base.head) {
828 if (encoder->type == INTEL_OUTPUT_LVDS) { 827 if (encoder->type == INTEL_OUTPUT_LVDS) {
829 lvds_encoder = to_lvds_encoder(&encoder->base); 828 lvds_encoder = to_lvds_encoder(&encoder->base);
830 829
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8e374449c6b5..18784470a760 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -751,6 +751,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
751 751
752 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 752 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
753 753
754 if (panel->backlight.device)
755 panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
754 panel->backlight.enabled = false; 756 panel->backlight.enabled = false;
755 dev_priv->display.disable_backlight(connector); 757 dev_priv->display.disable_backlight(connector);
756 758
@@ -957,6 +959,8 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
957 959
958 dev_priv->display.enable_backlight(connector); 960 dev_priv->display.enable_backlight(connector);
959 panel->backlight.enabled = true; 961 panel->backlight.enabled = true;
962 if (panel->backlight.device)
963 panel->backlight.device->props.power = FB_BLANK_UNBLANK;
960 964
961 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 965 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
962} 966}
@@ -965,6 +969,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
965static int intel_backlight_device_update_status(struct backlight_device *bd) 969static int intel_backlight_device_update_status(struct backlight_device *bd)
966{ 970{
967 struct intel_connector *connector = bl_get_data(bd); 971 struct intel_connector *connector = bl_get_data(bd);
972 struct intel_panel *panel = &connector->panel;
968 struct drm_device *dev = connector->base.dev; 973 struct drm_device *dev = connector->base.dev;
969 974
970 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 975 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
@@ -972,6 +977,23 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
972 bd->props.brightness, bd->props.max_brightness); 977 bd->props.brightness, bd->props.max_brightness);
973 intel_panel_set_backlight(connector, bd->props.brightness, 978 intel_panel_set_backlight(connector, bd->props.brightness,
974 bd->props.max_brightness); 979 bd->props.max_brightness);
980
981 /*
982 * Allow flipping bl_power as a sub-state of enabled. Sadly the
 983 * backlight class device does not make it easy to differentiate
984 * between callbacks for brightness and bl_power, so our backlight_power
985 * callback needs to take this into account.
986 */
987 if (panel->backlight.enabled) {
988 if (panel->backlight_power) {
989 bool enable = bd->props.power == FB_BLANK_UNBLANK &&
990 bd->props.brightness != 0;
991 panel->backlight_power(connector, enable);
992 }
993 } else {
994 bd->props.power = FB_BLANK_POWERDOWN;
995 }
996
975 drm_modeset_unlock(&dev->mode_config.connection_mutex); 997 drm_modeset_unlock(&dev->mode_config.connection_mutex);
976 return 0; 998 return 0;
977} 999}
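
(The enable decision added above reduces to one predicate: the panel is driven only when bl_power reads unblank and the requested brightness is non-zero. A tiny sketch with stand-ins for the FB_BLANK_* constants:)

#include <stdbool.h>
#include <stdio.h>

enum { SKETCH_UNBLANK, SKETCH_POWERDOWN };	/* stand-ins for FB_BLANK_* */

static bool sketch_backlight_on(int power, int brightness)
{
	return power == SKETCH_UNBLANK && brightness != 0;
}

int main(void)
{
	printf("%d\n", sketch_backlight_on(SKETCH_UNBLANK, 0));	/* 0: zero brightness */
	printf("%d\n", sketch_backlight_on(SKETCH_UNBLANK, 100));	/* 1: panel driven */
	printf("%d\n", sketch_backlight_on(SKETCH_POWERDOWN, 100));	/* 0: blanked */
	return 0;
}
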
@@ -1023,6 +1045,11 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1023 panel->backlight.level, 1045 panel->backlight.level,
1024 props.max_brightness); 1046 props.max_brightness);
1025 1047
1048 if (panel->backlight.enabled)
1049 props.power = FB_BLANK_UNBLANK;
1050 else
1051 props.power = FB_BLANK_POWERDOWN;
1052
1026 /* 1053 /*
1027 * Note: using the same name independent of the connector prevents 1054 * Note: using the same name independent of the connector prevents
1028 * registration of multiple backlight devices in the driver. 1055 * registration of multiple backlight devices in the driver.
@@ -1203,7 +1230,7 @@ static int vlv_setup_backlight(struct intel_connector *connector)
1203 enum pipe pipe; 1230 enum pipe pipe;
1204 u32 ctl, ctl2, val; 1231 u32 ctl, ctl2, val;
1205 1232
1206 for_each_pipe(pipe) { 1233 for_each_pipe(dev_priv, pipe) {
1207 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); 1234 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
1208 1235
1209 /* Skip if the modulation freq is already set */ 1236 /* Skip if the modulation freq is already set */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 40c12295c0bd..c27b6140bfd1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -309,6 +309,9 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
309 309
310 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 310 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
311 311
312 if (dev_priv->fbc.false_color)
313 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
314
312 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 315 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
313 316
314 if (IS_IVYBRIDGE(dev)) { 317 if (IS_IVYBRIDGE(dev)) {
@@ -342,6 +345,16 @@ bool intel_fbc_enabled(struct drm_device *dev)
342 return dev_priv->display.fbc_enabled(dev); 345 return dev_priv->display.fbc_enabled(dev);
343} 346}
344 347
348void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
349{
350 struct drm_i915_private *dev_priv = dev->dev_private;
351
352 if (!IS_GEN8(dev))
353 return;
354
355 I915_WRITE(MSG_FBC_REND_STATE, value);
356}
357
345static void intel_fbc_work_fn(struct work_struct *__work) 358static void intel_fbc_work_fn(struct work_struct *__work)
346{ 359{
347 struct intel_fbc_work *work = 360 struct intel_fbc_work *work =
@@ -578,6 +591,12 @@ void intel_update_fbc(struct drm_device *dev)
578 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); 591 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
579 goto out_disable; 592 goto out_disable;
580 } 593 }
594 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
595 to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
596 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
597 DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
598 goto out_disable;
599 }
581 600
582 /* If the kernel debugger is active, always disable compression */ 601 /* If the kernel debugger is active, always disable compression */
583 if (in_dbg_master()) 602 if (in_dbg_master())
@@ -853,7 +872,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
853 * A value of 5us seems to be a good balance; safe for very low end 872 * A value of 5us seems to be a good balance; safe for very low end
854 * platforms but not overly aggressive on lower latency configs. 873 * platforms but not overly aggressive on lower latency configs.
855 */ 874 */
856static const int latency_ns = 5000; 875static const int pessimal_latency_ns = 5000;
857 876
858static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 877static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
859{ 878{
@@ -982,13 +1001,20 @@ static const struct intel_watermark_params i915_wm_info = {
982 .guard_size = 2, 1001 .guard_size = 2,
983 .cacheline_size = I915_FIFO_LINE_SIZE, 1002 .cacheline_size = I915_FIFO_LINE_SIZE,
984}; 1003};
985static const struct intel_watermark_params i830_wm_info = { 1004static const struct intel_watermark_params i830_a_wm_info = {
986 .fifo_size = I855GM_FIFO_SIZE, 1005 .fifo_size = I855GM_FIFO_SIZE,
987 .max_wm = I915_MAX_WM, 1006 .max_wm = I915_MAX_WM,
988 .default_wm = 1, 1007 .default_wm = 1,
989 .guard_size = 2, 1008 .guard_size = 2,
990 .cacheline_size = I830_FIFO_LINE_SIZE, 1009 .cacheline_size = I830_FIFO_LINE_SIZE,
991}; 1010};
1011static const struct intel_watermark_params i830_bc_wm_info = {
1012 .fifo_size = I855GM_FIFO_SIZE,
1013 .max_wm = I915_MAX_WM/2,
1014 .default_wm = 1,
1015 .guard_size = 2,
1016 .cacheline_size = I830_FIFO_LINE_SIZE,
1017};
992static const struct intel_watermark_params i845_wm_info = { 1018static const struct intel_watermark_params i845_wm_info = {
993 .fifo_size = I830_FIFO_SIZE, 1019 .fifo_size = I830_FIFO_SIZE,
994 .max_wm = I915_MAX_WM, 1020 .max_wm = I915_MAX_WM,
@@ -1044,6 +1070,17 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1044 wm_size = wm->max_wm; 1070 wm_size = wm->max_wm;
1045 if (wm_size <= 0) 1071 if (wm_size <= 0)
1046 wm_size = wm->default_wm; 1072 wm_size = wm->default_wm;
1073
1074 /*
1075 * Bspec seems to indicate that the value shouldn't be lower than
1076 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
 1077 * Let's go for 8, which is the burst size, since certain platforms
1078 * already use a hardcoded 8 (which is what the spec says should be
1079 * done).
1080 */
1081 if (wm_size <= 8)
1082 wm_size = 8;
1083
1047 return wm_size; 1084 return wm_size;
1048} 1085}
1049 1086
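
(The clamp sequence of intel_calculate_wm() condensed into a standalone illustration, the 8-entry floor being the burst-size rule the comment above cites:)

#include <stdio.h>

/* The clamp order matters: cap at the FIFO max first, recover from an
 * underflowed calculation via the default, then apply the burst-size
 * floor of 8. */
static unsigned long sketch_clamp_wm(long wm_size, long max_wm, long default_wm)
{
	if (wm_size > max_wm)
		wm_size = max_wm;
	if (wm_size <= 0)
		wm_size = default_wm;
	if (wm_size <= 8)
		wm_size = 8;
	return wm_size;
}

int main(void)
{
	printf("%lu\n", sketch_clamp_wm(-3, 64, 1));	/* underflow -> 1 -> floor 8 */
	printf("%lu\n", sketch_clamp_wm(200, 64, 1));	/* capped at 64 */
	printf("%lu\n", sketch_clamp_wm(40, 64, 1));	/* passes through: 40 */
	return 0;
}
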
@@ -1268,33 +1305,27 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1268 display, cursor); 1305 display, cursor);
1269} 1306}
1270 1307
1271static bool vlv_compute_drain_latency(struct drm_device *dev, 1308static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1272 int plane, 1309 int pixel_size,
1273 int *plane_prec_mult, 1310 int *prec_mult,
1274 int *plane_dl, 1311 int *drain_latency)
1275 int *cursor_prec_mult,
1276 int *cursor_dl)
1277{ 1312{
1278 struct drm_crtc *crtc;
1279 int clock, pixel_size;
1280 int entries; 1313 int entries;
1314 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1281 1315
1282 crtc = intel_get_crtc_for_plane(dev, plane); 1316 if (WARN(clock == 0, "Pixel clock is zero!\n"))
1283 if (!intel_crtc_active(crtc))
1284 return false; 1317 return false;
1285 1318
1286 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; 1319 if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
1287 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ 1320 return false;
1288 1321
1289 entries = (clock / 1000) * pixel_size; 1322 entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
1290 *plane_prec_mult = (entries > 128) ? 1323 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
1291 DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32; 1324 DRAIN_LATENCY_PRECISION_32;
1292 *plane_dl = (64 * (*plane_prec_mult) * 4) / entries; 1325 *drain_latency = (64 * (*prec_mult) * 4) / entries;
1293 1326
1294 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ 1327 if (*drain_latency > DRAIN_LATENCY_MASK)
1295 *cursor_prec_mult = (entries > 128) ? 1328 *drain_latency = DRAIN_LATENCY_MASK;
1296 DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1297 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
1298 1329
1299 return true; 1330 return true;
1300} 1331}
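
(The same arithmetic as the reworked vlv_compute_drain_latency() above, as a standalone sketch; SKETCH_DL_MASK stands in for DRAIN_LATENCY_MASK, whose actual value is not shown in this hunk:)

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SKETCH_DL_MASK 0x7f		/* placeholder for DRAIN_LATENCY_MASK */

/* Entries fetched per usec pick the precision multiplier, which then
 * scales the programmed drain latency; the result is clamped to the
 * register field width. */
static int sketch_drain_latency(int clock_khz, int pixel_size, int *prec_mult)
{
	int entries = DIV_ROUND_UP(clock_khz, 1000) * pixel_size;
	int dl;

	*prec_mult = (entries > 128) ? 64 : 32;
	dl = (64 * *prec_mult * 4) / entries;
	return dl > SKETCH_DL_MASK ? SKETCH_DL_MASK : dl;
}

int main(void)
{
	int prec;
	int dl = sketch_drain_latency(148500, 4, &prec);	/* ~1080p clock, 32bpp */

	printf("precision x%d, latency %d\n", prec, dl);	/* x64, 27 */
	return 0;
}
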
@@ -1307,39 +1338,48 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1307 * latency value. 1338 * latency value.
1308 */ 1339 */
1309 1340
1310static void vlv_update_drain_latency(struct drm_device *dev) 1341static void vlv_update_drain_latency(struct drm_crtc *crtc)
1311{ 1342{
1312 struct drm_i915_private *dev_priv = dev->dev_private; 1343 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1313 int planea_prec, planea_dl, planeb_prec, planeb_dl; 1344 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1314 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; 1345 int pixel_size;
1315 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is 1346 int drain_latency;
1316 either 16 or 32 */ 1347 enum pipe pipe = intel_crtc->pipe;
1348 int plane_prec, prec_mult, plane_dl;
1349
1350 plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
1351 DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
1352 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
1317 1353
1318 /* For plane A, Cursor A */ 1354 if (!intel_crtc_active(crtc)) {
1319 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, 1355 I915_WRITE(VLV_DDL(pipe), plane_dl);
1320 &cursor_prec_mult, &cursora_dl)) { 1356 return;
1321 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? 1357 }
1322 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
1323 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1324 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
1325 1358
1326 I915_WRITE(VLV_DDL1, cursora_prec | 1359 /* Primary plane Drain Latency */
1327 (cursora_dl << DDL_CURSORA_SHIFT) | 1360 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
1328 planea_prec | planea_dl); 1361 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1362 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
1363 DDL_PLANE_PRECISION_64 :
1364 DDL_PLANE_PRECISION_32;
1365 plane_dl |= plane_prec | drain_latency;
1329 } 1366 }
1330 1367
1331 /* For plane B, Cursor B */ 1368 /* Cursor Drain Latency
1332 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, 1369 * BPP is always 4 for cursor
1333 &cursor_prec_mult, &cursorb_dl)) { 1370 */
1334 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? 1371 pixel_size = 4;
1335 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
1336 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1337 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
1338 1372
1339 I915_WRITE(VLV_DDL2, cursorb_prec | 1373 /* Program cursor DL only if it is enabled */
1340 (cursorb_dl << DDL_CURSORB_SHIFT) | 1374 if (intel_crtc->cursor_base &&
1341 planeb_prec | planeb_dl); 1375 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1376 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
1377 DDL_CURSOR_PRECISION_64 :
1378 DDL_CURSOR_PRECISION_32;
1379 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
1342 } 1380 }
1381
1382 I915_WRITE(VLV_DDL(pipe), plane_dl);
1343} 1383}
1344 1384
1345#define single_plane_enabled(mask) is_power_of_2(mask) 1385#define single_plane_enabled(mask) is_power_of_2(mask)
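
(Why is_power_of_2() works here: a pipe mask with exactly one bit set is a power of two, which the classic mask & (mask - 1) test checks. A standalone sketch:)

#include <stdbool.h>
#include <stdio.h>

static bool sketch_single_bit(unsigned int mask)
{
	return mask != 0 && (mask & (mask - 1)) == 0;
}

int main(void)
{
	printf("%d\n", sketch_single_bit(1u << 1));		/* only PIPE_B: 1 */
	printf("%d\n", sketch_single_bit((1u << 0) | (1u << 1)));	/* two pipes: 0 */
	return 0;
}
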
@@ -1355,20 +1395,92 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1355 unsigned int enabled = 0; 1395 unsigned int enabled = 0;
1356 bool cxsr_enabled; 1396 bool cxsr_enabled;
1357 1397
1358 vlv_update_drain_latency(dev); 1398 vlv_update_drain_latency(crtc);
1399
1400 if (g4x_compute_wm0(dev, PIPE_A,
1401 &valleyview_wm_info, pessimal_latency_ns,
1402 &valleyview_cursor_wm_info, pessimal_latency_ns,
1403 &planea_wm, &cursora_wm))
1404 enabled |= 1 << PIPE_A;
1405
1406 if (g4x_compute_wm0(dev, PIPE_B,
1407 &valleyview_wm_info, pessimal_latency_ns,
1408 &valleyview_cursor_wm_info, pessimal_latency_ns,
1409 &planeb_wm, &cursorb_wm))
1410 enabled |= 1 << PIPE_B;
1411
1412 if (single_plane_enabled(enabled) &&
1413 g4x_compute_srwm(dev, ffs(enabled) - 1,
1414 sr_latency_ns,
1415 &valleyview_wm_info,
1416 &valleyview_cursor_wm_info,
1417 &plane_sr, &ignore_cursor_sr) &&
1418 g4x_compute_srwm(dev, ffs(enabled) - 1,
1419 2*sr_latency_ns,
1420 &valleyview_wm_info,
1421 &valleyview_cursor_wm_info,
1422 &ignore_plane_sr, &cursor_sr)) {
1423 cxsr_enabled = true;
1424 } else {
1425 cxsr_enabled = false;
1426 intel_set_memory_cxsr(dev_priv, false);
1427 plane_sr = cursor_sr = 0;
1428 }
1429
1430 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1431 "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1432 planea_wm, cursora_wm,
1433 planeb_wm, cursorb_wm,
1434 plane_sr, cursor_sr);
1435
1436 I915_WRITE(DSPFW1,
1437 (plane_sr << DSPFW_SR_SHIFT) |
1438 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1439 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1440 (planea_wm << DSPFW_PLANEA_SHIFT));
1441 I915_WRITE(DSPFW2,
1442 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1443 (cursora_wm << DSPFW_CURSORA_SHIFT));
1444 I915_WRITE(DSPFW3,
1445 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1446 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1447
1448 if (cxsr_enabled)
1449 intel_set_memory_cxsr(dev_priv, true);
1450}
1451
1452static void cherryview_update_wm(struct drm_crtc *crtc)
1453{
1454 struct drm_device *dev = crtc->dev;
1455 static const int sr_latency_ns = 12000;
1456 struct drm_i915_private *dev_priv = dev->dev_private;
1457 int planea_wm, planeb_wm, planec_wm;
1458 int cursora_wm, cursorb_wm, cursorc_wm;
1459 int plane_sr, cursor_sr;
1460 int ignore_plane_sr, ignore_cursor_sr;
1461 unsigned int enabled = 0;
1462 bool cxsr_enabled;
1463
1464 vlv_update_drain_latency(crtc);
1359 1465
1360 if (g4x_compute_wm0(dev, PIPE_A, 1466 if (g4x_compute_wm0(dev, PIPE_A,
1361 &valleyview_wm_info, latency_ns, 1467 &valleyview_wm_info, pessimal_latency_ns,
1362 &valleyview_cursor_wm_info, latency_ns, 1468 &valleyview_cursor_wm_info, pessimal_latency_ns,
1363 &planea_wm, &cursora_wm)) 1469 &planea_wm, &cursora_wm))
1364 enabled |= 1 << PIPE_A; 1470 enabled |= 1 << PIPE_A;
1365 1471
1366 if (g4x_compute_wm0(dev, PIPE_B, 1472 if (g4x_compute_wm0(dev, PIPE_B,
1367 &valleyview_wm_info, latency_ns, 1473 &valleyview_wm_info, pessimal_latency_ns,
1368 &valleyview_cursor_wm_info, latency_ns, 1474 &valleyview_cursor_wm_info, pessimal_latency_ns,
1369 &planeb_wm, &cursorb_wm)) 1475 &planeb_wm, &cursorb_wm))
1370 enabled |= 1 << PIPE_B; 1476 enabled |= 1 << PIPE_B;
1371 1477
1478 if (g4x_compute_wm0(dev, PIPE_C,
1479 &valleyview_wm_info, pessimal_latency_ns,
1480 &valleyview_cursor_wm_info, pessimal_latency_ns,
1481 &planec_wm, &cursorc_wm))
1482 enabled |= 1 << PIPE_C;
1483
1372 if (single_plane_enabled(enabled) && 1484 if (single_plane_enabled(enabled) &&
1373 g4x_compute_srwm(dev, ffs(enabled) - 1, 1485 g4x_compute_srwm(dev, ffs(enabled) - 1,
1374 sr_latency_ns, 1486 sr_latency_ns,
@@ -1387,27 +1499,66 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1387 plane_sr = cursor_sr = 0; 1499 plane_sr = cursor_sr = 0;
1388 } 1500 }
1389 1501
1390 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1502 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1503 "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
1504 "SR: plane=%d, cursor=%d\n",
1391 planea_wm, cursora_wm, 1505 planea_wm, cursora_wm,
1392 planeb_wm, cursorb_wm, 1506 planeb_wm, cursorb_wm,
1507 planec_wm, cursorc_wm,
1393 plane_sr, cursor_sr); 1508 plane_sr, cursor_sr);
1394 1509
1395 I915_WRITE(DSPFW1, 1510 I915_WRITE(DSPFW1,
1396 (plane_sr << DSPFW_SR_SHIFT) | 1511 (plane_sr << DSPFW_SR_SHIFT) |
1397 (cursorb_wm << DSPFW_CURSORB_SHIFT) | 1512 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1398 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1513 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1399 planea_wm); 1514 (planea_wm << DSPFW_PLANEA_SHIFT));
1400 I915_WRITE(DSPFW2, 1515 I915_WRITE(DSPFW2,
1401 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | 1516 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1402 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1517 (cursora_wm << DSPFW_CURSORA_SHIFT));
1403 I915_WRITE(DSPFW3, 1518 I915_WRITE(DSPFW3,
1404 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | 1519 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1405 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1520 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1521 I915_WRITE(DSPFW9_CHV,
1522 (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
1523 DSPFW_CURSORC_MASK)) |
1524 (planec_wm << DSPFW_PLANEC_SHIFT) |
1525 (cursorc_wm << DSPFW_CURSORC_SHIFT));
1406 1526
1407 if (cxsr_enabled) 1527 if (cxsr_enabled)
1408 intel_set_memory_cxsr(dev_priv, true); 1528 intel_set_memory_cxsr(dev_priv, true);
1409} 1529}
1410 1530
1531static void valleyview_update_sprite_wm(struct drm_plane *plane,
1532 struct drm_crtc *crtc,
1533 uint32_t sprite_width,
1534 uint32_t sprite_height,
1535 int pixel_size,
1536 bool enabled, bool scaled)
1537{
1538 struct drm_device *dev = crtc->dev;
1539 struct drm_i915_private *dev_priv = dev->dev_private;
1540 int pipe = to_intel_plane(plane)->pipe;
1541 int sprite = to_intel_plane(plane)->plane;
1542 int drain_latency;
1543 int plane_prec;
1544 int sprite_dl;
1545 int prec_mult;
1546
1547 sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
1548 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
1549
1550 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
1551 &drain_latency)) {
1552 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
1553 DDL_SPRITE_PRECISION_64(sprite) :
1554 DDL_SPRITE_PRECISION_32(sprite);
1555 sprite_dl |= plane_prec |
1556 (drain_latency << DDL_SPRITE_SHIFT(sprite));
1557 }
1558
1559 I915_WRITE(VLV_DDL(pipe), sprite_dl);
1560}
1561
1411static void g4x_update_wm(struct drm_crtc *crtc) 1562static void g4x_update_wm(struct drm_crtc *crtc)
1412{ 1563{
1413 struct drm_device *dev = crtc->dev; 1564 struct drm_device *dev = crtc->dev;
@@ -1419,14 +1570,14 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1419 bool cxsr_enabled; 1570 bool cxsr_enabled;
1420 1571
1421 if (g4x_compute_wm0(dev, PIPE_A, 1572 if (g4x_compute_wm0(dev, PIPE_A,
1422 &g4x_wm_info, latency_ns, 1573 &g4x_wm_info, pessimal_latency_ns,
1423 &g4x_cursor_wm_info, latency_ns, 1574 &g4x_cursor_wm_info, pessimal_latency_ns,
1424 &planea_wm, &cursora_wm)) 1575 &planea_wm, &cursora_wm))
1425 enabled |= 1 << PIPE_A; 1576 enabled |= 1 << PIPE_A;
1426 1577
1427 if (g4x_compute_wm0(dev, PIPE_B, 1578 if (g4x_compute_wm0(dev, PIPE_B,
1428 &g4x_wm_info, latency_ns, 1579 &g4x_wm_info, pessimal_latency_ns,
1429 &g4x_cursor_wm_info, latency_ns, 1580 &g4x_cursor_wm_info, pessimal_latency_ns,
1430 &planeb_wm, &cursorb_wm)) 1581 &planeb_wm, &cursorb_wm))
1431 enabled |= 1 << PIPE_B; 1582 enabled |= 1 << PIPE_B;
1432 1583
@@ -1443,7 +1594,8 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1443 plane_sr = cursor_sr = 0; 1594 plane_sr = cursor_sr = 0;
1444 } 1595 }
1445 1596
1446 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1597 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1598 "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1447 planea_wm, cursora_wm, 1599 planea_wm, cursora_wm,
1448 planeb_wm, cursorb_wm, 1600 planeb_wm, cursorb_wm,
1449 plane_sr, cursor_sr); 1601 plane_sr, cursor_sr);
@@ -1452,7 +1604,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1452 (plane_sr << DSPFW_SR_SHIFT) | 1604 (plane_sr << DSPFW_SR_SHIFT) |
1453 (cursorb_wm << DSPFW_CURSORB_SHIFT) | 1605 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1454 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1606 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1455 planea_wm); 1607 (planea_wm << DSPFW_PLANEA_SHIFT));
1456 I915_WRITE(DSPFW2, 1608 I915_WRITE(DSPFW2,
1457 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | 1609 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1458 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1610 (cursora_wm << DSPFW_CURSORA_SHIFT));
@@ -1526,8 +1678,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1526 1678
1527 /* 965 has limitations... */ 1679 /* 965 has limitations... */
1528 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | 1680 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1529 (8 << 16) | (8 << 8) | (8 << 0)); 1681 (8 << DSPFW_CURSORB_SHIFT) |
1530 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 1682 (8 << DSPFW_PLANEB_SHIFT) |
1683 (8 << DSPFW_PLANEA_SHIFT));
1684 I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
1685 (8 << DSPFW_PLANEC_SHIFT_OLD));
1531 /* update cursor SR watermark */ 1686 /* update cursor SR watermark */
1532 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1687 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1533 1688
@@ -1552,7 +1707,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1552 else if (!IS_GEN2(dev)) 1707 else if (!IS_GEN2(dev))
1553 wm_info = &i915_wm_info; 1708 wm_info = &i915_wm_info;
1554 else 1709 else
1555 wm_info = &i830_wm_info; 1710 wm_info = &i830_a_wm_info;
1556 1711
1557 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1712 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1558 crtc = intel_get_crtc_for_plane(dev, 0); 1713 crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1565,10 +1720,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1565 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1720 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1566 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1721 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1567 wm_info, fifo_size, cpp, 1722 wm_info, fifo_size, cpp,
1568 latency_ns); 1723 pessimal_latency_ns);
1569 enabled = crtc; 1724 enabled = crtc;
1570 } else 1725 } else {
1571 planea_wm = fifo_size - wm_info->guard_size; 1726 planea_wm = fifo_size - wm_info->guard_size;
1727 if (planea_wm > (long)wm_info->max_wm)
1728 planea_wm = wm_info->max_wm;
1729 }
1730
1731 if (IS_GEN2(dev))
1732 wm_info = &i830_bc_wm_info;
1572 1733
1573 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1734 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1574 crtc = intel_get_crtc_for_plane(dev, 1); 1735 crtc = intel_get_crtc_for_plane(dev, 1);
@@ -1581,13 +1742,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1581 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1742 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1582 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1743 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1583 wm_info, fifo_size, cpp, 1744 wm_info, fifo_size, cpp,
1584 latency_ns); 1745 pessimal_latency_ns);
1585 if (enabled == NULL) 1746 if (enabled == NULL)
1586 enabled = crtc; 1747 enabled = crtc;
1587 else 1748 else
1588 enabled = NULL; 1749 enabled = NULL;
1589 } else 1750 } else {
1590 planeb_wm = fifo_size - wm_info->guard_size; 1751 planeb_wm = fifo_size - wm_info->guard_size;
1752 if (planeb_wm > (long)wm_info->max_wm)
1753 planeb_wm = wm_info->max_wm;
1754 }
1591 1755
1592 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1756 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1593 1757
@@ -1674,7 +1838,7 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
1674 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1838 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1675 &i845_wm_info, 1839 &i845_wm_info,
1676 dev_priv->display.get_fifo_size(dev, 0), 1840 dev_priv->display.get_fifo_size(dev, 0),
1677 4, latency_ns); 1841 4, pessimal_latency_ns);
1678 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1842 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1679 fwater_lo |= (3<<8) | planea_wm; 1843 fwater_lo |= (3<<8) | planea_wm;
1680 1844
@@ -2527,7 +2691,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2527#define WM_DIRTY_FBC (1 << 24) 2691#define WM_DIRTY_FBC (1 << 24)
2528#define WM_DIRTY_DDB (1 << 25) 2692#define WM_DIRTY_DDB (1 << 25)
2529 2693
2530static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, 2694static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2531 const struct ilk_wm_values *old, 2695 const struct ilk_wm_values *old,
2532 const struct ilk_wm_values *new) 2696 const struct ilk_wm_values *new)
2533{ 2697{
@@ -2535,7 +2699,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2535 enum pipe pipe; 2699 enum pipe pipe;
2536 int wm_lp; 2700 int wm_lp;
2537 2701
2538 for_each_pipe(pipe) { 2702 for_each_pipe(dev_priv, pipe) {
2539 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 2703 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2540 dirty |= WM_DIRTY_LINETIME(pipe); 2704 dirty |= WM_DIRTY_LINETIME(pipe);
2541 /* Must disable LP1+ watermarks too */ 2705 /* Must disable LP1+ watermarks too */
@@ -2621,7 +2785,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2621 unsigned int dirty; 2785 unsigned int dirty;
2622 uint32_t val; 2786 uint32_t val;
2623 2787
2624 dirty = ilk_compute_wm_dirty(dev, previous, results); 2788 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2625 if (!dirty) 2789 if (!dirty)
2626 return; 2790 return;
2627 2791
@@ -3327,13 +3491,18 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3327 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3491 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3328 WARN_ON(val < dev_priv->rps.min_freq_softlimit); 3492 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3329 3493
3330 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", 3494 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3331 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 3495 "Odd GPU freq value\n"))
3332 dev_priv->rps.cur_freq, 3496 val &= ~1;
3333 vlv_gpu_freq(dev_priv, val), val); 3497
3498 if (val != dev_priv->rps.cur_freq) {
3499 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3500 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3501 dev_priv->rps.cur_freq,
3502 vlv_gpu_freq(dev_priv, val), val);
3334 3503
3335 if (val != dev_priv->rps.cur_freq)
3336 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3504 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3505 }
3337 3506
3338 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 3507 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3339 3508
@@ -3406,8 +3575,14 @@ static void valleyview_disable_rps(struct drm_device *dev)
3406{ 3575{
3407 struct drm_i915_private *dev_priv = dev->dev_private; 3576 struct drm_i915_private *dev_priv = dev->dev_private;
3408 3577
 3578 /* We're doing forcewake before disabling RC6,
 3579 * which is what the BIOS expects when going into suspend */
3580 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3581
3409 I915_WRITE(GEN6_RC_CONTROL, 0); 3582 I915_WRITE(GEN6_RC_CONTROL, 0);
3410 3583
3584 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3585
3411 gen6_disable_rps_interrupts(dev); 3586 gen6_disable_rps_interrupts(dev);
3412} 3587}
3413 3588
@@ -3598,7 +3773,6 @@ static void gen6_enable_rps(struct drm_device *dev)
3598 struct drm_i915_private *dev_priv = dev->dev_private; 3773 struct drm_i915_private *dev_priv = dev->dev_private;
3599 struct intel_engine_cs *ring; 3774 struct intel_engine_cs *ring;
3600 u32 rp_state_cap; 3775 u32 rp_state_cap;
3601 u32 gt_perf_status;
3602 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 3776 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3603 u32 gtfifodbg; 3777 u32 gtfifodbg;
3604 int rc6_mode; 3778 int rc6_mode;
@@ -3623,7 +3797,6 @@ static void gen6_enable_rps(struct drm_device *dev)
3623 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3797 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3624 3798
3625 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3799 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3626 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3627 3800
3628 parse_rp_state_cap(dev_priv, rp_state_cap); 3801 parse_rp_state_cap(dev_priv, rp_state_cap);
3629 3802
@@ -3965,11 +4138,27 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
3965static void valleyview_init_gt_powersave(struct drm_device *dev) 4138static void valleyview_init_gt_powersave(struct drm_device *dev)
3966{ 4139{
3967 struct drm_i915_private *dev_priv = dev->dev_private; 4140 struct drm_i915_private *dev_priv = dev->dev_private;
4141 u32 val;
3968 4142
3969 valleyview_setup_pctx(dev); 4143 valleyview_setup_pctx(dev);
3970 4144
3971 mutex_lock(&dev_priv->rps.hw_lock); 4145 mutex_lock(&dev_priv->rps.hw_lock);
3972 4146
4147 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4148 switch ((val >> 6) & 3) {
4149 case 0:
4150 case 1:
4151 dev_priv->mem_freq = 800;
4152 break;
4153 case 2:
4154 dev_priv->mem_freq = 1066;
4155 break;
4156 case 3:
4157 dev_priv->mem_freq = 1333;
4158 break;
4159 }
4160 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4161
3973 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 4162 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3974 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 4163 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3975 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 4164 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
@@ -4004,11 +4193,38 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
4004static void cherryview_init_gt_powersave(struct drm_device *dev) 4193static void cherryview_init_gt_powersave(struct drm_device *dev)
4005{ 4194{
4006 struct drm_i915_private *dev_priv = dev->dev_private; 4195 struct drm_i915_private *dev_priv = dev->dev_private;
4196 u32 val;
4007 4197
4008 cherryview_setup_pctx(dev); 4198 cherryview_setup_pctx(dev);
4009 4199
4010 mutex_lock(&dev_priv->rps.hw_lock); 4200 mutex_lock(&dev_priv->rps.hw_lock);
4011 4201
4202 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
4203 switch ((val >> 2) & 0x7) {
4204 case 0:
4205 case 1:
4206 dev_priv->rps.cz_freq = 200;
4207 dev_priv->mem_freq = 1600;
4208 break;
4209 case 2:
4210 dev_priv->rps.cz_freq = 267;
4211 dev_priv->mem_freq = 1600;
4212 break;
4213 case 3:
4214 dev_priv->rps.cz_freq = 333;
4215 dev_priv->mem_freq = 2000;
4216 break;
4217 case 4:
4218 dev_priv->rps.cz_freq = 320;
4219 dev_priv->mem_freq = 1600;
4220 break;
4221 case 5:
4222 dev_priv->rps.cz_freq = 400;
4223 dev_priv->mem_freq = 1600;
4224 break;
4225 }
4226 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4227
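
(The decode step above, as a standalone sketch; the valleyview path earlier does the same with a 2-bit field at (val >> 6) & 3. Values follow the switch shown in this hunk:)

#include <stdio.h>

/* A 3-bit fuse field (bits 4:2) selects the CZ clock and DDR speed pairing. */
static void sketch_decode_cck_fuse(unsigned int val, int *cz_freq, int *mem_freq)
{
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1: *cz_freq = 200; *mem_freq = 1600; break;
	case 2: *cz_freq = 267; *mem_freq = 1600; break;
	case 3: *cz_freq = 333; *mem_freq = 2000; break;
	case 4: *cz_freq = 320; *mem_freq = 1600; break;
	case 5: *cz_freq = 400; *mem_freq = 1600; break;
	default: *cz_freq = 0; *mem_freq = 0; break;	/* reserved/unfused */
	}
}

int main(void)
{
	int cz, ddr;

	sketch_decode_cck_fuse(3u << 2, &cz, &ddr);
	printf("cz %d MHz, ddr %d MHz\n", cz, ddr);	/* 333 / 2000 */
	return 0;
}
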
4012 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 4228 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4013 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 4229 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4014 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 4230 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
@@ -4030,6 +4246,12 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
4030 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4246 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4031 dev_priv->rps.min_freq); 4247 dev_priv->rps.min_freq);
4032 4248
4249 WARN_ONCE((dev_priv->rps.max_freq |
4250 dev_priv->rps.efficient_freq |
4251 dev_priv->rps.rp1_freq |
4252 dev_priv->rps.min_freq) & 1,
4253 "Odd GPU freq values\n");
4254
4033 /* Preserve min/max settings in case of re-init */ 4255 /* Preserve min/max settings in case of re-init */
4034 if (dev_priv->rps.max_freq_softlimit == 0) 4256 if (dev_priv->rps.max_freq_softlimit == 0)
4035 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4257 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -5088,7 +5310,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
5088 struct drm_i915_private *dev_priv = dev->dev_private; 5310 struct drm_i915_private *dev_priv = dev->dev_private;
5089 int pipe; 5311 int pipe;
5090 5312
5091 for_each_pipe(pipe) { 5313 for_each_pipe(dev_priv, pipe) {
5092 I915_WRITE(DSPCNTR(pipe), 5314 I915_WRITE(DSPCNTR(pipe),
5093 I915_READ(DSPCNTR(pipe)) | 5315 I915_READ(DSPCNTR(pipe)) |
5094 DISPPLANE_TRICKLE_FEED_DISABLE); 5316 DISPPLANE_TRICKLE_FEED_DISABLE);
@@ -5203,7 +5425,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
5203 /* The below fixes the weird display corruption, a few pixels shifted 5425 /* The below fixes the weird display corruption, a few pixels shifted
5204 * downward, on (only) LVDS of some HP laptops with IVY. 5426 * downward, on (only) LVDS of some HP laptops with IVY.
5205 */ 5427 */
5206 for_each_pipe(pipe) { 5428 for_each_pipe(dev_priv, pipe) {
5207 val = I915_READ(TRANS_CHICKEN2(pipe)); 5429 val = I915_READ(TRANS_CHICKEN2(pipe));
5208 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 5430 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5209 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 5431 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
@@ -5215,7 +5437,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
5215 I915_WRITE(TRANS_CHICKEN2(pipe), val); 5437 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5216 } 5438 }
5217 /* WADP0ClockGatingDisable */ 5439 /* WADP0ClockGatingDisable */
5218 for_each_pipe(pipe) { 5440 for_each_pipe(dev_priv, pipe) {
5219 I915_WRITE(TRANS_CHICKEN1(pipe), 5441 I915_WRITE(TRANS_CHICKEN1(pipe),
5220 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 5442 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5221 } 5443 }
@@ -5383,7 +5605,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
5383 } 5605 }
5384} 5606}
5385 5607
5386static void gen8_init_clock_gating(struct drm_device *dev) 5608static void broadwell_init_clock_gating(struct drm_device *dev)
5387{ 5609{
5388 struct drm_i915_private *dev_priv = dev->dev_private; 5610 struct drm_i915_private *dev_priv = dev->dev_private;
5389 enum pipe pipe; 5611 enum pipe pipe;
@@ -5395,37 +5617,12 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5395 /* FIXME(BDW): Check all the w/a, some might only apply to 5617 /* FIXME(BDW): Check all the w/a, some might only apply to
5396 * pre-production hw. */ 5618 * pre-production hw. */
5397 5619
5398 /* WaDisablePartialInstShootdown:bdw */
5399 I915_WRITE(GEN8_ROW_CHICKEN,
5400 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5401 5620
5402 /* WaDisableThreadStallDopClockGating:bdw */
5403 /* FIXME: Unclear whether we really need this on production bdw. */
5404 I915_WRITE(GEN8_ROW_CHICKEN,
5405 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5406
5407 /*
5408 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
5409 * pre-production hardware
5410 */
5411 I915_WRITE(HALF_SLICE_CHICKEN3,
5412 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5413 I915_WRITE(HALF_SLICE_CHICKEN3,
5414 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5415 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); 5621 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5416 5622
5417 I915_WRITE(_3D_CHICKEN3, 5623 I915_WRITE(_3D_CHICKEN3,
5418 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); 5624 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5419 5625
5420 I915_WRITE(COMMON_SLICE_CHICKEN2,
5421 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5422
5423 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5424 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5425
5426 /* WaDisableDopClockGating:bdw May not be needed for production */
5427 I915_WRITE(GEN7_ROW_CHICKEN2,
5428 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5429 5626
5430 /* WaSwitchSolVfFArbitrationPriority:bdw */ 5627 /* WaSwitchSolVfFArbitrationPriority:bdw */
5431 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 5628 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5435,37 +5632,18 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5435 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 5632 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5436 5633
5437 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 5634 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5438 for_each_pipe(pipe) { 5635 for_each_pipe(dev_priv, pipe) {
5439 I915_WRITE(CHICKEN_PIPESL_1(pipe), 5636 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5440 I915_READ(CHICKEN_PIPESL_1(pipe)) | 5637 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5441 BDW_DPRS_MASK_VBLANK_SRD); 5638 BDW_DPRS_MASK_VBLANK_SRD);
5442 } 5639 }
5443 5640
5444 /* Use Force Non-Coherent whenever executing a 3D context. This is a
5445 * workaround for for a possible hang in the unlikely event a TLB
5446 * invalidation occurs during a PSD flush.
5447 */
5448 I915_WRITE(HDC_CHICKEN0,
5449 I915_READ(HDC_CHICKEN0) |
5450 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
5451
5452 /* WaVSRefCountFullforceMissDisable:bdw */ 5641 /* WaVSRefCountFullforceMissDisable:bdw */
5453 /* WaDSRefCountFullforceMissDisable:bdw */ 5642 /* WaDSRefCountFullforceMissDisable:bdw */
5454 I915_WRITE(GEN7_FF_THREAD_MODE, 5643 I915_WRITE(GEN7_FF_THREAD_MODE,
5455 I915_READ(GEN7_FF_THREAD_MODE) & 5644 I915_READ(GEN7_FF_THREAD_MODE) &
5456 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 5645 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5457 5646
5458 /*
5459 * BSpec recommends 8x4 when MSAA is used,
5460 * however in practice 16x4 seems fastest.
5461 *
5462 * Note that PS/WM thread counts depend on the WIZ hashing
5463 * disable bit, which we don't touch here, but it's good
5464 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5465 */
5466 I915_WRITE(GEN7_GT_MODE,
5467 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5468
5469 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 5647 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5470 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 5648 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5471 5649
@@ -5473,9 +5651,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5473 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 5651 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5474 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 5652 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5475 5653
5476 /* Wa4x4STCOptimizationDisable:bdw */ 5654 lpt_init_clock_gating(dev);
5477 I915_WRITE(CACHE_MODE_1,
5478 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
5479} 5655}
5480 5656
5481static void haswell_init_clock_gating(struct drm_device *dev) 5657static void haswell_init_clock_gating(struct drm_device *dev)
@@ -5631,24 +5807,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
5631static void valleyview_init_clock_gating(struct drm_device *dev) 5807static void valleyview_init_clock_gating(struct drm_device *dev)
5632{ 5808{
5633 struct drm_i915_private *dev_priv = dev->dev_private; 5809 struct drm_i915_private *dev_priv = dev->dev_private;
5634 u32 val;
5635
5636 mutex_lock(&dev_priv->rps.hw_lock);
5637 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5638 mutex_unlock(&dev_priv->rps.hw_lock);
5639 switch ((val >> 6) & 3) {
5640 case 0:
5641 case 1:
5642 dev_priv->mem_freq = 800;
5643 break;
5644 case 2:
5645 dev_priv->mem_freq = 1066;
5646 break;
5647 case 3:
5648 dev_priv->mem_freq = 1333;
5649 break;
5650 }
5651 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5652 5810
5653 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5811 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5654 5812
@@ -5724,48 +5882,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5724static void cherryview_init_clock_gating(struct drm_device *dev) 5882static void cherryview_init_clock_gating(struct drm_device *dev)
5725{ 5883{
5726 struct drm_i915_private *dev_priv = dev->dev_private; 5884 struct drm_i915_private *dev_priv = dev->dev_private;
5727 u32 val;
5728
5729 mutex_lock(&dev_priv->rps.hw_lock);
5730 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
5731 mutex_unlock(&dev_priv->rps.hw_lock);
5732 switch ((val >> 2) & 0x7) {
5733 case 0:
5734 case 1:
5735 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
5736 dev_priv->mem_freq = 1600;
5737 break;
5738 case 2:
5739 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
5740 dev_priv->mem_freq = 1600;
5741 break;
5742 case 3:
5743 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
5744 dev_priv->mem_freq = 2000;
5745 break;
5746 case 4:
5747 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
5748 dev_priv->mem_freq = 1600;
5749 break;
5750 case 5:
5751 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
5752 dev_priv->mem_freq = 1600;
5753 break;
5754 }
5755 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5756 5885
5757 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5886 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5758 5887
5759 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 5888 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5760 5889
5761 /* WaDisablePartialInstShootdown:chv */
5762 I915_WRITE(GEN8_ROW_CHICKEN,
5763 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5764
5765 /* WaDisableThreadStallDopClockGating:chv */
5766 I915_WRITE(GEN8_ROW_CHICKEN,
5767 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5768
5769 /* WaVSRefCountFullforceMissDisable:chv */ 5890 /* WaVSRefCountFullforceMissDisable:chv */
5770 /* WaDSRefCountFullforceMissDisable:chv */ 5891 /* WaDSRefCountFullforceMissDisable:chv */
5771 I915_WRITE(GEN7_FF_THREAD_MODE, 5892 I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -5784,10 +5905,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
5784 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 5905 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5785 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 5906 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5786 5907
5787 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
5788 I915_WRITE(HALF_SLICE_CHICKEN3,
5789 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5790
5791 /* WaDisableGunitClockGating:chv (pre-production hw) */ 5908 /* WaDisableGunitClockGating:chv (pre-production hw) */
5792 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) | 5909 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
5793 GINT_DIS); 5910 GINT_DIS);
@@ -5797,8 +5914,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
5797 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE)); 5914 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
5798 5915
5799 /* WaDisableDopClockGating:chv (pre-production hw) */ 5916 /* WaDisableDopClockGating:chv (pre-production hw) */
5800 I915_WRITE(GEN7_ROW_CHICKEN2,
5801 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5802 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 5917 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5803 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 5918 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
5804} 5919}
@@ -5883,6 +5998,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
5883 5998
5884 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 5999 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5885 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 6000 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
6001
6002 I915_WRITE(MI_ARB_STATE,
6003 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5886} 6004}
5887 6005
5888static void i85x_init_clock_gating(struct drm_device *dev) 6006static void i85x_init_clock_gating(struct drm_device *dev)
@@ -5894,6 +6012,9 @@ static void i85x_init_clock_gating(struct drm_device *dev)
5894 /* interrupts should cause a wake up from C3 */ 6012 /* interrupts should cause a wake up from C3 */
5895 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 6013 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
5896 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 6014 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6015
6016 I915_WRITE(MEM_MODE,
6017 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
5897} 6018}
5898 6019
5899static void i830_init_clock_gating(struct drm_device *dev) 6020static void i830_init_clock_gating(struct drm_device *dev)
@@ -5901,6 +6022,10 @@ static void i830_init_clock_gating(struct drm_device *dev)
5901 struct drm_i915_private *dev_priv = dev->dev_private; 6022 struct drm_i915_private *dev_priv = dev->dev_private;
5902 6023
5903 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 6024 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6025
6026 I915_WRITE(MEM_MODE,
6027 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
6028 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
5904} 6029}
5905 6030
5906void intel_init_clock_gating(struct drm_device *dev) 6031void intel_init_clock_gating(struct drm_device *dev)
@@ -6203,6 +6328,8 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6203 spin_unlock_irq(&dev_priv->irq_lock); 6328 spin_unlock_irq(&dev_priv->irq_lock);
6204 6329
6205 vlv_set_power_well(dev_priv, power_well, false); 6330 vlv_set_power_well(dev_priv, power_well, false);
6331
6332 vlv_power_sequencer_reset(dev_priv);
6206} 6333}
6207 6334
6208static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 6335static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -6238,12 +6365,11 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6238static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 6365static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6239 struct i915_power_well *power_well) 6366 struct i915_power_well *power_well)
6240{ 6367{
6241 struct drm_device *dev = dev_priv->dev;
6242 enum pipe pipe; 6368 enum pipe pipe;
6243 6369
6244 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); 6370 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6245 6371
6246 for_each_pipe(pipe) 6372 for_each_pipe(dev_priv, pipe)
6247 assert_pll_disabled(dev_priv, pipe); 6373 assert_pll_disabled(dev_priv, pipe);
6248 6374
6249 /* Assert common reset */ 6375 /* Assert common reset */
@@ -6252,6 +6378,153 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6252 vlv_set_power_well(dev_priv, power_well, false); 6378 vlv_set_power_well(dev_priv, power_well, false);
6253} 6379}
6254 6380
6381static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6382 struct i915_power_well *power_well)
6383{
6384 enum dpio_phy phy;
6385
6386 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6387 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6388
6389 /*
6390 * Enable the CRI clock source so we can get at the
6391 * display and the reference clock for VGA
6392 * hotplug / manual detection.
6393 */
6394 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6395 phy = DPIO_PHY0;
6396 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6397 DPLL_REFA_CLK_ENABLE_VLV);
6398 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6399 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6400 } else {
6401 phy = DPIO_PHY1;
6402 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
6403 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6404 }
6405 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6406 vlv_set_power_well(dev_priv, power_well, true);
6407
6408 /* Poll for phypwrgood signal */
6409 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
6410 DRM_ERROR("Display PHY %d did not power up\n", phy);
6411
6412 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
6413 PHY_COM_LANE_RESET_DEASSERT(phy));
6414}
6415
6416static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6417 struct i915_power_well *power_well)
6418{
6419 enum dpio_phy phy;
6420
6421 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6422 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6423
6424 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6425 phy = DPIO_PHY0;
6426 assert_pll_disabled(dev_priv, PIPE_A);
6427 assert_pll_disabled(dev_priv, PIPE_B);
6428 } else {
6429 phy = DPIO_PHY1;
6430 assert_pll_disabled(dev_priv, PIPE_C);
6431 }
6432
6433 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
6434 ~PHY_COM_LANE_RESET_DEASSERT(phy));
6435
6436 vlv_set_power_well(dev_priv, power_well, false);
6437}
6438
6439static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
6440 struct i915_power_well *power_well)
6441{
6442 enum pipe pipe = power_well->data;
6443 bool enabled;
6444 u32 state, ctrl;
6445
6446 mutex_lock(&dev_priv->rps.hw_lock);
6447
6448 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
6449 /*
6450 * We only ever set the power-on and power-gate states; anything
6451 * else is unexpected.
6452 */
6453 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
6454 enabled = state == DP_SSS_PWR_ON(pipe);
6455
6456 /*
6457 * A transient state at this point would mean some unexpected party
6458 * is poking at the power controls too.
6459 */
6460 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
6461 WARN_ON(ctrl << 16 != state);
6462
6463 mutex_unlock(&dev_priv->rps.hw_lock);
6464
6465 return enabled;
6466}
6467
6468static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
6469 struct i915_power_well *power_well,
6470 bool enable)
6471{
6472 enum pipe pipe = power_well->data;
6473 u32 state;
6474 u32 ctrl;
6475
6476 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
6477
6478 mutex_lock(&dev_priv->rps.hw_lock);
6479
6480#define COND \
6481 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
6482
6483 if (COND)
6484 goto out;
6485
6486 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6487 ctrl &= ~DP_SSC_MASK(pipe);
6488 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
6489 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
6490
6491 if (wait_for(COND, 100))
6492 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6493 state,
6494 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
6495
6496#undef COND
6497
6498out:
6499 mutex_unlock(&dev_priv->rps.hw_lock);
6500}
6501
6502static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
6503 struct i915_power_well *power_well)
6504{
6505 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
6506}
6507
6508static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
6509 struct i915_power_well *power_well)
6510{
6511 WARN_ON_ONCE(power_well->data != PIPE_A &&
6512 power_well->data != PIPE_B &&
6513 power_well->data != PIPE_C);
6514
6515 chv_set_pipe_power_well(dev_priv, power_well, true);
6516}
6517
6518static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
6519 struct i915_power_well *power_well)
6520{
6521 WARN_ON_ONCE(power_well->data != PIPE_A &&
6522 power_well->data != PIPE_B &&
6523 power_well->data != PIPE_C);
6524
6525 chv_set_pipe_power_well(dev_priv, power_well, false);
6526}
6527
6255static void check_power_well_state(struct drm_i915_private *dev_priv, 6528static void check_power_well_state(struct drm_i915_private *dev_priv,
6256 struct i915_power_well *power_well) 6529 struct i915_power_well *power_well)
6257{ 6530{
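
The chv_set_pipe_power_well() helper added above defines a local COND macro so the same readiness test drives both the early-exit check and the timeout poll. A freestanding sketch of that idiom, with a hypothetical register pair standing in for the Punit accessors (the real code uses separate DP_SSC request and DP_SSS status fields):

#include <stdio.h>

/* Hypothetical "hardware" register; stands in for the Punit access
 * that vlv_punit_read()/vlv_punit_write() perform in the driver. */
static unsigned int punit_reg;

static unsigned int punit_read(void)    { return punit_reg; }
static void punit_write(unsigned int v) { punit_reg = v; }

static int set_power_state(unsigned int mask, unsigned int state)
{
/* same expression serves the fast path and the poll below */
#define COND ((punit_read() & mask) == state)
	int retries = 100;

	if (COND)                  /* already in the requested state */
		return 0;

	punit_write((punit_read() & ~mask) | state);

	while (!COND && retries--) /* poll until the state sticks */
		;
	return COND ? 0 : -1;
#undef COND
}

int main(void)
{
	printf("%d\n", set_power_state(0x3, 0x1)); /* prints 0 */
	return 0;
}
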
@@ -6443,6 +6716,39 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6443 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 6716 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6444 BIT(POWER_DOMAIN_INIT)) 6717 BIT(POWER_DOMAIN_INIT))
6445 6718
6719#define CHV_PIPE_A_POWER_DOMAINS ( \
6720 BIT(POWER_DOMAIN_PIPE_A) | \
6721 BIT(POWER_DOMAIN_INIT))
6722
6723#define CHV_PIPE_B_POWER_DOMAINS ( \
6724 BIT(POWER_DOMAIN_PIPE_B) | \
6725 BIT(POWER_DOMAIN_INIT))
6726
6727#define CHV_PIPE_C_POWER_DOMAINS ( \
6728 BIT(POWER_DOMAIN_PIPE_C) | \
6729 BIT(POWER_DOMAIN_INIT))
6730
6731#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
6732 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6733 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6734 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6735 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6736 BIT(POWER_DOMAIN_INIT))
6737
6738#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
6739 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6740 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6741 BIT(POWER_DOMAIN_INIT))
6742
6743#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
6744 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6745 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6746 BIT(POWER_DOMAIN_INIT))
6747
6748#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
6749 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6750 BIT(POWER_DOMAIN_INIT))
6751
6446static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 6752static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6447 .sync_hw = i9xx_always_on_power_well_noop, 6753 .sync_hw = i9xx_always_on_power_well_noop,
6448 .enable = i9xx_always_on_power_well_noop, 6754 .enable = i9xx_always_on_power_well_noop,
@@ -6450,6 +6756,20 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6450 .is_enabled = i9xx_always_on_power_well_enabled, 6756 .is_enabled = i9xx_always_on_power_well_enabled,
6451}; 6757};
6452 6758
6759static const struct i915_power_well_ops chv_pipe_power_well_ops = {
6760 .sync_hw = chv_pipe_power_well_sync_hw,
6761 .enable = chv_pipe_power_well_enable,
6762 .disable = chv_pipe_power_well_disable,
6763 .is_enabled = chv_pipe_power_well_enabled,
6764};
6765
6766static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
6767 .sync_hw = vlv_power_well_sync_hw,
6768 .enable = chv_dpio_cmn_power_well_enable,
6769 .disable = chv_dpio_cmn_power_well_disable,
6770 .is_enabled = vlv_power_well_enabled,
6771};
6772
6453static struct i915_power_well i9xx_always_on_power_well[] = { 6773static struct i915_power_well i9xx_always_on_power_well[] = {
6454 { 6774 {
6455 .name = "always-on", 6775 .name = "always-on",
@@ -6572,6 +6892,107 @@ static struct i915_power_well vlv_power_wells[] = {
6572 }, 6892 },
6573}; 6893};
6574 6894
6895static struct i915_power_well chv_power_wells[] = {
6896 {
6897 .name = "always-on",
6898 .always_on = 1,
6899 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6900 .ops = &i9xx_always_on_power_well_ops,
6901 },
6902#if 0
6903 {
6904 .name = "display",
6905 .domains = VLV_DISPLAY_POWER_DOMAINS,
6906 .data = PUNIT_POWER_WELL_DISP2D,
6907 .ops = &vlv_display_power_well_ops,
6908 },
6909 {
6910 .name = "pipe-a",
6911 .domains = CHV_PIPE_A_POWER_DOMAINS,
6912 .data = PIPE_A,
6913 .ops = &chv_pipe_power_well_ops,
6914 },
6915 {
6916 .name = "pipe-b",
6917 .domains = CHV_PIPE_B_POWER_DOMAINS,
6918 .data = PIPE_B,
6919 .ops = &chv_pipe_power_well_ops,
6920 },
6921 {
6922 .name = "pipe-c",
6923 .domains = CHV_PIPE_C_POWER_DOMAINS,
6924 .data = PIPE_C,
6925 .ops = &chv_pipe_power_well_ops,
6926 },
6927#endif
6928 {
6929 .name = "dpio-common-bc",
6930 /*
6931 * XXX: cmnreset for one PHY seems to disturb the other.
6932 * As a workaround keep both powered on at the same
6933 * time for now.
6934 */
6935 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6936 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6937 .ops = &chv_dpio_cmn_power_well_ops,
6938 },
6939 {
6940 .name = "dpio-common-d",
6941 /*
6942 * XXX: cmnreset for one PHY seems to disturb the other.
6943 * As a workaround keep both powered on at the same
6944 * time for now.
6945 */
6946 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6947 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
6948 .ops = &chv_dpio_cmn_power_well_ops,
6949 },
6950#if 0
6951 {
6952 .name = "dpio-tx-b-01",
6953 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6954 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6955 .ops = &vlv_dpio_power_well_ops,
6956 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6957 },
6958 {
6959 .name = "dpio-tx-b-23",
6960 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6961 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6962 .ops = &vlv_dpio_power_well_ops,
6963 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6964 },
6965 {
6966 .name = "dpio-tx-c-01",
6967 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6968 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6969 .ops = &vlv_dpio_power_well_ops,
6970 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6971 },
6972 {
6973 .name = "dpio-tx-c-23",
6974 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6975 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6976 .ops = &vlv_dpio_power_well_ops,
6977 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6978 },
6979 {
6980 .name = "dpio-tx-d-01",
6981 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6982 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6983 .ops = &vlv_dpio_power_well_ops,
6984 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
6985 },
6986 {
6987 .name = "dpio-tx-d-23",
6988 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6989 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6990 .ops = &vlv_dpio_power_well_ops,
6991 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
6992 },
6993#endif
6994};
6995
6575static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, 6996static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6576 enum punit_power_well power_well_id) 6997 enum punit_power_well power_well_id)
6577{ 6998{
@@ -6608,6 +7029,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
6608 } else if (IS_BROADWELL(dev_priv->dev)) { 7029 } else if (IS_BROADWELL(dev_priv->dev)) {
6609 set_power_wells(power_domains, bdw_power_wells); 7030 set_power_wells(power_domains, bdw_power_wells);
6610 hsw_pwr = power_domains; 7031 hsw_pwr = power_domains;
7032 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
7033 set_power_wells(power_domains, chv_power_wells);
6611 } else if (IS_VALLEYVIEW(dev_priv->dev)) { 7034 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
6612 set_power_wells(power_domains, vlv_power_wells); 7035 set_power_wells(power_domains, vlv_power_wells);
6613 } else { 7036 } else {
@@ -6833,13 +7256,15 @@ void intel_init_pm(struct drm_device *dev)
6833 else if (IS_HASWELL(dev)) 7256 else if (IS_HASWELL(dev))
6834 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 7257 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6835 else if (INTEL_INFO(dev)->gen == 8) 7258 else if (INTEL_INFO(dev)->gen == 8)
6836 dev_priv->display.init_clock_gating = gen8_init_clock_gating; 7259 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
6837 } else if (IS_CHERRYVIEW(dev)) { 7260 } else if (IS_CHERRYVIEW(dev)) {
6838 dev_priv->display.update_wm = valleyview_update_wm; 7261 dev_priv->display.update_wm = cherryview_update_wm;
7262 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
6839 dev_priv->display.init_clock_gating = 7263 dev_priv->display.init_clock_gating =
6840 cherryview_init_clock_gating; 7264 cherryview_init_clock_gating;
6841 } else if (IS_VALLEYVIEW(dev)) { 7265 } else if (IS_VALLEYVIEW(dev)) {
6842 dev_priv->display.update_wm = valleyview_update_wm; 7266 dev_priv->display.update_wm = valleyview_update_wm;
7267 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
6843 dev_priv->display.init_clock_gating = 7268 dev_priv->display.init_clock_gating =
6844 valleyview_init_clock_gating; 7269 valleyview_init_clock_gating;
6845 } else if (IS_PINEVIEW(dev)) { 7270 } else if (IS_PINEVIEW(dev)) {
@@ -7025,6 +7450,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7025 return -1; 7450 return -1;
7026 } 7451 }
7027 7452
7453 /* CHV needs even values */
7028 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2); 7454 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7029 7455
7030 return opcode; 7456 return opcode;
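
For the even-values comment added above: Cherryview's Punit accepts only even frequency opcodes, so the conversion divides against half the CZ clock, rounds to nearest, and doubles the result. A minimal sketch, where the cz_freq and mul values are illustrative assumptions (the driver derives both at runtime) and the rounding macro is a simplified positive-only version of the kernel helper:

#include <stdio.h>

/* positive-only stand-in for the kernel's DIV_ROUND_CLOSEST */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int chv_freq_opcode_sketch(int cz_freq, int mul, int val_mhz)
{
	/* mirrors: DIV_ROUND_CLOSEST(val * 2 * mul, cz_freq) * 2 */
	return DIV_ROUND_CLOSEST(val_mhz * 2 * mul, cz_freq) * 2;
}

int main(void)
{
	/* 320 MHz CZ clock and mul = 4 are assumed example values */
	printf("%d\n", chv_freq_opcode_sketch(320, 4, 400)); /* prints 20 */
	return 0;
}
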
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index fd4f66231d30..6c792d3a9c9c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -24,13 +24,7 @@
24#ifndef _INTEL_RENDERSTATE_H 24
25#define _INTEL_RENDERSTATE_H 25
26 26
27#include <linux/types.h> 27#include "i915_drv.h"
28
29struct intel_renderstate_rodata {
30 const u32 *reloc;
31 const u32 *batch;
32 const u32 batch_items;
33};
34 28
35extern const struct intel_renderstate_rodata gen6_null_state; 29extern const struct intel_renderstate_rodata gen6_null_state;
36extern const struct intel_renderstate_rodata gen7_null_state; 30extern const struct intel_renderstate_rodata gen7_null_state;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 47a126a0493f..0a80e419b589 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,14 +33,24 @@
33#include "i915_trace.h" 33
34#include "intel_drv.h" 34
35 35
36/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, 36bool
37 * but keeps the logic simple. Indeed, the whole purpose of this macro is just 37intel_ring_initialized(struct intel_engine_cs *ring)
38 * to give some inclination as to some of the magic values used in the various 38{
39 * workarounds! 39 struct drm_device *dev = ring->dev;
40 */ 40
41#define CACHELINE_BYTES 64 41 if (!dev)
42 return false;
43
44 if (i915.enable_execlists) {
45 struct intel_context *dctx = ring->default_context;
46 struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
47
48 return ringbuf->obj;
49 } else
50 return ring->buffer && ring->buffer->obj;
51}
42 52
43static inline int __ring_space(int head, int tail, int size) 53int __intel_ring_space(int head, int tail, int size)
44{ 54{
45 int space = head - (tail + I915_RING_FREE_SPACE); 55 int space = head - (tail + I915_RING_FREE_SPACE);
46 if (space < 0) 56 if (space < 0)
@@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
48 return space; 58 return space;
49} 59}
50 60
51static inline int ring_space(struct intel_ringbuffer *ringbuf) 61int intel_ring_space(struct intel_ringbuffer *ringbuf)
52{ 62{
53 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); 63 return __intel_ring_space(ringbuf->head & HEAD_ADDR,
64 ringbuf->tail, ringbuf->size);
54} 65}
55 66
56static bool intel_ring_stopped(struct intel_engine_cs *ring) 67bool intel_ring_stopped(struct intel_engine_cs *ring)
57{ 68{
58 struct drm_i915_private *dev_priv = ring->dev->dev_private; 69 struct drm_i915_private *dev_priv = ring->dev->dev_private;
59 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); 70 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@@ -433,7 +444,14 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
433 return ret; 444 return ret;
434 } 445 }
435 446
436 return gen8_emit_pipe_control(ring, flags, scratch_addr); 447 ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
448 if (ret)
449 return ret;
450
451 if (!invalidate_domains && flush_domains)
452 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
453
454 return 0;
437} 455}
438 456
439static void ring_write_tail(struct intel_engine_cs *ring, 457static void ring_write_tail(struct intel_engine_cs *ring,
@@ -476,9 +494,14 @@ static bool stop_ring(struct intel_engine_cs *ring)
476 494
477 if (!IS_GEN2(ring->dev)) { 495 if (!IS_GEN2(ring->dev)) {
478 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 496 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
479 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 497 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
480 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 498 DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
481 return false; 499 /* Sometimes we observe that the idle flag is not
500 * set even though the ring is empty. So double
501 * check before giving up.
502 */
503 if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
504 return false;
482 } 505 }
483 } 506 }
484 507
@@ -540,6 +563,14 @@ static int init_ring_common(struct intel_engine_cs *ring)
540 * also enforces ordering), otherwise the hw might lose the new ring 563 * also enforces ordering), otherwise the hw might lose the new ring
541 * register values. */ 564 * register values. */
542 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); 565 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
566
567 /* WaClearRingBufHeadRegAtInit:ctg,elk */
568 if (I915_READ_HEAD(ring))
569 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
570 ring->name, I915_READ_HEAD(ring));
571 I915_WRITE_HEAD(ring, 0);
572 (void)I915_READ_HEAD(ring);
573
543 I915_WRITE_CTL(ring, 574 I915_WRITE_CTL(ring,
544 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 575 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
545 | RING_VALID); 576 | RING_VALID);
@@ -563,7 +594,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
563 else { 594 else {
564 ringbuf->head = I915_READ_HEAD(ring); 595 ringbuf->head = I915_READ_HEAD(ring);
565 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 596 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
566 ringbuf->space = ring_space(ringbuf); 597 ringbuf->space = intel_ring_space(ringbuf);
567 ringbuf->last_retired_head = -1; 598 ringbuf->last_retired_head = -1;
568 } 599 }
569 600
@@ -575,8 +606,25 @@ out:
575 return ret; 606 return ret;
576} 607}
577 608
578static int 609void
579init_pipe_control(struct intel_engine_cs *ring) 610intel_fini_pipe_control(struct intel_engine_cs *ring)
611{
612 struct drm_device *dev = ring->dev;
613
614 if (ring->scratch.obj == NULL)
615 return;
616
617 if (INTEL_INFO(dev)->gen >= 5) {
618 kunmap(sg_page(ring->scratch.obj->pages->sgl));
619 i915_gem_object_ggtt_unpin(ring->scratch.obj);
620 }
621
622 drm_gem_object_unreference(&ring->scratch.obj->base);
623 ring->scratch.obj = NULL;
624}
625
626int
627intel_init_pipe_control(struct intel_engine_cs *ring)
580{ 628{
581 int ret; 629 int ret;
582 630
@@ -617,6 +665,135 @@ err:
617 return ret; 665 return ret;
618} 666}
619 667
668static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
669 u32 addr, u32 value)
670{
671 struct drm_device *dev = ring->dev;
672 struct drm_i915_private *dev_priv = dev->dev_private;
673
674 if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
675 return;
676
677 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
678 intel_ring_emit(ring, addr);
679 intel_ring_emit(ring, value);
680
681 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
682 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
683 /* value is updated with the status of the remaining bits of this
684 * register when it is read from the debugfs file
685 */
686 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
687 dev_priv->num_wa_regs++;
688
689 return;
690}
691
692static int bdw_init_workarounds(struct intel_engine_cs *ring)
693{
694 int ret;
695 struct drm_device *dev = ring->dev;
696 struct drm_i915_private *dev_priv = dev->dev_private;
697
698 /*
699 * workarounds applied in this fn are part of the register state context;
700 * they need to be re-initialized following a gpu reset, suspend/resume,
701 * or module reload.
702 */
703 dev_priv->num_wa_regs = 0;
704 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
705
706 /*
707 * update the number of dwords required based on the
708 * actual number of workarounds applied
709 */
710 ret = intel_ring_begin(ring, 18);
711 if (ret)
712 return ret;
713
714 /* WaDisablePartialInstShootdown:bdw */
715 /* WaDisableThreadStallDopClockGating:bdw */
716 /* FIXME: Unclear whether we really need this on production bdw. */
717 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
718 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
719 | STALL_DOP_GATING_DISABLE));
720
721 /* WaDisableDopClockGating:bdw (may not be needed for production) */
722 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
723 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
724
725 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
726 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
727
728 /* Use Force Non-Coherent whenever executing a 3D context. This is a
729 * workaround for for a possible hang in the unlikely event a TLB
730 * invalidation occurs during a PSD flush.
731 */
732 intel_ring_emit_wa(ring, HDC_CHICKEN0,
733 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
734
735 /* Wa4x4STCOptimizationDisable:bdw */
736 intel_ring_emit_wa(ring, CACHE_MODE_1,
737 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
738
739 /*
740 * BSpec recommends 8x4 when MSAA is used,
741 * however in practice 16x4 seems fastest.
742 *
743 * Note that PS/WM thread counts depend on the WIZ hashing
744 * disable bit, which we don't touch here, but it's good
745 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
746 */
747 intel_ring_emit_wa(ring, GEN7_GT_MODE,
748 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
749
750 intel_ring_advance(ring);
751
752 DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
753 dev_priv->num_wa_regs);
754
755 return 0;
756}
757
758static int chv_init_workarounds(struct intel_engine_cs *ring)
759{
760 int ret;
761 struct drm_device *dev = ring->dev;
762 struct drm_i915_private *dev_priv = dev->dev_private;
763
764 /*
765 * workarounds applied in this fn are part of the register state context;
766 * they need to be re-initialized following a gpu reset, suspend/resume,
767 * or module reload.
768 */
769 dev_priv->num_wa_regs = 0;
770 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
771
772 ret = intel_ring_begin(ring, 12);
773 if (ret)
774 return ret;
775
776 /* WaDisablePartialInstShootdown:chv */
777 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
778 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
779
780 /* WaDisableThreadStallDopClockGating:chv */
781 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
782 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
783
784 /* WaDisableDopClockGating:chv (pre-production hw) */
785 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
786 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
787
788 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
789 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
790 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
791
792 intel_ring_advance(ring);
793
794 return 0;
795}
796
620static int init_render_ring(struct intel_engine_cs *ring) 797static int init_render_ring(struct intel_engine_cs *ring)
621{ 798{
622 struct drm_device *dev = ring->dev; 799 struct drm_device *dev = ring->dev;
@@ -651,7 +828,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
651 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 828 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
652 829
653 if (INTEL_INFO(dev)->gen >= 5) { 830 if (INTEL_INFO(dev)->gen >= 5) {
654 ret = init_pipe_control(ring); 831 ret = intel_init_pipe_control(ring);
655 if (ret) 832 if (ret)
656 return ret; 833 return ret;
657 } 834 }
@@ -686,16 +863,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
686 dev_priv->semaphore_obj = NULL; 863 dev_priv->semaphore_obj = NULL;
687 } 864 }
688 865
689 if (ring->scratch.obj == NULL) 866 intel_fini_pipe_control(ring);
690 return;
691
692 if (INTEL_INFO(dev)->gen >= 5) {
693 kunmap(sg_page(ring->scratch.obj->pages->sgl));
694 i915_gem_object_ggtt_unpin(ring->scratch.obj);
695 }
696
697 drm_gem_object_unreference(&ring->scratch.obj->base);
698 ring->scratch.obj = NULL;
699} 867}
700 868
701static int gen8_rcs_signal(struct intel_engine_cs *signaller, 869static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@@ -1526,7 +1694,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
1526 return 0; 1694 return 0;
1527} 1695}
1528 1696
1529static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1697void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1530{ 1698{
1531 if (!ringbuf->obj) 1699 if (!ringbuf->obj)
1532 return; 1700 return;
@@ -1537,8 +1705,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1537 ringbuf->obj = NULL; 1705 ringbuf->obj = NULL;
1538} 1706}
1539 1707
1540static int intel_alloc_ringbuffer_obj(struct drm_device *dev, 1708int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1541 struct intel_ringbuffer *ringbuf) 1709 struct intel_ringbuffer *ringbuf)
1542{ 1710{
1543 struct drm_i915_private *dev_priv = to_i915(dev); 1711 struct drm_i915_private *dev_priv = to_i915(dev);
1544 struct drm_i915_gem_object *obj; 1712 struct drm_i915_gem_object *obj;
@@ -1600,7 +1768,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1600 ring->dev = dev; 1768 ring->dev = dev;
1601 INIT_LIST_HEAD(&ring->active_list); 1769 INIT_LIST_HEAD(&ring->active_list);
1602 INIT_LIST_HEAD(&ring->request_list); 1770 INIT_LIST_HEAD(&ring->request_list);
1771 INIT_LIST_HEAD(&ring->execlist_queue);
1603 ringbuf->size = 32 * PAGE_SIZE; 1772 ringbuf->size = 32 * PAGE_SIZE;
1773 ringbuf->ring = ring;
1604 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 1774 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1605 1775
1606 init_waitqueue_head(&ring->irq_queue); 1776 init_waitqueue_head(&ring->irq_queue);
@@ -1683,13 +1853,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1683 ringbuf->head = ringbuf->last_retired_head; 1853 ringbuf->head = ringbuf->last_retired_head;
1684 ringbuf->last_retired_head = -1; 1854 ringbuf->last_retired_head = -1;
1685 1855
1686 ringbuf->space = ring_space(ringbuf); 1856 ringbuf->space = intel_ring_space(ringbuf);
1687 if (ringbuf->space >= n) 1857 if (ringbuf->space >= n)
1688 return 0; 1858 return 0;
1689 } 1859 }
1690 1860
1691 list_for_each_entry(request, &ring->request_list, list) { 1861 list_for_each_entry(request, &ring->request_list, list) {
1692 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) { 1862 if (__intel_ring_space(request->tail, ringbuf->tail,
1863 ringbuf->size) >= n) {
1693 seqno = request->seqno; 1864 seqno = request->seqno;
1694 break; 1865 break;
1695 } 1866 }
@@ -1706,7 +1877,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1706 ringbuf->head = ringbuf->last_retired_head; 1877 ringbuf->head = ringbuf->last_retired_head;
1707 ringbuf->last_retired_head = -1; 1878 ringbuf->last_retired_head = -1;
1708 1879
1709 ringbuf->space = ring_space(ringbuf); 1880 ringbuf->space = intel_ring_space(ringbuf);
1710 return 0; 1881 return 0;
1711} 1882}
1712 1883
@@ -1735,7 +1906,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1735 trace_i915_ring_wait_begin(ring); 1906 trace_i915_ring_wait_begin(ring);
1736 do { 1907 do {
1737 ringbuf->head = I915_READ_HEAD(ring); 1908 ringbuf->head = I915_READ_HEAD(ring);
1738 ringbuf->space = ring_space(ringbuf); 1909 ringbuf->space = intel_ring_space(ringbuf);
1739 if (ringbuf->space >= n) { 1910 if (ringbuf->space >= n) {
1740 ret = 0; 1911 ret = 0;
1741 break; 1912 break;
@@ -1787,7 +1958,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1787 iowrite32(MI_NOOP, virt++); 1958 iowrite32(MI_NOOP, virt++);
1788 1959
1789 ringbuf->tail = 0; 1960 ringbuf->tail = 0;
1790 ringbuf->space = ring_space(ringbuf); 1961 ringbuf->space = intel_ring_space(ringbuf);
1791 1962
1792 return 0; 1963 return 0;
1793} 1964}
@@ -1992,9 +2163,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1992 u64 offset, u32 len, 2163 u64 offset, u32 len,
1993 unsigned flags) 2164 unsigned flags)
1994{ 2165{
1995 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2166 bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
1996 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1997 !(flags & I915_DISPATCH_SECURE);
1998 int ret; 2167 int ret;
1999 2168
2000 ret = intel_ring_begin(ring, 4); 2169 ret = intel_ring_begin(ring, 4);
@@ -2023,8 +2192,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2023 return ret; 2192 return ret;
2024 2193
2025 intel_ring_emit(ring, 2194 intel_ring_emit(ring,
2026 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | 2195 MI_BATCH_BUFFER_START |
2027 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); 2196 (flags & I915_DISPATCH_SECURE ?
2197 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
2028 /* bit0-7 is the length on GEN6+ */ 2198 /* bit0-7 is the length on GEN6+ */
2029 intel_ring_emit(ring, offset); 2199 intel_ring_emit(ring, offset);
2030 intel_ring_advance(ring); 2200 intel_ring_advance(ring);
@@ -2123,6 +2293,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2123 dev_priv->semaphore_obj = obj; 2293 dev_priv->semaphore_obj = obj;
2124 } 2294 }
2125 } 2295 }
2296 if (IS_CHERRYVIEW(dev))
2297 ring->init_context = chv_init_workarounds;
2298 else
2299 ring->init_context = bdw_init_workarounds;
2126 ring->add_request = gen6_add_request; 2300 ring->add_request = gen6_add_request;
2127 ring->flush = gen8_render_ring_flush; 2301 ring->flush = gen8_render_ring_flush;
2128 ring->irq_get = gen8_ring_get_irq; 2302 ring->irq_get = gen8_ring_get_irq;
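
The __intel_ring_space()/intel_ring_space() pair exported above computes free bytes with a wrap-around subtraction, holding back a reserved gap so the head never catches the tail. A self-contained sketch of that arithmetic; the 64-byte reserve is an assumption mirroring I915_RING_FREE_SPACE:

#include <stdio.h>

#define RING_FREE_SPACE 64 /* assumed to match I915_RING_FREE_SPACE */

static int ring_space_sketch(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);

	if (space < 0)     /* tail is ahead of head: wrap around */
		space += size;
	return space;
}

int main(void)
{
	/* 4 KiB ring, head at 512, tail at 3584 -> wrapped space */
	printf("%d\n", ring_space_sketch(512, 3584, 4096)); /* prints 960 */
	return 0;
}
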
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 70525d0c2c74..96479c89f4bd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,13 @@
5 5
6#define I915_CMD_HASH_ORDER 9 6#define I915_CMD_HASH_ORDER 9
7 7
8/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
9 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
10 * to give some inclination as to some of the magic values used in the various
11 * workarounds!
12 */
13#define CACHELINE_BYTES 64
14
8/* 15/*
9 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" 16 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
10 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" 17 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -90,6 +97,15 @@ struct intel_ringbuffer {
90 struct drm_i915_gem_object *obj; 97 struct drm_i915_gem_object *obj;
91 void __iomem *virtual_start; 98 void __iomem *virtual_start;
92 99
100 struct intel_engine_cs *ring;
101
102 /*
103 * FIXME: This backpointer is an artifact of the history of how the
104 * execlist patches came into being. It will get removed once the basic
105 * code has landed.
106 */
107 struct intel_context *FIXME_lrc_ctx;
108
93 u32 head; 109 u32 head;
94 u32 tail; 110 u32 tail;
95 int space; 111 int space;
@@ -132,6 +148,8 @@ struct intel_engine_cs {
132 148
133 int (*init)(struct intel_engine_cs *ring); 149 int (*init)(struct intel_engine_cs *ring);
134 150
151 int (*init_context)(struct intel_engine_cs *ring);
152
135 void (*write_tail)(struct intel_engine_cs *ring, 153 void (*write_tail)(struct intel_engine_cs *ring,
136 u32 value); 154 u32 value);
137 int __must_check (*flush)(struct intel_engine_cs *ring, 155 int __must_check (*flush)(struct intel_engine_cs *ring,
@@ -214,6 +232,18 @@ struct intel_engine_cs {
214 unsigned int num_dwords); 232 unsigned int num_dwords);
215 } semaphore; 233 } semaphore;
216 234
235 /* Execlists */
236 spinlock_t execlist_lock;
237 struct list_head execlist_queue;
238 u8 next_context_status_buffer;
239 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
240 int (*emit_request)(struct intel_ringbuffer *ringbuf);
241 int (*emit_flush)(struct intel_ringbuffer *ringbuf,
242 u32 invalidate_domains,
243 u32 flush_domains);
244 int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
245 u64 offset, unsigned flags);
246
217 /** 247 /**
218 * List of objects currently involved in rendering from the 248 * List of objects currently involved in rendering from the
219 * ringbuffer. 249 * ringbuffer.
@@ -287,11 +317,7 @@ struct intel_engine_cs {
287 u32 (*get_cmd_length_mask)(u32 cmd_header); 317 u32 (*get_cmd_length_mask)(u32 cmd_header);
288}; 318};
289 319
290static inline bool 320bool intel_ring_initialized(struct intel_engine_cs *ring);
291intel_ring_initialized(struct intel_engine_cs *ring)
292{
293 return ring->buffer && ring->buffer->obj;
294}
295 321
296static inline unsigned 322static inline unsigned
297intel_ring_flag(struct intel_engine_cs *ring) 323intel_ring_flag(struct intel_engine_cs *ring)
@@ -355,6 +381,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
355#define I915_GEM_HWS_SCRATCH_INDEX 0x30 381#define I915_GEM_HWS_SCRATCH_INDEX 0x30
356#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 382#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
357 383
384void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
385int intel_alloc_ringbuffer_obj(struct drm_device *dev,
386 struct intel_ringbuffer *ringbuf);
387
358void intel_stop_ring_buffer(struct intel_engine_cs *ring); 388void intel_stop_ring_buffer(struct intel_engine_cs *ring);
359void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); 389void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
360 390
@@ -372,6 +402,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
372 struct intel_ringbuffer *ringbuf = ring->buffer; 402 struct intel_ringbuffer *ringbuf = ring->buffer;
373 ringbuf->tail &= ringbuf->size - 1; 403 ringbuf->tail &= ringbuf->size - 1;
374} 404}
405int __intel_ring_space(int head, int tail, int size);
406int intel_ring_space(struct intel_ringbuffer *ringbuf);
407bool intel_ring_stopped(struct intel_engine_cs *ring);
375void __intel_ring_advance(struct intel_engine_cs *ring); 408void __intel_ring_advance(struct intel_engine_cs *ring);
376 409
377int __must_check intel_ring_idle(struct intel_engine_cs *ring); 410int __must_check intel_ring_idle(struct intel_engine_cs *ring);
@@ -379,6 +412,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
379int intel_ring_flush_all_caches(struct intel_engine_cs *ring); 412int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
380int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring); 413int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
381 414
415void intel_fini_pipe_control(struct intel_engine_cs *ring);
416int intel_init_pipe_control(struct intel_engine_cs *ring);
417
382int intel_init_render_ring_buffer(struct drm_device *dev); 418int intel_init_render_ring_buffer(struct drm_device *dev);
383int intel_init_bsd_ring_buffer(struct drm_device *dev); 419int intel_init_bsd_ring_buffer(struct drm_device *dev);
384int intel_init_bsd2_ring_buffer(struct drm_device *dev); 420int intel_init_bsd2_ring_buffer(struct drm_device *dev);
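
The emit_request/emit_flush/emit_bb_start hooks declared above give execlists a per-engine submission vtable alongside the legacy callbacks. A toy sketch of that dispatch shape, with purely illustrative names and behavior, not driver API:

#include <stdio.h>

struct ringbuf {
	unsigned int tail;
};

struct engine {
	/* mirrors only the shape of the new emit_* hooks */
	int (*emit_flush)(struct ringbuf *rb,
			  unsigned int invalidate, unsigned int flush);
};

static int lrc_emit_flush(struct ringbuf *rb,
			  unsigned int invalidate, unsigned int flush)
{
	rb->tail += 4; /* pretend we queued a four-dword command */
	printf("flush: invalidate=%#x flush=%#x\n", invalidate, flush);
	return 0;
}

int main(void)
{
	struct ringbuf rb = { 0 };
	struct engine e = { .emit_flush = lrc_emit_flush };

	e.emit_flush(&rb, 0x1, 0x2);  /* indirect call via the hook */
	printf("tail=%u\n", rb.tail); /* prints tail=4 */
	return 0;
}
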
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 168c6652cda1..07a74ef589bd 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -53,6 +53,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
53 enum pipe pipe = crtc->pipe; 53 enum pipe pipe = crtc->pipe;
54 long timeout = msecs_to_jiffies_timeout(1); 54 long timeout = msecs_to_jiffies_timeout(1);
55 int scanline, min, max, vblank_start; 55 int scanline, min, max, vblank_start;
56 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
56 DEFINE_WAIT(wait); 57 DEFINE_WAIT(wait);
57 58
58 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); 59 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
@@ -81,7 +82,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
81 * other CPUs can see the task state update by the time we 82 * other CPUs can see the task state update by the time we
82 * read the scanline. 83 * read the scanline.
83 */ 84 */
84 prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE); 85 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
85 86
86 scanline = intel_get_crtc_scanline(crtc); 87 scanline = intel_get_crtc_scanline(crtc);
87 if (scanline < min || scanline > max) 88 if (scanline < min || scanline > max)
@@ -100,7 +101,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
100 local_irq_disable(); 101 local_irq_disable();
101 } 102 }
102 103
103 finish_wait(&crtc->vbl_wait, &wait); 104 finish_wait(wq, &wait);
104 105
105 drm_vblank_put(dev, pipe); 106 drm_vblank_put(dev, pipe);
106 107
@@ -163,6 +164,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
163 sprctl &= ~SP_PIXFORMAT_MASK; 164 sprctl &= ~SP_PIXFORMAT_MASK;
164 sprctl &= ~SP_YUV_BYTE_ORDER_MASK; 165 sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
165 sprctl &= ~SP_TILED; 166 sprctl &= ~SP_TILED;
167 sprctl &= ~SP_ROTATE_180;
166 168
167 switch (fb->pixel_format) { 169 switch (fb->pixel_format) {
168 case DRM_FORMAT_YUYV: 170 case DRM_FORMAT_YUYV:
@@ -235,6 +237,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
235 fb->pitches[0]); 237 fb->pitches[0]);
236 linear_offset -= sprsurf_offset; 238 linear_offset -= sprsurf_offset;
237 239
240 if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
241 sprctl |= SP_ROTATE_180;
242
243 x += src_w;
244 y += src_h;
245 linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
246 }
247
238 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 248 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
239 249
240 intel_update_primary_plane(intel_crtc); 250 intel_update_primary_plane(intel_crtc);
@@ -364,6 +374,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
364 sprctl &= ~SPRITE_RGB_ORDER_RGBX; 374 sprctl &= ~SPRITE_RGB_ORDER_RGBX;
365 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; 375 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
366 sprctl &= ~SPRITE_TILED; 376 sprctl &= ~SPRITE_TILED;
377 sprctl &= ~SPRITE_ROTATE_180;
367 378
368 switch (fb->pixel_format) { 379 switch (fb->pixel_format) {
369 case DRM_FORMAT_XBGR8888: 380 case DRM_FORMAT_XBGR8888:
@@ -426,6 +437,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
426 pixel_size, fb->pitches[0]); 437 pixel_size, fb->pitches[0]);
427 linear_offset -= sprsurf_offset; 438 linear_offset -= sprsurf_offset;
428 439
440 if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
441 sprctl |= SPRITE_ROTATE_180;
442
443 /* HSW and BDW do this automagically in hardware */
444 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
445 x += src_w;
446 y += src_h;
447 linear_offset += src_h * fb->pitches[0] +
448 src_w * pixel_size;
449 }
450 }
451
429 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 452 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
430 453
431 intel_update_primary_plane(intel_crtc); 454 intel_update_primary_plane(intel_crtc);
@@ -571,6 +594,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
571 dvscntr &= ~DVS_RGB_ORDER_XBGR; 594 dvscntr &= ~DVS_RGB_ORDER_XBGR;
572 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; 595 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
573 dvscntr &= ~DVS_TILED; 596 dvscntr &= ~DVS_TILED;
597 dvscntr &= ~DVS_ROTATE_180;
574 598
575 switch (fb->pixel_format) { 599 switch (fb->pixel_format) {
576 case DRM_FORMAT_XBGR8888: 600 case DRM_FORMAT_XBGR8888:
@@ -628,6 +652,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
628 pixel_size, fb->pitches[0]); 652 pixel_size, fb->pitches[0]);
629 linear_offset -= dvssurf_offset; 653 linear_offset -= dvssurf_offset;
630 654
655 if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
656 dvscntr |= DVS_ROTATE_180;
657
658 x += src_w;
659 y += src_h;
660 linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
661 }
662
631 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 663 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
632 664
633 intel_update_primary_plane(intel_crtc); 665 intel_update_primary_plane(intel_crtc);
@@ -895,6 +927,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
895 max_scale = intel_plane->max_downscale << 16; 927 max_scale = intel_plane->max_downscale << 16;
896 min_scale = intel_plane->can_scale ? 1 : (1 << 16); 928 min_scale = intel_plane->can_scale ? 1 : (1 << 16);
897 929
930 drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
931 intel_plane->rotation);
932
898 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale); 933 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
899 BUG_ON(hscale < 0); 934 BUG_ON(hscale < 0);
900 935
@@ -933,6 +968,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
933 drm_rect_width(&dst) * hscale - drm_rect_width(&src), 968 drm_rect_width(&dst) * hscale - drm_rect_width(&src),
934 drm_rect_height(&dst) * vscale - drm_rect_height(&src)); 969 drm_rect_height(&dst) * vscale - drm_rect_height(&src));
935 970
971 drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
972 intel_plane->rotation);
973
936 /* sanity check to make sure the src viewport wasn't enlarged */ 974 /* sanity check to make sure the src viewport wasn't enlarged */
937 WARN_ON(src.x1 < (int) src_x || 975 WARN_ON(src.x1 < (int) src_x ||
938 src.y1 < (int) src_y || 976 src.y1 < (int) src_y ||
@@ -1180,18 +1218,45 @@ out_unlock:
1180 return ret; 1218 return ret;
1181} 1219}
1182 1220
1183void intel_plane_restore(struct drm_plane *plane) 1221int intel_plane_set_property(struct drm_plane *plane,
1222 struct drm_property *prop,
1223 uint64_t val)
1224{
1225 struct drm_device *dev = plane->dev;
1226 struct intel_plane *intel_plane = to_intel_plane(plane);
1227 uint64_t old_val;
1228 int ret = -ENOENT;
1229
1230 if (prop == dev->mode_config.rotation_property) {
1231 /* exactly one rotation angle please */
1232 if (hweight32(val & 0xf) != 1)
1233 return -EINVAL;
1234
1235 if (intel_plane->rotation == val)
1236 return 0;
1237
1238 old_val = intel_plane->rotation;
1239 intel_plane->rotation = val;
1240 ret = intel_plane_restore(plane);
1241 if (ret)
1242 intel_plane->rotation = old_val;
1243 }
1244
1245 return ret;
1246}
1247
1248int intel_plane_restore(struct drm_plane *plane)
1184{ 1249{
1185 struct intel_plane *intel_plane = to_intel_plane(plane); 1250 struct intel_plane *intel_plane = to_intel_plane(plane);
1186 1251
1187 if (!plane->crtc || !plane->fb) 1252 if (!plane->crtc || !plane->fb)
1188 return; 1253 return 0;
1189 1254
1190 intel_update_plane(plane, plane->crtc, plane->fb, 1255 return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
1191 intel_plane->crtc_x, intel_plane->crtc_y, 1256 intel_plane->crtc_x, intel_plane->crtc_y,
1192 intel_plane->crtc_w, intel_plane->crtc_h, 1257 intel_plane->crtc_w, intel_plane->crtc_h,
1193 intel_plane->src_x, intel_plane->src_y, 1258 intel_plane->src_x, intel_plane->src_y,
1194 intel_plane->src_w, intel_plane->src_h); 1259 intel_plane->src_w, intel_plane->src_h);
1195} 1260}
1196 1261
1197void intel_plane_disable(struct drm_plane *plane) 1262void intel_plane_disable(struct drm_plane *plane)
@@ -1206,6 +1271,7 @@ static const struct drm_plane_funcs intel_plane_funcs = {
1206 .update_plane = intel_update_plane, 1271 .update_plane = intel_update_plane,
1207 .disable_plane = intel_disable_plane, 1272 .disable_plane = intel_disable_plane,
1208 .destroy = intel_destroy_plane, 1273 .destroy = intel_destroy_plane,
1274 .set_property = intel_plane_set_property,
1209}; 1275};
1210 1276
1211static uint32_t ilk_plane_formats[] = { 1277static uint32_t ilk_plane_formats[] = {
@@ -1310,13 +1376,28 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1310 1376
1311 intel_plane->pipe = pipe; 1377 intel_plane->pipe = pipe;
1312 intel_plane->plane = plane; 1378 intel_plane->plane = plane;
1379 intel_plane->rotation = BIT(DRM_ROTATE_0);
1313 possible_crtcs = (1 << pipe); 1380 possible_crtcs = (1 << pipe);
1314 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, 1381 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1315 &intel_plane_funcs, 1382 &intel_plane_funcs,
1316 plane_formats, num_plane_formats, 1383 plane_formats, num_plane_formats,
1317 false); 1384 DRM_PLANE_TYPE_OVERLAY);
1318 if (ret) 1385 if (ret) {
1319 kfree(intel_plane); 1386 kfree(intel_plane);
1387 goto out;
1388 }
1389
1390 if (!dev->mode_config.rotation_property)
1391 dev->mode_config.rotation_property =
1392 drm_mode_create_rotation_property(dev,
1393 BIT(DRM_ROTATE_0) |
1394 BIT(DRM_ROTATE_180));
1395
1396 if (dev->mode_config.rotation_property)
1397 drm_object_attach_property(&intel_plane->base.base,
1398 dev->mode_config.rotation_property,
1399 intel_plane->rotation);
1320 1400
1401 out:
1321 return ret; 1402 return ret;
1322} 1403}
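
Each 180-degree rotation hunk above applies the same transformation: bump x/y to the far corner of the source window and advance the linear offset past its last byte, so the hardware scans out backwards. A standalone sketch of the offset arithmetic, with example pitch and pixel-size values:

#include <stdio.h>

static unsigned int rotate_180_offset(unsigned int linear_offset,
				      unsigned int src_w, unsigned int src_h,
				      unsigned int pitch, unsigned int cpp)
{
	/* same form as: linear_offset += src_h * pitches[0] + src_w * cpp */
	return linear_offset + src_h * pitch + src_w * cpp;
}

int main(void)
{
	/* 64x32 source window, 1024-byte pitch, 4 bytes per pixel */
	printf("%u\n", rotate_180_offset(0, 64, 32, 1024, 4)); /* 33024 */
	return 0;
}
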
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e81bc3bdc533..918b76163965 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -101,7 +101,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
101{ 101{
102 u32 forcewake_ack; 102 u32 forcewake_ack;
103 103
104 if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev)) 104 if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
105 forcewake_ack = FORCEWAKE_ACK_HSW; 105 forcewake_ack = FORCEWAKE_ACK_HSW;
106 else 106 else
107 forcewake_ack = FORCEWAKE_MT_ACK; 107 forcewake_ack = FORCEWAKE_MT_ACK;
@@ -334,7 +334,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
334 else if (IS_GEN6(dev) || IS_GEN7(dev)) 334 else if (IS_GEN6(dev) || IS_GEN7(dev))
335 __gen6_gt_force_wake_reset(dev_priv); 335 __gen6_gt_force_wake_reset(dev_priv);
336 336
337 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) 337 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
338 __gen7_gt_force_wake_mt_reset(dev_priv); 338 __gen7_gt_force_wake_mt_reset(dev_priv);
339 339
340 if (restore) { /* If reset with a user forcewake, try to restore */ 340 if (restore) { /* If reset with a user forcewake, try to restore */
@@ -838,7 +838,7 @@ void intel_uncore_init(struct drm_device *dev)
838 if (IS_VALLEYVIEW(dev)) { 838 if (IS_VALLEYVIEW(dev)) {
839 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; 839 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
840 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; 840 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
841 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { 841 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
842 dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; 842 dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
843 dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; 843 dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
844 } else if (IS_IVYBRIDGE(dev)) { 844 } else if (IS_IVYBRIDGE(dev)) {
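
The IS_GEN8 -> IS_BROADWELL swaps above keep Cherryview, which in this revision is a gen8 part, from being routed to the Haswell/Broadwell multithreaded forcewake; it takes the Valleyview path instead. A toy sketch of the corrected selection order, using stand-in booleans for the i915 platform macros:

#include <stdbool.h>
#include <stdio.h>

enum fw_path { FW_VLV, FW_MT, FW_IVB, FW_OTHER };

static enum fw_path pick_forcewake(bool is_vlv, bool is_hsw,
				   bool is_bdw, bool is_ivb)
{
	if (is_vlv)                /* CHV matches here, not below */
		return FW_VLV;
	else if (is_hsw || is_bdw) /* was IS_GEN8, which CHV also hit */
		return FW_MT;
	else if (is_ivb)
		return FW_IVB;
	return FW_OTHER;
}

int main(void)
{
	/* a Cherryview part: gen8, but routed down the VLV path */
	printf("%d\n", pick_forcewake(true, false, false, false)); /* 0 */
	return 0;
}
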
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c3bf059ba720..8cfa9cb74c86 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -502,31 +502,31 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
502 return err; 502 return err;
503 } 503 }
504 504
505 /* Make drm_addbufs happy by not trying to create a mapping for less 505 /* Make drm_legacy_addbufs happy by not trying to create a mapping for
506 * than a page. 506 * less than a page.
507 */ 507 */
508 if (warp_size < PAGE_SIZE) 508 if (warp_size < PAGE_SIZE)
509 warp_size = PAGE_SIZE; 509 warp_size = PAGE_SIZE;
510 510
511 offset = 0; 511 offset = 0;
512 err = drm_addmap(dev, offset, warp_size, 512 err = drm_legacy_addmap(dev, offset, warp_size,
513 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); 513 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
514 if (err) { 514 if (err) {
515 DRM_ERROR("Unable to map WARP microcode: %d\n", err); 515 DRM_ERROR("Unable to map WARP microcode: %d\n", err);
516 return err; 516 return err;
517 } 517 }
518 518
519 offset += warp_size; 519 offset += warp_size;
520 err = drm_addmap(dev, offset, dma_bs->primary_size, 520 err = drm_legacy_addmap(dev, offset, dma_bs->primary_size,
521 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); 521 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
522 if (err) { 522 if (err) {
523 DRM_ERROR("Unable to map primary DMA region: %d\n", err); 523 DRM_ERROR("Unable to map primary DMA region: %d\n", err);
524 return err; 524 return err;
525 } 525 }
526 526
527 offset += dma_bs->primary_size; 527 offset += dma_bs->primary_size;
-	err = drm_addmap(dev, offset, secondary_size,
+	err = drm_legacy_addmap(dev, offset, secondary_size,
 			_DRM_AGP, 0, &dev->agp_buffer_map);
 	if (err) {
 		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
 		return err;
@@ -538,7 +538,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 	req.flags = _DRM_AGP_BUFFER;
 	req.agp_start = offset;
 
-	err = drm_addbufs_agp(dev, &req);
+	err = drm_legacy_addbufs_agp(dev, &req);
 	if (err) {
 		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
 		return err;
@@ -559,16 +559,16 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 	}
 
 	offset += secondary_size;
-	err = drm_addmap(dev, offset, agp_size - offset,
+	err = drm_legacy_addmap(dev, offset, agp_size - offset,
 			_DRM_AGP, 0, &dev_priv->agp_textures);
 	if (err) {
 		DRM_ERROR("Unable to map AGP texture region %d\n", err);
 		return err;
 	}
 
-	drm_core_ioremap(dev_priv->warp, dev);
-	drm_core_ioremap(dev_priv->primary, dev);
-	drm_core_ioremap(dev->agp_buffer_map, dev);
+	drm_legacy_ioremap(dev_priv->warp, dev);
+	drm_legacy_ioremap(dev_priv->primary, dev);
+	drm_legacy_ioremap(dev->agp_buffer_map, dev);
 
 	if (!dev_priv->warp->handle ||
 	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
@@ -602,7 +602,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 *
 * \todo
 * Determine whether the maximum address passed to drm_pci_alloc is correct.
- * The same goes for drm_addbufs_pci.
+ * The same goes for drm_legacy_addbufs_pci.
 *
 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
 */
@@ -622,15 +622,15 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
 		return -EFAULT;
 	}
 
-	/* Make drm_addbufs happy by not trying to create a mapping for less
-	 * than a page.
+	/* Make drm_legacy_addbufs happy by not trying to create a mapping for
+	 * less than a page.
 	 */
 	if (warp_size < PAGE_SIZE)
 		warp_size = PAGE_SIZE;
 
 	/* The proper alignment is 0x100 for this mapping */
-	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
+	err = drm_legacy_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
 			_DRM_READ_ONLY, &dev_priv->warp);
 	if (err != 0) {
 		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
 			err);
@@ -645,8 +645,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
 	for (primary_size = dma_bs->primary_size; primary_size != 0;
 	     primary_size >>= 1) {
 		/* The proper alignment for this mapping is 0x04 */
-		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
+		err = drm_legacy_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
 				_DRM_READ_ONLY, &dev_priv->primary);
 		if (!err)
 			break;
 	}
@@ -669,7 +669,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
 		req.count = bin_count;
 		req.size = dma_bs->secondary_bin_size;
 
-		err = drm_addbufs_pci(dev, &req);
+		err = drm_legacy_addbufs_pci(dev, &req);
 		if (!err)
 			break;
 	}
@@ -708,15 +708,16 @@ static int mga_do_dma_bootstrap(struct drm_device *dev,
 	/* The first steps are the same for both PCI and AGP based DMA. Map
 	 * the cards MMIO registers and map a status page.
 	 */
-	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
-			_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+	err = drm_legacy_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
+				_DRM_REGISTERS, _DRM_READ_ONLY,
+				&dev_priv->mmio);
 	if (err) {
 		DRM_ERROR("Unable to map MMIO region: %d\n", err);
 		return err;
 	}
 
-	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+	err = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 			_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
 			&dev_priv->status);
 	if (err) {
 		DRM_ERROR("Unable to map status region: %d\n", err);
@@ -809,7 +810,7 @@ static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
 	dev_priv->texture_offset = init->texture_offset[0];
 	dev_priv->texture_size = init->texture_size[0];
 
-	dev_priv->sarea = drm_getsarea(dev);
+	dev_priv->sarea = drm_legacy_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("failed to find sarea!\n");
 		return -EINVAL;
@@ -820,37 +821,37 @@ static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
 		dev_priv->dma_access = MGA_PAGPXFER;
 		dev_priv->wagp_enable = MGA_WAGP_ENABLE;
 
-		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
 		if (!dev_priv->status) {
 			DRM_ERROR("failed to find status page!\n");
 			return -EINVAL;
 		}
-		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+		dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
 		if (!dev_priv->mmio) {
 			DRM_ERROR("failed to find mmio region!\n");
 			return -EINVAL;
 		}
-		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
+		dev_priv->warp = drm_legacy_findmap(dev, init->warp_offset);
 		if (!dev_priv->warp) {
 			DRM_ERROR("failed to find warp microcode region!\n");
 			return -EINVAL;
 		}
-		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
+		dev_priv->primary = drm_legacy_findmap(dev, init->primary_offset);
 		if (!dev_priv->primary) {
 			DRM_ERROR("failed to find primary dma region!\n");
 			return -EINVAL;
 		}
 		dev->agp_buffer_token = init->buffers_offset;
 		dev->agp_buffer_map =
-			drm_core_findmap(dev, init->buffers_offset);
+			drm_legacy_findmap(dev, init->buffers_offset);
 		if (!dev->agp_buffer_map) {
 			DRM_ERROR("failed to find dma buffer region!\n");
 			return -EINVAL;
 		}
 
-		drm_core_ioremap(dev_priv->warp, dev);
-		drm_core_ioremap(dev_priv->primary, dev);
-		drm_core_ioremap(dev->agp_buffer_map, dev);
+		drm_legacy_ioremap(dev_priv->warp, dev);
+		drm_legacy_ioremap(dev_priv->primary, dev);
+		drm_legacy_ioremap(dev->agp_buffer_map, dev);
 	}
 
 	dev_priv->sarea_priv =
@@ -936,14 +937,14 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
 
 		if ((dev_priv->warp != NULL)
 		    && (dev_priv->warp->type != _DRM_CONSISTENT))
-			drm_core_ioremapfree(dev_priv->warp, dev);
+			drm_legacy_ioremapfree(dev_priv->warp, dev);
 
 		if ((dev_priv->primary != NULL)
 		    && (dev_priv->primary->type != _DRM_CONSISTENT))
-			drm_core_ioremapfree(dev_priv->primary, dev);
+			drm_legacy_ioremapfree(dev_priv->primary, dev);
 
 		if (dev->agp_buffer_map != NULL)
-			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
 
 		if (dev_priv->used_new_dma_init) {
 #if __OS_HAS_AGP
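
The mga_dma.c hunks above are part of this merge's legacy-interface split: the UMS-era map and buffer helpers (drm_addmap, drm_addbufs_agp/pci, drm_core_findmap, drm_core_ioremap/ioremapfree, drm_getsarea) are renamed into the drm_legacy_* namespace with unchanged semantics, so modern KMS drivers cannot pick them up by accident. A minimal sketch of the resulting call pattern; example_map_mmio and its parameters are illustrative, only the drm_legacy_* calls and flags come from the hunks above:

    /* Sketch: register + ioremap a region with the renamed legacy API. */
    static int example_map_mmio(struct drm_device *dev, resource_size_t base,
                                unsigned int size, struct drm_local_map **map)
    {
        int err;

        /* was drm_addmap() */
        err = drm_legacy_addmap(dev, base, size, _DRM_REGISTERS,
                                _DRM_READ_ONLY, map);
        if (err)
            return err;

        /* was drm_core_ioremap(); sets (*map)->handle on success */
        drm_legacy_ioremap(*map, dev);
        return (*map)->handle ? 0 : -ENOMEM;
    }
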
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 6b1a87c8aac5..5e2f131a6a72 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -48,7 +48,7 @@ static const struct file_operations mga_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = mga_compat_ioctl,
@@ -64,6 +64,7 @@ static struct drm_driver driver = {
 	.load = mga_driver_load,
 	.unload = mga_driver_unload,
 	.lastclose = mga_driver_lastclose,
+	.set_busid = drm_pci_set_busid,
 	.dma_quiescent = mga_driver_dma_quiescent,
 	.device_is_agp = mga_driver_device_is_agp,
 	.get_vblank_counter = mga_get_vblank_counter,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index fe453213600a..b4a2014917e5 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -31,6 +31,8 @@
 #ifndef __MGA_DRV_H__
 #define __MGA_DRV_H__
 
+#include <drm/drm_legacy.h>
+
 /* General customization:
 */
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 2d75d6df0789..97745991544d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -91,6 +91,7 @@ static struct drm_driver driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET,
 	.load = mgag200_driver_load,
 	.unload = mgag200_driver_unload,
+	.set_busid = drm_pci_set_busid,
 	.fops = &mgag200_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 80de23d9b9c9..e9eea1d4e7c3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -22,6 +22,8 @@
 #include <drm/ttm/ttm_memory.h>
 #include <drm/ttm/ttm_module.h>
 
+#include <drm/drm_gem.h>
+
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 
@@ -190,8 +192,6 @@ struct mga_device {
 	resource_size_t rmmio_size;
 	void __iomem *rmmio;
 
-	drm_local_map_t *framebuffer;
-
 	struct mga_mc mc;
 	struct mga_mode_info mode_info;
 
@@ -224,7 +224,7 @@ struct mgag200_bo {
 	struct ttm_placement placement;
 	struct ttm_bo_kmap_obj kmap;
 	struct drm_gem_object gem;
-	u32 placements[3];
+	struct ttm_place placements[3];
 	int pin_count;
 };
 #define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 5451dc58eff1..4415af3666ab 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -158,7 +158,8 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
 static int mgag200fb_create(struct drm_fb_helper *helper,
 			   struct drm_fb_helper_surface_size *sizes)
 {
-	struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+	struct mga_fbdev *mfbdev =
+		container_of(helper, struct mga_fbdev, helper);
 	struct drm_device *dev = mfbdev->helper.dev;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct mga_device *mdev = dev->dev_private;
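
The mgag200fb_create() change swaps a raw pointer cast for container_of(): the cast was only correct while the drm_fb_helper happened to sit at offset zero of struct mga_fbdev, whereas container_of() subtracts the member's offset and stays correct under any layout. A self-contained sketch of the idiom; example_fbdev is illustrative, not the real struct:

    #include <linux/kernel.h>    /* container_of() */

    struct example_fbdev {
        int flags;                    /* helper is not the first member here, */
        struct drm_fb_helper helper;  /* so a plain cast of a helper pointer  */
    };                                /* would point 'flags' bytes too early  */

    static struct example_fbdev *to_example_fbdev(struct drm_fb_helper *h)
    {
        return container_of(h, struct example_fbdev, helper);
    }
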
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 45f04dea0ac2..83485ab81ce8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1483,11 +1483,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 {
 	struct drm_device *dev = connector->dev;
 	struct mga_device *mdev = (struct mga_device*)dev->dev_private;
-	struct mga_fbdev *mfbdev = mdev->mfbdev;
-	struct drm_fb_helper *fb_helper = &mfbdev->helper;
-	struct drm_fb_helper_connector *fb_helper_conn = NULL;
 	int bpp = 32;
-	int i = 0;
 
 	if (IS_G200_SE(mdev)) {
 		if (mdev->unique_rev_id == 0x01) {
@@ -1537,21 +1533,14 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 	}
 
 	/* Validate the mode input by the user */
-	for (i = 0; i < fb_helper->connector_count; i++) {
-		if (fb_helper->connector_info[i]->connector == connector) {
-			/* Found the helper for this connector */
-			fb_helper_conn = fb_helper->connector_info[i];
-			if (fb_helper_conn->cmdline_mode.specified) {
-				if (fb_helper_conn->cmdline_mode.bpp_specified) {
-					bpp = fb_helper_conn->cmdline_mode.bpp;
-				}
-			}
-		}
+	if (connector->cmdline_mode.specified) {
+		if (connector->cmdline_mode.bpp_specified)
+			bpp = connector->cmdline_mode.bpp;
 	}
 
 	if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
-		if (fb_helper_conn)
-			fb_helper_conn->cmdline_mode.specified = false;
+		if (connector->cmdline_mode.specified)
+			connector->cmdline_mode.specified = false;
 		return MODE_BAD;
 	}
 
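
mga_vga_mode_valid() keeps the same VRAM check but now reads the command-line mode straight from the connector (drm_connector carries a cmdline_mode field in this kernel) instead of searching the fb-helper's connector array. The rejection rule itself is plain arithmetic: a mode needs hdisplay * vdisplay * (bpp / 8) bytes, so 1920 x 1080 at the default 32 bpp needs 1920 * 1080 * 4 = 8294400 bytes and only just fits an 8 MiB (8388608-byte) part, while a 16 bpp command line (bpp_specified) halves the requirement. A sketch of the test in isolation:

    /* Sketch of the framebuffer-fits-in-VRAM test used above. */
    static bool example_mode_fits(int hdisplay, int vdisplay, int bpp,
                                  unsigned long vram_size)
    {
        return (unsigned long)hdisplay * vdisplay * (bpp / 8) <= vram_size;
    }
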
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 5a00e90696de..d16964ea0ed4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -293,18 +293,22 @@ void mgag200_mm_fini(struct mga_device *mdev)
 void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
 {
 	u32 c = 0;
-	bo->placement.fpfn = 0;
-	bo->placement.lpfn = 0;
+	unsigned i;
+
 	bo->placement.placement = bo->placements;
 	bo->placement.busy_placement = bo->placements;
 	if (domain & TTM_PL_FLAG_VRAM)
-		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+		bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 	if (domain & TTM_PL_FLAG_SYSTEM)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	if (!c)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	bo->placement.num_placement = c;
 	bo->placement.num_busy_placement = c;
+	for (i = 0; i < c; ++i) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
+	}
 }
 
 int mgag200_bo_create(struct drm_device *dev, int size, int align,
@@ -335,7 +339,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
 			ttm_bo_type_device, &mgabo->placement,
 			align >> PAGE_SHIFT, false, NULL, acc_size,
-			NULL, mgag200_bo_ttm_destroy);
+			NULL, NULL, mgag200_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
@@ -361,7 +365,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 	mgag200_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -384,7 +388,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 		return 0;
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -408,7 +412,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 
 	mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
@@ -424,7 +428,7 @@ int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct mga_device *mdev;
 
 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
-		return drm_mmap(filp, vma);
+		return -EINVAL;
 
 	file_priv = filp->private_data;
 	mdev = file_priv->minor->dev->dev_private;
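
The mgag200_ttm.c hunks track a TTM core change in this merge: placement arrays are no longer bare u32 flag words but struct ttm_place entries, which pair the flags with an fpfn/lpfn page-frame range (0/0 meaning unrestricted), hence the new loop zeroing fpfn/lpfn and the .flags accessors in the pin/unpin/push_sysram paths. ttm_bo_init() also grows one extra argument in the same series (the added NULL in the mgag200_bo_create hunk), and with drm_mmap() reserved for legacy drivers an out-of-range mmap offset now simply fails with -EINVAL. A sketch of one placement entry under the new layout (field names as used by the hunks above):

    struct ttm_place place = {
        .fpfn  = 0,    /* first allowed page frame; 0 = no lower bound */
        .lpfn  = 0,    /* last allowed page frame; 0 = no upper bound  */
        .flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM,
    };
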
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index c99c50de3226..9d907c526c94 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -4,6 +4,7 @@ config DRM_MSM
 	depends on DRM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	select DRM_KMS_HELPER
+	select DRM_PANEL
 	select SHMEM
 	select TMPFS
 	default y
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 93ca49c8df44..6283dcb96af5 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -4,6 +4,7 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
 endif
 
 msm-y := \
+	adreno/adreno_device.o \
 	adreno/adreno_gpu.o \
 	adreno/a3xx_gpu.o \
 	hdmi/hdmi.o \
@@ -18,6 +19,8 @@ msm-y := \
 	mdp/mdp_kms.o \
 	mdp/mdp4/mdp4_crtc.o \
 	mdp/mdp4/mdp4_dtv_encoder.o \
+	mdp/mdp4/mdp4_lcdc_encoder.o \
+	mdp/mdp4/mdp4_lvds_connector.o \
 	mdp/mdp4/mdp4_irq.o \
 	mdp/mdp4/mdp4_kms.o \
 	mdp/mdp4/mdp4_plane.o \
@@ -39,5 +42,6 @@ msm-y := \
 	msm_ringbuffer.o
 
 msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
+msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
 
 obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index a8a144b38eaa..a3104598c27f 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 303e8a9e91a5..82d015279b47 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -654,7 +654,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 {
-	return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+	return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
@@ -662,7 +662,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 {
-	return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+	return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
@@ -1696,7 +1696,7 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
 }
-#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
 #define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
 static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
 {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 2773600c9488..218c5b060398 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -35,10 +35,8 @@
 	  A3XX_INT0_CP_AHB_ERROR_HALT | \
 	  A3XX_INT0_UCHE_OOB_ACCESS)
 
+extern bool hang_debug;
 
-static bool hang_debug = false;
-MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
-module_param_named(hang_debug, hang_debug, bool, 0600);
 static void a3xx_dump(struct msm_gpu *gpu);
 
 static void a3xx_me_init(struct msm_gpu *gpu)
@@ -387,58 +385,26 @@ static const unsigned int a3xx_registers[] = {
 	0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
 	0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
 	0x303c, 0x303c, 0x305e, 0x305f,
+	~0 /* sentinel */
 };
 
 #ifdef CONFIG_DEBUG_FS
 static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	int i;
-
-	adreno_show(gpu, m);
-
 	gpu->funcs->pm_resume(gpu);
-
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
-
-	/* dump these out in a form that can be parsed by demsm: */
-	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
-	for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
-		uint32_t start = a3xx_registers[i];
-		uint32_t end = a3xx_registers[i+1];
-		uint32_t addr;
-
-		for (addr = start; addr <= end; addr++) {
-			uint32_t val = gpu_read(gpu, addr);
-			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
-		}
-	}
-
 	gpu->funcs->pm_suspend(gpu);
+	adreno_show(gpu, m);
 }
 #endif
 
 /* would be nice to not have to duplicate the _show() stuff with printk(): */
 static void a3xx_dump(struct msm_gpu *gpu)
 {
-	int i;
-
-	adreno_dump(gpu);
 	printk("status: %08x\n",
 			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
-
-	/* dump these out in a form that can be parsed by demsm: */
-	printk("IO:region %s 00000000 00020000\n", gpu->name);
-	for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
-		uint32_t start = a3xx_registers[i];
-		uint32_t end = a3xx_registers[i+1];
-		uint32_t addr;
-
-		for (addr = start; addr <= end; addr++) {
-			uint32_t val = gpu_read(gpu, addr);
-			printk("IO:R %08x %08x\n", addr<<2, val);
-		}
-	}
+	adreno_dump(gpu);
 }
 
 static const struct adreno_gpu_funcs funcs = {
@@ -474,7 +440,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	struct msm_gpu *gpu;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
-	struct adreno_platform_config *config;
 	int ret;
 
 	if (!pdev) {
@@ -483,8 +448,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	config = pdev->dev.platform_data;
-
 	a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
 	if (!a3xx_gpu) {
 		ret = -ENOMEM;
@@ -496,20 +459,12 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 
 	a3xx_gpu->pdev = pdev;
 
-	gpu->fast_rate = config->fast_rate;
-	gpu->slow_rate = config->slow_rate;
-	gpu->bus_freq = config->bus_freq;
-#ifdef CONFIG_MSM_BUS_SCALING
-	gpu->bus_scale_table = config->bus_scale_table;
-#endif
-
-	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
-			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
-
 	gpu->perfcntrs = perfcntrs;
 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
+	adreno_gpu->registers = a3xx_registers;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
 	if (ret)
 		goto fail;
 
@@ -549,158 +504,3 @@ fail:
 
 	return ERR_PTR(ret);
 }
552
553/*
554 * The a3xx device:
555 */
556
557#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
558# include <mach/kgsl.h>
559#endif
560
561static void set_gpu_pdev(struct drm_device *dev,
562 struct platform_device *pdev)
563{
564 struct msm_drm_private *priv = dev->dev_private;
565 priv->gpu_pdev = pdev;
566}
567
568static int a3xx_bind(struct device *dev, struct device *master, void *data)
569{
570 static struct adreno_platform_config config = {};
571#ifdef CONFIG_OF
572 struct device_node *child, *node = dev->of_node;
573 u32 val;
574 int ret;
575
576 ret = of_property_read_u32(node, "qcom,chipid", &val);
577 if (ret) {
578 dev_err(dev, "could not find chipid: %d\n", ret);
579 return ret;
580 }
581
582 config.rev = ADRENO_REV((val >> 24) & 0xff,
583 (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
584
585 /* find clock rates: */
586 config.fast_rate = 0;
587 config.slow_rate = ~0;
588 for_each_child_of_node(node, child) {
589 if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
590 struct device_node *pwrlvl;
591 for_each_child_of_node(child, pwrlvl) {
592 ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
593 if (ret) {
594 dev_err(dev, "could not find gpu-freq: %d\n", ret);
595 return ret;
596 }
597 config.fast_rate = max(config.fast_rate, val);
598 config.slow_rate = min(config.slow_rate, val);
599 }
600 }
601 }
602
603 if (!config.fast_rate) {
604 dev_err(dev, "could not find clk rates\n");
605 return -ENXIO;
606 }
607
608#else
609 struct kgsl_device_platform_data *pdata = dev->platform_data;
610 uint32_t version = socinfo_get_version();
611 if (cpu_is_apq8064ab()) {
612 config.fast_rate = 450000000;
613 config.slow_rate = 27000000;
614 config.bus_freq = 4;
615 config.rev = ADRENO_REV(3, 2, 1, 0);
616 } else if (cpu_is_apq8064()) {
617 config.fast_rate = 400000000;
618 config.slow_rate = 27000000;
619 config.bus_freq = 4;
620
621 if (SOCINFO_VERSION_MAJOR(version) == 2)
622 config.rev = ADRENO_REV(3, 2, 0, 2);
623 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
624 (SOCINFO_VERSION_MINOR(version) == 1))
625 config.rev = ADRENO_REV(3, 2, 0, 1);
626 else
627 config.rev = ADRENO_REV(3, 2, 0, 0);
628
629 } else if (cpu_is_msm8960ab()) {
630 config.fast_rate = 400000000;
631 config.slow_rate = 320000000;
632 config.bus_freq = 4;
633
634 if (SOCINFO_VERSION_MINOR(version) == 0)
635 config.rev = ADRENO_REV(3, 2, 1, 0);
636 else
637 config.rev = ADRENO_REV(3, 2, 1, 1);
638
639 } else if (cpu_is_msm8930()) {
640 config.fast_rate = 400000000;
641 config.slow_rate = 27000000;
642 config.bus_freq = 3;
643
644 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
645 (SOCINFO_VERSION_MINOR(version) == 2))
646 config.rev = ADRENO_REV(3, 0, 5, 2);
647 else
648 config.rev = ADRENO_REV(3, 0, 5, 0);
649
650 }
651# ifdef CONFIG_MSM_BUS_SCALING
652 config.bus_scale_table = pdata->bus_scale_table;
653# endif
654#endif
655 dev->platform_data = &config;
656 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
657 return 0;
658}
659
660static void a3xx_unbind(struct device *dev, struct device *master,
661 void *data)
662{
663 set_gpu_pdev(dev_get_drvdata(master), NULL);
664}
665
666static const struct component_ops a3xx_ops = {
667 .bind = a3xx_bind,
668 .unbind = a3xx_unbind,
669};
670
671static int a3xx_probe(struct platform_device *pdev)
672{
673 return component_add(&pdev->dev, &a3xx_ops);
674}
675
676static int a3xx_remove(struct platform_device *pdev)
677{
678 component_del(&pdev->dev, &a3xx_ops);
679 return 0;
680}
681
682static const struct of_device_id dt_match[] = {
683 { .compatible = "qcom,adreno-3xx" },
684 /* for backwards compat w/ downstream kgsl DT files: */
685 { .compatible = "qcom,kgsl-3d0" },
686 {}
687};
688
689static struct platform_driver a3xx_driver = {
690 .probe = a3xx_probe,
691 .remove = a3xx_remove,
692 .driver = {
693 .name = "kgsl-3d0",
694 .of_match_table = dt_match,
695 },
696};
697
698void __init a3xx_register(void)
699{
700 platform_driver_register(&a3xx_driver);
701}
702
703void __exit a3xx_unregister(void)
704{
705 platform_driver_unregister(&a3xx_driver);
706}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9de19ac2e86c..cc341bc62b51 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
new file mode 100644
index 000000000000..7ab85af3a7db
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -0,0 +1,285 @@
1/*
2 * Copyright (C) 2013-2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "adreno_gpu.h"
19
20#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
21# include <mach/kgsl.h>
22#endif
23
24#define ANY_ID 0xff
25
26bool hang_debug = false;
27MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
28module_param_named(hang_debug, hang_debug, bool, 0600);
29
30struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
31
32static const struct adreno_info gpulist[] = {
33 {
34 .rev = ADRENO_REV(3, 0, 5, ANY_ID),
35 .revn = 305,
36 .name = "A305",
37 .pm4fw = "a300_pm4.fw",
38 .pfpfw = "a300_pfp.fw",
39 .gmem = SZ_256K,
40 .init = a3xx_gpu_init,
41 }, {
42 .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
43 .revn = 320,
44 .name = "A320",
45 .pm4fw = "a300_pm4.fw",
46 .pfpfw = "a300_pfp.fw",
47 .gmem = SZ_512K,
48 .init = a3xx_gpu_init,
49 }, {
50 .rev = ADRENO_REV(3, 3, 0, ANY_ID),
51 .revn = 330,
52 .name = "A330",
53 .pm4fw = "a330_pm4.fw",
54 .pfpfw = "a330_pfp.fw",
55 .gmem = SZ_1M,
56 .init = a3xx_gpu_init,
57 },
58};
59
60MODULE_FIRMWARE("a300_pm4.fw");
61MODULE_FIRMWARE("a300_pfp.fw");
62MODULE_FIRMWARE("a330_pm4.fw");
63MODULE_FIRMWARE("a330_pfp.fw");
64
65static inline bool _rev_match(uint8_t entry, uint8_t id)
66{
67 return (entry == ANY_ID) || (entry == id);
68}
69
70const struct adreno_info *adreno_info(struct adreno_rev rev)
71{
72 int i;
73
74 /* identify gpu: */
75 for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
76 const struct adreno_info *info = &gpulist[i];
77 if (_rev_match(info->rev.core, rev.core) &&
78 _rev_match(info->rev.major, rev.major) &&
79 _rev_match(info->rev.minor, rev.minor) &&
80 _rev_match(info->rev.patchid, rev.patchid))
81 return info;
82 }
83
84 return NULL;
85}
86
87struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
88{
89 struct msm_drm_private *priv = dev->dev_private;
90 struct platform_device *pdev = priv->gpu_pdev;
91 struct adreno_platform_config *config;
92 struct adreno_rev rev;
93 const struct adreno_info *info;
94 struct msm_gpu *gpu = NULL;
95
96 if (!pdev) {
97 dev_err(dev->dev, "no adreno device\n");
98 return NULL;
99 }
100
101 config = pdev->dev.platform_data;
102 rev = config->rev;
103 info = adreno_info(config->rev);
104
105 if (!info) {
106 dev_warn(dev->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
107 rev.core, rev.major, rev.minor, rev.patchid);
108 return NULL;
109 }
110
111 DBG("Found GPU: %u.%u.%u.%u", rev.core, rev.major,
112 rev.minor, rev.patchid);
113
114 gpu = info->init(dev);
115 if (IS_ERR(gpu)) {
116 dev_warn(dev->dev, "failed to load adreno gpu\n");
117 gpu = NULL;
118 /* not fatal */
119 }
120
121 if (gpu) {
122 int ret;
123 mutex_lock(&dev->struct_mutex);
124 gpu->funcs->pm_resume(gpu);
125 mutex_unlock(&dev->struct_mutex);
126 ret = gpu->funcs->hw_init(gpu);
127 if (ret) {
128 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
129 gpu->funcs->destroy(gpu);
130 gpu = NULL;
131 } else {
132 /* give inactive pm a chance to kick in: */
133 msm_gpu_retire(gpu);
134 }
135 }
136
137 return gpu;
138}
139
140static void set_gpu_pdev(struct drm_device *dev,
141 struct platform_device *pdev)
142{
143 struct msm_drm_private *priv = dev->dev_private;
144 priv->gpu_pdev = pdev;
145}
146
147static int adreno_bind(struct device *dev, struct device *master, void *data)
148{
149 static struct adreno_platform_config config = {};
150#ifdef CONFIG_OF
151 struct device_node *child, *node = dev->of_node;
152 u32 val;
153 int ret;
154
155 ret = of_property_read_u32(node, "qcom,chipid", &val);
156 if (ret) {
157 dev_err(dev, "could not find chipid: %d\n", ret);
158 return ret;
159 }
160
161 config.rev = ADRENO_REV((val >> 24) & 0xff,
162 (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
163
164 /* find clock rates: */
165 config.fast_rate = 0;
166 config.slow_rate = ~0;
167 for_each_child_of_node(node, child) {
168 if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
169 struct device_node *pwrlvl;
170 for_each_child_of_node(child, pwrlvl) {
171 ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
172 if (ret) {
173 dev_err(dev, "could not find gpu-freq: %d\n", ret);
174 return ret;
175 }
176 config.fast_rate = max(config.fast_rate, val);
177 config.slow_rate = min(config.slow_rate, val);
178 }
179 }
180 }
181
182 if (!config.fast_rate) {
183 dev_err(dev, "could not find clk rates\n");
184 return -ENXIO;
185 }
186
187#else
188 struct kgsl_device_platform_data *pdata = dev->platform_data;
189 uint32_t version = socinfo_get_version();
190 if (cpu_is_apq8064ab()) {
191 config.fast_rate = 450000000;
192 config.slow_rate = 27000000;
193 config.bus_freq = 4;
194 config.rev = ADRENO_REV(3, 2, 1, 0);
195 } else if (cpu_is_apq8064()) {
196 config.fast_rate = 400000000;
197 config.slow_rate = 27000000;
198 config.bus_freq = 4;
199
200 if (SOCINFO_VERSION_MAJOR(version) == 2)
201 config.rev = ADRENO_REV(3, 2, 0, 2);
202 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
203 (SOCINFO_VERSION_MINOR(version) == 1))
204 config.rev = ADRENO_REV(3, 2, 0, 1);
205 else
206 config.rev = ADRENO_REV(3, 2, 0, 0);
207
208 } else if (cpu_is_msm8960ab()) {
209 config.fast_rate = 400000000;
210 config.slow_rate = 320000000;
211 config.bus_freq = 4;
212
213 if (SOCINFO_VERSION_MINOR(version) == 0)
214 config.rev = ADRENO_REV(3, 2, 1, 0);
215 else
216 config.rev = ADRENO_REV(3, 2, 1, 1);
217
218 } else if (cpu_is_msm8930()) {
219 config.fast_rate = 400000000;
220 config.slow_rate = 27000000;
221 config.bus_freq = 3;
222
223 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
224 (SOCINFO_VERSION_MINOR(version) == 2))
225 config.rev = ADRENO_REV(3, 0, 5, 2);
226 else
227 config.rev = ADRENO_REV(3, 0, 5, 0);
228
229 }
230# ifdef CONFIG_MSM_BUS_SCALING
231 config.bus_scale_table = pdata->bus_scale_table;
232# endif
233#endif
234 dev->platform_data = &config;
235 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
236 return 0;
237}
238
239static void adreno_unbind(struct device *dev, struct device *master,
240 void *data)
241{
242 set_gpu_pdev(dev_get_drvdata(master), NULL);
243}
244
245static const struct component_ops a3xx_ops = {
246 .bind = adreno_bind,
247 .unbind = adreno_unbind,
248};
249
250static int adreno_probe(struct platform_device *pdev)
251{
252 return component_add(&pdev->dev, &a3xx_ops);
253}
254
255static int adreno_remove(struct platform_device *pdev)
256{
257 component_del(&pdev->dev, &a3xx_ops);
258 return 0;
259}
260
261static const struct of_device_id dt_match[] = {
262 { .compatible = "qcom,adreno-3xx" },
263 /* for backwards compat w/ downstream kgsl DT files: */
264 { .compatible = "qcom,kgsl-3d0" },
265 {}
266};
267
268static struct platform_driver adreno_driver = {
269 .probe = adreno_probe,
270 .remove = adreno_remove,
271 .driver = {
272 .name = "adreno",
273 .of_match_table = dt_match,
274 },
275};
276
277void __init adreno_register(void)
278{
279 platform_driver_register(&adreno_driver);
280}
281
282void __exit adreno_unregister(void)
283{
284 platform_driver_unregister(&adreno_driver);
285}
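
adreno_device.c centralizes what a3xx_gpu.c previously open-coded: a gpulist table keyed by adreno_rev, matched field by field with ANY_ID (0xff) acting as a wildcard, plus the component bind/probe code moved over. A sketch of the lookup path; the chipid value is illustrative:

    /* Sketch: chipid 0x03020002 decodes to rev 3.2.0.2, which
     * _rev_match()es the ADRENO_REV(3, 2, ANY_ID, ANY_ID) entry,
     * i.e. "A320" with a300_pm4.fw/a300_pfp.fw and SZ_512K of GMEM.
     */
    uint32_t chipid = 0x03020002;
    struct adreno_rev rev = ADRENO_REV((chipid >> 24) & 0xff,
                                       (chipid >> 16) & 0xff,
                                       (chipid >> 8) & 0xff,
                                       chipid & 0xff);
    const struct adreno_info *info = adreno_info(rev);
    /* info->init (a3xx_gpu_init for all current entries) then builds the GPU */
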
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 655ce5b14ad0..6afa29167fee 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -19,46 +19,6 @@
19#include "msm_gem.h" 19#include "msm_gem.h"
20#include "msm_mmu.h" 20#include "msm_mmu.h"
21 21
22struct adreno_info {
23 struct adreno_rev rev;
24 uint32_t revn;
25 const char *name;
26 const char *pm4fw, *pfpfw;
27 uint32_t gmem;
28};
29
30#define ANY_ID 0xff
31
32static const struct adreno_info gpulist[] = {
33 {
34 .rev = ADRENO_REV(3, 0, 5, ANY_ID),
35 .revn = 305,
36 .name = "A305",
37 .pm4fw = "a300_pm4.fw",
38 .pfpfw = "a300_pfp.fw",
39 .gmem = SZ_256K,
40 }, {
41 .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
42 .revn = 320,
43 .name = "A320",
44 .pm4fw = "a300_pm4.fw",
45 .pfpfw = "a300_pfp.fw",
46 .gmem = SZ_512K,
47 }, {
48 .rev = ADRENO_REV(3, 3, 0, ANY_ID),
49 .revn = 330,
50 .name = "A330",
51 .pm4fw = "a330_pm4.fw",
52 .pfpfw = "a330_pfp.fw",
53 .gmem = SZ_1M,
54 },
55};
56
57MODULE_FIRMWARE("a300_pm4.fw");
58MODULE_FIRMWARE("a300_pfp.fw");
59MODULE_FIRMWARE("a330_pm4.fw");
60MODULE_FIRMWARE("a330_pfp.fw");
61
62#define RB_SIZE SZ_32K 22#define RB_SIZE SZ_32K
63#define RB_BLKSIZE 16 23#define RB_BLKSIZE 16
64 24
@@ -252,6 +212,7 @@ void adreno_idle(struct msm_gpu *gpu)
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int i;
 
 	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
 			adreno_gpu->info->revn, adreno_gpu->rev.core,
@@ -263,6 +224,23 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
263 seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr); 224 seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
264 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); 225 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
265 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); 226 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
227
228 gpu->funcs->pm_resume(gpu);
229
230 /* dump these out in a form that can be parsed by demsm: */
231 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
232 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
233 uint32_t start = adreno_gpu->registers[i];
234 uint32_t end = adreno_gpu->registers[i+1];
235 uint32_t addr;
236
237 for (addr = start; addr <= end; addr++) {
238 uint32_t val = gpu_read(gpu, addr);
239 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
240 }
241 }
242
243 gpu->funcs->pm_suspend(gpu);
266} 244}
267#endif 245#endif
268 246
@@ -270,6 +248,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 void adreno_dump(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int i;
 
 	printk("revision: %d (%d.%d.%d.%d)\n",
 			adreno_gpu->info->revn, adreno_gpu->rev.core,
@@ -282,6 +261,18 @@ void adreno_dump(struct msm_gpu *gpu)
282 printk("wptr: %d\n", adreno_gpu->memptrs->wptr); 261 printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
283 printk("rb wptr: %d\n", get_wptr(gpu->rb)); 262 printk("rb wptr: %d\n", get_wptr(gpu->rb));
284 263
264 /* dump these out in a form that can be parsed by demsm: */
265 printk("IO:region %s 00000000 00020000\n", gpu->name);
266 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
267 uint32_t start = adreno_gpu->registers[i];
268 uint32_t end = adreno_gpu->registers[i+1];
269 uint32_t addr;
270
271 for (addr = start; addr <= end; addr++) {
272 uint32_t val = gpu_read(gpu, addr);
273 printk("IO:R %08x %08x\n", addr<<2, val);
274 }
275 }
285} 276}
286 277
287static uint32_t ring_freewords(struct msm_gpu *gpu) 278static uint32_t ring_freewords(struct msm_gpu *gpu)
@@ -304,65 +295,51 @@ static const char *iommu_ports[] = {
304 "gfx3d1_user", "gfx3d1_priv", 295 "gfx3d1_user", "gfx3d1_priv",
305}; 296};
306 297
307static inline bool _rev_match(uint8_t entry, uint8_t id)
308{
309 return (entry == ANY_ID) || (entry == id);
310}
311
312int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, 298int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
313 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, 299 struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
314 struct adreno_rev rev)
315{ 300{
301 struct adreno_platform_config *config = pdev->dev.platform_data;
302 struct msm_gpu *gpu = &adreno_gpu->base;
316 struct msm_mmu *mmu; 303 struct msm_mmu *mmu;
317 int i, ret; 304 int ret;
318
319 /* identify gpu: */
320 for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
321 const struct adreno_info *info = &gpulist[i];
322 if (_rev_match(info->rev.core, rev.core) &&
323 _rev_match(info->rev.major, rev.major) &&
324 _rev_match(info->rev.minor, rev.minor) &&
325 _rev_match(info->rev.patchid, rev.patchid)) {
326 gpu->info = info;
327 gpu->revn = info->revn;
328 break;
329 }
330 }
331
332 if (i == ARRAY_SIZE(gpulist)) {
333 dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
334 rev.core, rev.major, rev.minor, rev.patchid);
335 return -ENXIO;
336 }
337 305
338 DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name, 306 adreno_gpu->funcs = funcs;
339 rev.core, rev.major, rev.minor, rev.patchid); 307 adreno_gpu->info = adreno_info(config->rev);
308 adreno_gpu->gmem = adreno_gpu->info->gmem;
309 adreno_gpu->revn = adreno_gpu->info->revn;
310 adreno_gpu->rev = config->rev;
311
312 gpu->fast_rate = config->fast_rate;
313 gpu->slow_rate = config->slow_rate;
314 gpu->bus_freq = config->bus_freq;
315#ifdef CONFIG_MSM_BUS_SCALING
316 gpu->bus_scale_table = config->bus_scale_table;
317#endif
340 318
341 gpu->funcs = funcs; 319 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
342 gpu->gmem = gpu->info->gmem; 320 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
343 gpu->rev = rev;
344 321
345 ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); 322 ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
346 if (ret) { 323 if (ret) {
347 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", 324 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
348 gpu->info->pm4fw, ret); 325 adreno_gpu->info->pm4fw, ret);
349 return ret; 326 return ret;
350 } 327 }
351 328
352 ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev); 329 ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
353 if (ret) { 330 if (ret) {
354 dev_err(drm->dev, "failed to load %s PFP firmware: %d\n", 331 dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
355 gpu->info->pfpfw, ret); 332 adreno_gpu->info->pfpfw, ret);
356 return ret; 333 return ret;
357 } 334 }
358 335
359 ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base, 336 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
360 gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", 337 adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
361 RB_SIZE); 338 RB_SIZE);
362 if (ret) 339 if (ret)
363 return ret; 340 return ret;
364 341
365 mmu = gpu->base.mmu; 342 mmu = gpu->mmu;
366 if (mmu) { 343 if (mmu) {
367 ret = mmu->funcs->attach(mmu, iommu_ports, 344 ret = mmu->funcs->attach(mmu, iommu_ports,
368 ARRAY_SIZE(iommu_ports)); 345 ARRAY_SIZE(iommu_ports));
@@ -371,24 +348,24 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
371 } 348 }
372 349
373 mutex_lock(&drm->struct_mutex); 350 mutex_lock(&drm->struct_mutex);
374 gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), 351 adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
375 MSM_BO_UNCACHED); 352 MSM_BO_UNCACHED);
376 mutex_unlock(&drm->struct_mutex); 353 mutex_unlock(&drm->struct_mutex);
377 if (IS_ERR(gpu->memptrs_bo)) { 354 if (IS_ERR(adreno_gpu->memptrs_bo)) {
378 ret = PTR_ERR(gpu->memptrs_bo); 355 ret = PTR_ERR(adreno_gpu->memptrs_bo);
379 gpu->memptrs_bo = NULL; 356 adreno_gpu->memptrs_bo = NULL;
380 dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); 357 dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
381 return ret; 358 return ret;
382 } 359 }
383 360
384 gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo); 361 adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
385 if (!gpu->memptrs) { 362 if (!adreno_gpu->memptrs) {
386 dev_err(drm->dev, "could not vmap memptrs\n"); 363 dev_err(drm->dev, "could not vmap memptrs\n");
387 return -ENOMEM; 364 return -ENOMEM;
388 } 365 }
389 366
390 ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id, 367 ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
391 &gpu->memptrs_iova); 368 &adreno_gpu->memptrs_iova);
392 if (ret) { 369 if (ret) {
393 dev_err(drm->dev, "could not map memptrs: %d\n", ret); 370 dev_err(drm->dev, "could not map memptrs: %d\n", ret);
394 return ret; 371 return ret;
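
With the demsm register dump hoisted into the shared adreno_show()/adreno_dump(), the per-GPU knowledge shrinks to a table: adreno_gpu->registers is a flat list of inclusive (start, end) offset pairs, and the common loops walk it until a ~0 sentinel, so any table wired up through adreno_gpu_init() must be sentinel-terminated; that is exactly the entry a3xx_registers gains above. A sketch of the expected shape (example values):

    /* Sketch: shape consumed by the adreno_show()/adreno_dump() loops.
     * Pairs are inclusive; ~0 terminates the walk (mandatory, or the
     * loop runs off the end of the array).
     */
    static const unsigned int example_registers[] = {
        0x0000, 0x0002,   /* dumps offsets 0x0000, 0x0001, 0x0002 */
        0x0010, 0x0010,   /* a single register */
        ~0                /* sentinel */
    };
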
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 63c36ce33020..52f051579753 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -39,7 +39,16 @@ struct adreno_gpu_funcs {
 	struct msm_gpu_funcs base;
 };
 
-struct adreno_info;
+struct adreno_info {
+	struct adreno_rev rev;
+	uint32_t revn;
+	const char *name;
+	const char *pm4fw, *pfpfw;
+	uint32_t gmem;
+	struct msm_gpu *(*init)(struct drm_device *dev);
+};
+
+const struct adreno_info *adreno_info(struct adreno_rev rev);
 
 struct adreno_rbmemptrs {
 	volatile uint32_t rptr;
@@ -55,6 +64,9 @@ struct adreno_gpu {
 	uint32_t revn;  /* numeric revision name */
 	const struct adreno_gpu_funcs *funcs;
 
+	/* interesting register offsets to dump: */
+	const unsigned int *registers;
+
 	/* firmware: */
 	const struct firmware *pm4, *pfp;
 
@@ -131,8 +143,7 @@ void adreno_dump(struct msm_gpu *gpu);
 void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
 
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
-		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
-		struct adreno_rev rev);
+		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
 void adreno_gpu_cleanup(struct adreno_gpu *gpu);
 
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 4eee0ec8f069..6ef43f66c30a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -163,12 +163,16 @@ enum adreno_pm4_type3_packets {
 	CP_INDIRECT_BUFFER_PFE = 63,
 	CP_SET_BIN = 76,
 	CP_TEST_TWO_MEMS = 113,
+	CP_REG_WR_NO_CTXT = 120,
+	CP_RECORD_PFP_TIMESTAMP = 17,
 	CP_WAIT_FOR_ME = 19,
 	CP_SET_DRAW_STATE = 67,
 	CP_DRAW_INDX_OFFSET = 56,
 	CP_DRAW_INDIRECT = 40,
 	CP_DRAW_INDX_INDIRECT = 41,
 	CP_DRAW_AUTO = 36,
+	CP_UNKNOWN_1A = 26,
+	CP_WIDE_REG_WRITE = 116,
 	IN_IB_PREFETCH_END = 23,
 	IN_SUBBLK_PREFETCH = 31,
 	IN_INSTR_PREFETCH = 32,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 0f1f5b9459a5..e965898dfda6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index d468f86f637c..f2bdda957205 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -10,16 +10,16 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013-2014 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
24 24
25Permission is hereby granted, free of charge, to any person obtaining 25Permission is hereby granted, free of charge, to any person obtaining
@@ -112,5 +112,11 @@ static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
112 return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK; 112 return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
113} 113}
114 114
115#define REG_MMSS_CC_DSI2_PIXEL_CC 0x00000094
116
117#define REG_MMSS_CC_DSI2_PIXEL_NS 0x000000e4
118
119#define REG_MMSS_CC_DSI2_PIXEL_CC2 0x00000264
120
115 121
116#endif /* MMSS_CC_XML */ 122#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index da8740054cdf..e5b071ffd865 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c6c9b02e0ada..9d00dcba6959 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -123,7 +123,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
123 for (i = 0; i < config->hpd_reg_cnt; i++) { 123 for (i = 0; i < config->hpd_reg_cnt; i++) {
124 struct regulator *reg; 124 struct regulator *reg;
125 125
126 reg = devm_regulator_get_exclusive(&pdev->dev, 126 reg = devm_regulator_get(&pdev->dev,
127 config->hpd_reg_names[i]); 127 config->hpd_reg_names[i]);
128 if (IS_ERR(reg)) { 128 if (IS_ERR(reg)) {
129 ret = PTR_ERR(reg); 129 ret = PTR_ERR(reg);
@@ -139,7 +139,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
139 for (i = 0; i < config->pwr_reg_cnt; i++) { 139 for (i = 0; i < config->pwr_reg_cnt; i++) {
140 struct regulator *reg; 140 struct regulator *reg;
141 141
142 reg = devm_regulator_get_exclusive(&pdev->dev, 142 reg = devm_regulator_get(&pdev->dev,
143 config->pwr_reg_names[i]); 143 config->pwr_reg_names[i]);
144 if (IS_ERR(reg)) { 144 if (IS_ERR(reg)) {
145 ret = PTR_ERR(reg); 145 ret = PTR_ERR(reg);
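The two hunks above drop the exclusive regulator handles: devm_regulator_get() returns a shared handle whose enable/disable calls are reference counted across consumers, while the _exclusive variant fails outright if another consumer already holds the supply. A minimal sketch of the shared pattern, with a made-up supply name:

    struct regulator *reg;
    int ret;

    reg = devm_regulator_get(&pdev->dev, "hpd-5v");  /* supply name is illustrative */
    if (IS_ERR(reg))
            return PTR_ERR(reg);

    ret = regulator_enable(reg);  /* refcounted; pairs with regulator_disable() */
    if (ret)
            dev_err(&pdev->dev, "failed to enable supply: %d\n", ret);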
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e89fe053d375..76fd0cfc6558 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013-2014 by the following authors: 22Copyright (C) 2013-2014 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index bd81db6a7829..d53c29327df9 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 122208e8a2ee..03c0bd9cd5b9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,16 +10,16 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013-2014 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
24 24
25Permission is hereby granted, free of charge, to any person obtaining 25Permission is hereby granted, free of charge, to any person obtaining
@@ -871,6 +871,101 @@ static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
871#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002 871#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
872#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004 872#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
873 873
874#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000
875#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004
876#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008
877#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010
878#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020
879#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040
880#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080
881#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100
882#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200
883#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400
884#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800
885#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000
886#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000
887#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000
888#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000
889#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000
890#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000
891
892static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
893
894static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
895#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff
896#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0
897static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val)
898{
899 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK;
900}
901#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00
902#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8
903static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val)
904{
905 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK;
906}
907#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000
908#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16
909static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val)
910{
911 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK;
912}
913#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000
914#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24
915static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val)
916{
917 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK;
918}
919
920static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; }
921#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff
922#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0
923static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val)
924{
925 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK;
926}
927#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00
928#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8
929static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val)
930{
931 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK;
932}
933#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000
934#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16
935static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val)
936{
937 return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK;
938}
939
940#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034
941
942#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000
943
944#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004
945
946#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008
947
948#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c
949
950#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014
951
952#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018
953
954#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c
955
956#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020
957
958#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024
959
960#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080
961
962#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108
963
964#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100
965#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010
966#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040
967#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080
968
874#define REG_MDP4_DTV 0x000d0000 969#define REG_MDP4_DTV 0x000d0000
875 970
876#define REG_MDP4_DTV_ENABLE 0x000d0000 971#define REG_MDP4_DTV_ENABLE 0x000d0000
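The generated helpers above follow the usual rules-ng-ng pattern: array registers take an index and stride by a fixed amount, and packed fields go through matching __SHIFT/__MASK pairs. A small sketch (the 0x11/0x10 lane indices are illustrative, not taken from a real board):

    /* REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(i) = 0xc2014 + 8*i,
     * so channel 1's low mux word sits at 0x000c201c: */
    uint32_t reg = REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1);
    uint32_t val = MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) |   /* byte 0 */
                   MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10);    /* byte 1 */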
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c6c80ea28c35..7d00f7fb5773 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -273,14 +273,17 @@ static void blend_setup(struct drm_crtc *crtc)
273 }; 273 };
274 bool alpha[4]= { false, false, false, false }; 274 bool alpha[4]= { false, false, false, false };
275 275
276 /* Don't rely on the value read back from hw, but instead use our
277 * own shadowed value. Possibly a disable/re-enable loses the
278 * previous value and goes back to the power-on default?
279 */
280 mixer_cfg = mdp4_kms->mixer_cfg;
281
276 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); 282 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
277 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); 283 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
278 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); 284 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
279 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); 285 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
280 286
281 /* TODO single register for all CRTCs, so this won't work properly
282 * when multiple CRTCs are active..
283 */
284 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { 287 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
285 struct drm_plane *plane = mdp4_crtc->planes[i]; 288 struct drm_plane *plane = mdp4_crtc->planes[i];
286 if (plane) { 289 if (plane) {
@@ -291,7 +294,8 @@ static void blend_setup(struct drm_crtc *crtc)
291 to_mdp_format(msm_framebuffer_format(plane->fb)); 294 to_mdp_format(msm_framebuffer_format(plane->fb));
292 alpha[idx-1] = format->alpha_enable; 295 alpha[idx-1] = format->alpha_enable;
293 } 296 }
294 mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]); 297 mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
298 pipe_id, stages[idx]);
295 } 299 }
296 } 300 }
297 301
@@ -320,6 +324,7 @@ static void blend_setup(struct drm_crtc *crtc)
320 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); 324 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
321 } 325 }
322 326
327 mdp4_kms->mixer_cfg = mixer_cfg;
323 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); 328 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
324} 329}
325 330
@@ -672,7 +677,7 @@ void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
672} 677}
673 678
674/* set interface for routing crtc->encoder: */ 679/* set interface for routing crtc->encoder: */
675void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf) 680void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
676{ 681{
677 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 682 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
678 struct mdp4_kms *mdp4_kms = get_kms(crtc); 683 struct mdp4_kms *mdp4_kms = get_kms(crtc);
@@ -698,15 +703,13 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
698 if (intf == INTF_DSI_VIDEO) { 703 if (intf == INTF_DSI_VIDEO) {
699 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD; 704 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
700 intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO; 705 intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
701 mdp4_crtc->mixer = 0;
702 } else if (intf == INTF_DSI_CMD) { 706 } else if (intf == INTF_DSI_CMD) {
703 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO; 707 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
704 intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD; 708 intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
705 mdp4_crtc->mixer = 0;
706 } else if (intf == INTF_LCDC_DTV){
707 mdp4_crtc->mixer = 1;
708 } 709 }
709 710
711 mdp4_crtc->mixer = mixer;
712
710 blend_setup(crtc); 713 blend_setup(crtc);
711 714
712 DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel); 715 DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 067ed03b35fe..c3878420180b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -233,7 +233,7 @@ static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
233 MDP4_DMA_CONFIG_G_BPC(BPC8) | 233 MDP4_DMA_CONFIG_G_BPC(BPC8) |
234 MDP4_DMA_CONFIG_B_BPC(BPC8) | 234 MDP4_DMA_CONFIG_B_BPC(BPC8) |
235 MDP4_DMA_CONFIG_PACK(0x21)); 235 MDP4_DMA_CONFIG_PACK(0x21));
236 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV); 236 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
237 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 237 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
238} 238}
239 239
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 733646c0d3f8..79d804e61cc4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,6 +106,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
106 106
107 if (mdp4_kms->rev >= 2) 107 if (mdp4_kms->rev >= 2)
108 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1); 108 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
109 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);
109 110
110 /* disable CSC matrix / YUV by default: */ 111 /* disable CSC matrix / YUV by default: */
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0); 112 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
@@ -196,6 +197,28 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
196 return 0; 197 return 0;
197} 198}
198 199
200#ifdef CONFIG_OF
201static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
202{
203 struct device_node *n;
204 struct drm_panel *panel = NULL;
205
206 n = of_parse_phandle(dev->dev->of_node, name, 0);
207 if (n) {
208 panel = of_drm_find_panel(n);
209 if (!panel)
210 panel = ERR_PTR(-EPROBE_DEFER);
211 }
212
213 return panel;
214}
215#else
216static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
217{
218 return NULL; /* TODO: maybe use a module param to specify which panel is attached? */
219}
220#endif
221
199static int modeset_init(struct mdp4_kms *mdp4_kms) 222static int modeset_init(struct mdp4_kms *mdp4_kms)
200{ 223{
201 struct drm_device *dev = mdp4_kms->dev; 224 struct drm_device *dev = mdp4_kms->dev;
@@ -203,14 +226,11 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
203 struct drm_plane *plane; 226 struct drm_plane *plane;
204 struct drm_crtc *crtc; 227 struct drm_crtc *crtc;
205 struct drm_encoder *encoder; 228 struct drm_encoder *encoder;
229 struct drm_connector *connector;
230 struct drm_panel *panel;
206 struct hdmi *hdmi; 231 struct hdmi *hdmi;
207 int ret; 232 int ret;
208 233
209 /*
210 * NOTE: this is a bit simplistic until we add support
211 * for more than just RGB1->DMA_E->DTV->HDMI
212 */
213
214 /* construct non-private planes: */ 234 /* construct non-private planes: */
215 plane = mdp4_plane_init(dev, VG1, false); 235 plane = mdp4_plane_init(dev, VG1, false);
216 if (IS_ERR(plane)) { 236 if (IS_ERR(plane)) {
@@ -228,7 +248,57 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
228 } 248 }
229 priv->planes[priv->num_planes++] = plane; 249 priv->planes[priv->num_planes++] = plane;
230 250
231 /* the CRTCs get constructed with a private plane: */ 251 /*
252 * Set up the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
253 */
254
255 panel = detect_panel(dev, "qcom,lvds-panel");
256 if (IS_ERR(panel)) {
257 ret = PTR_ERR(panel);
258 dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
259 goto fail;
260 }
261
262 plane = mdp4_plane_init(dev, RGB2, true);
263 if (IS_ERR(plane)) {
264 dev_err(dev->dev, "failed to construct plane for RGB2\n");
265 ret = PTR_ERR(plane);
266 goto fail;
267 }
268
269 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
270 if (IS_ERR(crtc)) {
271 dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
272 ret = PTR_ERR(crtc);
273 goto fail;
274 }
275
276 encoder = mdp4_lcdc_encoder_init(dev, panel);
277 if (IS_ERR(encoder)) {
278 dev_err(dev->dev, "failed to construct LCDC encoder\n");
279 ret = PTR_ERR(encoder);
280 goto fail;
281 }
282
283 /* LCDC can be hooked to DMA_P: */
284 encoder->possible_crtcs = 1 << priv->num_crtcs;
285
286 priv->crtcs[priv->num_crtcs++] = crtc;
287 priv->encoders[priv->num_encoders++] = encoder;
288
289 connector = mdp4_lvds_connector_init(dev, panel, encoder);
290 if (IS_ERR(connector)) {
291 ret = PTR_ERR(connector);
292 dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
293 goto fail;
294 }
295
296 priv->connectors[priv->num_connectors++] = connector;
297
298 /*
299 * Set up the DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI:
300 */
301
232 plane = mdp4_plane_init(dev, RGB1, true); 302 plane = mdp4_plane_init(dev, RGB1, true);
233 if (IS_ERR(plane)) { 303 if (IS_ERR(plane)) {
234 dev_err(dev->dev, "failed to construct plane for RGB1\n"); 304 dev_err(dev->dev, "failed to construct plane for RGB1\n");
@@ -242,7 +312,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
242 ret = PTR_ERR(crtc); 312 ret = PTR_ERR(crtc);
243 goto fail; 313 goto fail;
244 } 314 }
245 priv->crtcs[priv->num_crtcs++] = crtc;
246 315
247 encoder = mdp4_dtv_encoder_init(dev); 316 encoder = mdp4_dtv_encoder_init(dev);
248 if (IS_ERR(encoder)) { 317 if (IS_ERR(encoder)) {
@@ -250,7 +319,11 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
250 ret = PTR_ERR(encoder); 319 ret = PTR_ERR(encoder);
251 goto fail; 320 goto fail;
252 } 321 }
253 encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ 322
323 /* DTV can be hooked to DMA_E: */
324 encoder->possible_crtcs = 1 << priv->num_crtcs;
325
326 priv->crtcs[priv->num_crtcs++] = crtc;
254 priv->encoders[priv->num_encoders++] = encoder; 327 priv->encoders[priv->num_encoders++] = encoder;
255 328
256 hdmi = hdmi_init(dev, encoder); 329 hdmi = hdmi_init(dev, encoder);
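detect_panel() leans on the driver core's probe deferral: of_drm_find_panel() returns NULL until the panel driver has bound, and turning that into -EPROBE_DEFER makes the whole msm probe get retried later. The contract assumed by modeset_init() above, sketched:

    panel = detect_panel(dev, "qcom,lvds-panel");
    if (IS_ERR(panel))
            return PTR_ERR(panel);  /* typically -EPROBE_DEFER: re-probed later */
    /* NULL means no panel phandle in DT; the LVDS connector will then
     * simply report itself as disconnected. */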
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 3225da804c61..9ff6e7ccfe90 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -23,6 +23,8 @@
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24#include "mdp4.xml.h" 24#include "mdp4.xml.h"
25 25
26#include "drm_panel.h"
27
26struct mdp4_kms { 28struct mdp4_kms {
27 struct mdp_kms base; 29 struct mdp_kms base;
28 30
@@ -30,6 +32,13 @@ struct mdp4_kms {
30 32
31 int rev; 33 int rev;
32 34
35 /* Shadow value for MDP4_LAYERMIXER_IN_CFG: since setup for all
36 * crtcs/encoders is in one shared register, we need to update it
37 * via read/modify/write. But to avoid getting confused by power-
38 * on-default values after resume, use this shadow value instead:
39 */
40 uint32_t mixer_cfg;
41
33 /* mapper-id used to request GEM buffer mapped for scanout: */ 42 /* mapper-id used to request GEM buffer mapped for scanout: */
34 int id; 43 int id;
35 44
@@ -74,7 +83,7 @@ static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
74 case VG1: return MDP4_OVERLAY_FLUSH_VG1; 83 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
75 case VG2: return MDP4_OVERLAY_FLUSH_VG2; 84 case VG2: return MDP4_OVERLAY_FLUSH_VG2;
76 case RGB1: return MDP4_OVERLAY_FLUSH_RGB1; 85 case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
77 case RGB2: return MDP4_OVERLAY_FLUSH_RGB1; 86 case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
78 default: return 0; 87 default: return 0;
79 } 88 }
80} 89}
@@ -108,38 +117,50 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
108 } 117 }
109} 118}
110 119
111static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, 120static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
112 enum mdp_mixer_stage_id stage) 121 enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage)
113{ 122{
114 uint32_t mixer_cfg = 0;
115
116 switch (pipe) { 123 switch (pipe) {
117 case VG1: 124 case VG1:
118 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) | 125 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK |
126 MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
127 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
119 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); 128 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
120 break; 129 break;
121 case VG2: 130 case VG2:
122 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) | 131 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK |
132 MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
133 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
123 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); 134 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
124 break; 135 break;
125 case RGB1: 136 case RGB1:
126 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) | 137 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK |
138 MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
139 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
127 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); 140 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
128 break; 141 break;
129 case RGB2: 142 case RGB2:
130 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) | 143 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK |
144 MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
145 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
131 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); 146 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
132 break; 147 break;
133 case RGB3: 148 case RGB3:
134 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) | 149 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK |
150 MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
151 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
135 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); 152 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
136 break; 153 break;
137 case VG3: 154 case VG3:
138 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) | 155 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK |
156 MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
157 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
139 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); 158 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
140 break; 159 break;
141 case VG4: 160 case VG4:
142 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) | 161 mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK |
162 MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
163 mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
143 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); 164 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
144 break; 165 break;
145 default: 166 default:
@@ -188,7 +209,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
188uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); 209uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
189void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 210void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
190void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); 211void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
191void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf); 212void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
192void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane); 213void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
193void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane); 214void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
194struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, 215struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
@@ -198,6 +219,22 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
198long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate); 219long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
199struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev); 220struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
200 221
222long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
223struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
224 struct drm_panel *panel);
225
226struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
227 struct drm_panel *panel, struct drm_encoder *encoder);
228
229#ifdef CONFIG_COMMON_CLK
230struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
231#else
232static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
233{
234 return ERR_PTR(-ENODEV);
235}
236#endif
237
201#ifdef CONFIG_MSM_BUS_SCALING 238#ifdef CONFIG_MSM_BUS_SCALING
202static inline int match_dev_name(struct device *dev, void *data) 239static inline int match_dev_name(struct device *dev, void *data)
203{ 240{
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
new file mode 100644
index 000000000000..41f6436754fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -0,0 +1,506 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "mdp4_kms.h"
20
21#include "drm_crtc.h"
22#include "drm_crtc_helper.h"
23
24struct mdp4_lcdc_encoder {
25 struct drm_encoder base;
26 struct drm_panel *panel;
27 struct clk *lcdc_clk;
28 unsigned long pixclock;
29 struct regulator *regs[3];
30 bool enabled;
31 uint32_t bsc;
32};
33#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base)
34
35static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
36{
37 struct msm_drm_private *priv = encoder->dev->dev_private;
38 return to_mdp4_kms(to_mdp_kms(priv->kms));
39}
40
41#ifdef CONFIG_MSM_BUS_SCALING
42#include <mach/board.h>
43static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
44{
45 struct drm_device *dev = mdp4_lcdc_encoder->base.dev;
46 struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
47
48 if (!lcdc_pdata) {
49 dev_err(dev->dev, "could not find lvds pdata\n");
50 return;
51 }
52
53 if (lcdc_pdata->bus_scale_table) {
54 mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client(
55 lcdc_pdata->bus_scale_table);
56 DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc);
57 }
58}
59
60static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
61{
62 if (mdp4_lcdc_encoder->bsc) {
63 msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc);
64 mdp4_lcdc_encoder->bsc = 0;
65 }
66}
67
68static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx)
69{
70 if (mdp4_lcdc_encoder->bsc) {
71 DBG("set bus scaling: %d", idx);
72 msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx);
73 }
74}
75#else
76static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
77static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
78static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {}
79#endif
80
81static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
82{
83 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
84 to_mdp4_lcdc_encoder(encoder);
85 bs_fini(mdp4_lcdc_encoder);
86 drm_encoder_cleanup(encoder);
87 kfree(mdp4_lcdc_encoder);
88}
89
90static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
91 .destroy = mdp4_lcdc_encoder_destroy,
92};
93
94/* this should probably be a helper: */
95static struct drm_connector *get_connector(struct drm_encoder *encoder)
96{
97 struct drm_device *dev = encoder->dev;
98 struct drm_connector *connector;
99
100 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
101 if (connector->encoder == encoder)
102 return connector;
103
104 return NULL;
105}
106
107static void setup_phy(struct drm_encoder *encoder)
108{
109 struct drm_device *dev = encoder->dev;
110 struct drm_connector *connector = get_connector(encoder);
111 struct mdp4_kms *mdp4_kms = get_kms(encoder);
112 uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0;
113 int bpp, nchan, swap;
114
115 if (!connector)
116 return;
117
118 bpp = 3 * connector->display_info.bpc;
119
120 if (!bpp)
121 bpp = 18;
122
123 /* TODO, these should come from panel somehow: */
124 nchan = 1;
125 swap = 0;
126
127 switch (bpp) {
128 case 24:
129 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
130 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) |
131 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) |
132 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) |
133 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03));
134 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
135 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) |
136 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) |
137 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00));
138 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
139 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) |
140 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) |
141 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) |
142 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c));
143 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
144 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) |
145 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) |
146 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09));
147 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
148 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
149 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
150 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
151 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15));
152 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
153 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) |
154 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) |
155 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12));
156 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3),
157 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) |
158 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) |
159 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) |
160 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f));
161 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3),
162 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) |
163 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) |
164 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06));
165 if (nchan == 2) {
166 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN |
167 MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
168 MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
169 MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
170 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
171 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
172 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
173 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
174 } else {
175 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
176 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
177 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
178 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
179 }
180 break;
181
182 case 18:
183 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
184 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) |
185 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) |
186 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) |
187 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05));
188 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
189 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) |
190 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) |
191 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02));
192 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
193 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) |
194 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) |
195 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) |
196 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e));
197 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
198 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) |
199 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) |
200 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b));
201 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
202 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
203 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
204 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
205 MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17));
206 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
207 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) |
208 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) |
209 MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14));
210 if (nchan == 2) {
211 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
212 MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
213 MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
214 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
215 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
216 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
217 } else {
218 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
219 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
220 MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
221 }
222 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT;
223 break;
224
225 default:
226 dev_err(dev->dev, "unknown bpp: %d\n", bpp);
227 return;
228 }
229
230 switch (nchan) {
231 case 1:
232 lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0;
233 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN |
234 MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL;
235 break;
236 case 2:
237 lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 |
238 MDP4_LVDS_PHY_CFG0_CHANNEL1;
239 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN |
240 MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
241 break;
242 default:
243 dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
244 return;
245 }
246
247 if (swap)
248 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP;
249
250 lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE;
251
252 mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
253 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf);
254 mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30);
255
256 mb();
257 udelay(1);
258 lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE;
259 mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
260}
261
262static void mdp4_lcdc_encoder_dpms(struct drm_encoder *encoder, int mode)
263{
264 struct drm_device *dev = encoder->dev;
265 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
266 to_mdp4_lcdc_encoder(encoder);
267 struct mdp4_kms *mdp4_kms = get_kms(encoder);
268 struct drm_panel *panel = mdp4_lcdc_encoder->panel;
269 bool enabled = (mode == DRM_MODE_DPMS_ON);
270 int i, ret;
271
272 DBG("mode=%d", mode);
273
274 if (enabled == mdp4_lcdc_encoder->enabled)
275 return;
276
277 if (enabled) {
278 unsigned long pc = mdp4_lcdc_encoder->pixclock;
279 int ret;
280
281 bs_set(mdp4_lcdc_encoder, 1);
282
283 for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
284 ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
285 if (ret)
286 dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
287 }
288
289 DBG("setting lcdc_clk=%lu", pc);
290 ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
291 if (ret)
292 dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
293 ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
294 if (ret)
295 dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
296
297 if (panel)
298 drm_panel_enable(panel);
299
300 setup_phy(encoder);
301
302 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
303 } else {
304 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
305
306 if (panel)
307 drm_panel_disable(panel);
308
309 /*
310 * Wait for a vsync so we know that ENABLE=0 has latched before
311 * the (connector) source of the vsyncs gets disabled,
312 * otherwise we end up in a funny state if we re-enable
313 * before the disable latches, with the result that some of
314 * the settings for the new modeset (like the new
315 * scanout buffer) don't latch properly.
316 */
317 mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
318
319 clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
320
321 for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
322 ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
323 if (ret)
324 dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
325 }
326
327 bs_set(mdp4_lcdc_encoder, 0);
328 }
329
330 mdp4_lcdc_encoder->enabled = enabled;
331}
332
333static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder,
334 const struct drm_display_mode *mode,
335 struct drm_display_mode *adjusted_mode)
336{
337 return true;
338}
339
340static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
341 struct drm_display_mode *mode,
342 struct drm_display_mode *adjusted_mode)
343{
344 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
345 to_mdp4_lcdc_encoder(encoder);
346 struct mdp4_kms *mdp4_kms = get_kms(encoder);
347 uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol;
348 uint32_t display_v_start, display_v_end;
349 uint32_t hsync_start_x, hsync_end_x;
350
351 mode = adjusted_mode;
352
353 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
354 mode->base.id, mode->name,
355 mode->vrefresh, mode->clock,
356 mode->hdisplay, mode->hsync_start,
357 mode->hsync_end, mode->htotal,
358 mode->vdisplay, mode->vsync_start,
359 mode->vsync_end, mode->vtotal,
360 mode->type, mode->flags);
361
362 mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
363
364 DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock);
365
366 ctrl_pol = 0;
367 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
368 ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW;
369 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
370 ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW;
371 /* probably need to get DATA_EN polarity from panel.. */
372
373 lcdc_hsync_skew = 0; /* get this from panel? */
374
375 hsync_start_x = (mode->htotal - mode->hsync_start);
376 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
377
378 vsync_period = mode->vtotal * mode->htotal;
379 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
380 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew;
381 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1;
382
383 mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL,
384 MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
385 MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal));
386 mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period);
387 mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len);
388 mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL,
389 MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) |
390 MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x));
391 mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start);
392 mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end);
393 mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0);
394 mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR,
395 MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY |
396 MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff));
397 mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew);
398 mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol);
399 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL,
400 MDP4_LCDC_ACTIVE_HCTL_START(0) |
401 MDP4_LCDC_ACTIVE_HCTL_END(0));
402 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0);
403 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0);
404}
405
406static void mdp4_lcdc_encoder_prepare(struct drm_encoder *encoder)
407{
408 mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
409}
410
411static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder)
412{
413 /* TODO: hard-coded for 18bpp: */
414 mdp4_crtc_set_config(encoder->crtc,
415 MDP4_DMA_CONFIG_R_BPC(BPC6) |
416 MDP4_DMA_CONFIG_G_BPC(BPC6) |
417 MDP4_DMA_CONFIG_B_BPC(BPC6) |
418 MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
419 MDP4_DMA_CONFIG_PACK(0x21) |
420 MDP4_DMA_CONFIG_DEFLKR_EN |
421 MDP4_DMA_CONFIG_DITHER_EN);
422 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
423 mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
424}
425
426static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
427 .dpms = mdp4_lcdc_encoder_dpms,
428 .mode_fixup = mdp4_lcdc_encoder_mode_fixup,
429 .mode_set = mdp4_lcdc_encoder_mode_set,
430 .prepare = mdp4_lcdc_encoder_prepare,
431 .commit = mdp4_lcdc_encoder_commit,
432};
433
434long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
435{
436 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
437 to_mdp4_lcdc_encoder(encoder);
438 return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
439}
440
441/* initialize encoder */
442struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
443 struct drm_panel *panel)
444{
445 struct drm_encoder *encoder = NULL;
446 struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
447 struct regulator *reg;
448 int ret;
449
450 mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
451 if (!mdp4_lcdc_encoder) {
452 ret = -ENOMEM;
453 goto fail;
454 }
455
456 mdp4_lcdc_encoder->panel = panel;
457
458 encoder = &mdp4_lcdc_encoder->base;
459
460 drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
461 DRM_MODE_ENCODER_LVDS);
462 drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
463
464 /* TODO: do we need different pll in other cases? */
465 mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
466 if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
467 dev_err(dev->dev, "failed to get lvds_clk\n");
468 ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
469 goto fail;
470 }
471
472 /* TODO: different regulators in other cases? */
473 reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
474 if (IS_ERR(reg)) {
475 ret = PTR_ERR(reg);
476 dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
477 goto fail;
478 }
479 mdp4_lcdc_encoder->regs[0] = reg;
480
481 reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
482 if (IS_ERR(reg)) {
483 ret = PTR_ERR(reg);
484 dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
485 goto fail;
486 }
487 mdp4_lcdc_encoder->regs[1] = reg;
488
489 reg = devm_regulator_get(dev->dev, "lvds-vdda");
490 if (IS_ERR(reg)) {
491 ret = PTR_ERR(reg);
492 dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
493 goto fail;
494 }
495 mdp4_lcdc_encoder->regs[2] = reg;
496
497 bs_init(mdp4_lcdc_encoder);
498
499 return encoder;
500
501fail:
502 if (encoder)
503 mdp4_lcdc_encoder_destroy(encoder);
504
505 return ERR_PTR(ret);
506}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
new file mode 100644
index 000000000000..310034688c15
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -0,0 +1,151 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/gpio.h>
20
21#include "mdp4_kms.h"
22
23struct mdp4_lvds_connector {
24 struct drm_connector base;
25 struct drm_encoder *encoder;
26 struct drm_panel *panel;
27};
28#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
29
30static enum drm_connector_status mdp4_lvds_connector_detect(
31 struct drm_connector *connector, bool force)
32{
33 struct mdp4_lvds_connector *mdp4_lvds_connector =
34 to_mdp4_lvds_connector(connector);
35
36 return mdp4_lvds_connector->panel ?
37 connector_status_connected :
38 connector_status_disconnected;
39}
40
41static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
42{
43 struct mdp4_lvds_connector *mdp4_lvds_connector =
44 to_mdp4_lvds_connector(connector);
45 struct drm_panel *panel = mdp4_lvds_connector->panel;
46
47 if (panel)
48 drm_panel_detach(panel);
49
50 drm_connector_unregister(connector);
51 drm_connector_cleanup(connector);
52
53 kfree(mdp4_lvds_connector);
54}
55
56static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
57{
58 struct mdp4_lvds_connector *mdp4_lvds_connector =
59 to_mdp4_lvds_connector(connector);
60 struct drm_panel *panel = mdp4_lvds_connector->panel;
61 int ret = 0;
62
63 if (panel)
64 ret = panel->funcs->get_modes(panel);
65
66 return ret;
67}
68
69static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
70 struct drm_display_mode *mode)
71{
72 struct mdp4_lvds_connector *mdp4_lvds_connector =
73 to_mdp4_lvds_connector(connector);
74 struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
75 long actual, requested;
76
77 requested = 1000 * mode->clock;
78 actual = mdp4_lcdc_round_pixclk(encoder, requested);
79
80 DBG("requested=%ld, actual=%ld", requested, actual);
81
82 if (actual != requested)
83 return MODE_CLOCK_RANGE;
84
85 return MODE_OK;
86}
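
/*
 * Annotation (not part of the file): mode_valid() above only accepts
 * modes whose pixel clock the LVDS PLL can hit exactly. mode->clock is
 * in kHz, so a 72 MHz mode has mode->clock == 72000 and requested ==
 * 72000000 Hz; with the single 72000000 entry in the PLL rate table,
 * clk_round_rate() returns 72000000 and the mode passes. A 65 MHz mode
 * would round to 72000000 != 65000000 and be rejected with
 * MODE_CLOCK_RANGE.
 */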
87
88static struct drm_encoder *
89mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
90{
91 struct mdp4_lvds_connector *mdp4_lvds_connector =
92 to_mdp4_lvds_connector(connector);
93 return mdp4_lvds_connector->encoder;
94}
95
96static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
97 .dpms = drm_helper_connector_dpms,
98 .detect = mdp4_lvds_connector_detect,
99 .fill_modes = drm_helper_probe_single_connector_modes,
100 .destroy = mdp4_lvds_connector_destroy,
101};
102
103static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
104 .get_modes = mdp4_lvds_connector_get_modes,
105 .mode_valid = mdp4_lvds_connector_mode_valid,
106 .best_encoder = mdp4_lvds_connector_best_encoder,
107};
108
109/* initialize connector */
110struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
111 struct drm_panel *panel, struct drm_encoder *encoder)
112{
113 struct drm_connector *connector = NULL;
114 struct mdp4_lvds_connector *mdp4_lvds_connector;
115 int ret;
116
117 mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
118 if (!mdp4_lvds_connector) {
119 ret = -ENOMEM;
120 goto fail;
121 }
122
123 mdp4_lvds_connector->encoder = encoder;
124 mdp4_lvds_connector->panel = panel;
125
126 connector = &mdp4_lvds_connector->base;
127
128 drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
129 DRM_MODE_CONNECTOR_LVDS);
130 drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
131
132 connector->polled = 0;
133
134 connector->interlace_allowed = 0;
135 connector->doublescan_allowed = 0;
136
137 drm_connector_register(connector);
138
139 drm_mode_connector_attach_encoder(connector, encoder);
140
141 if (panel)
142 drm_panel_attach(panel, connector);
143
144 return connector;
145
146fail:
147 if (connector)
148 mdp4_lvds_connector_destroy(connector);
149
150 return ERR_PTR(ret);
151}
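
/*
 * Sketch of a call site (annotation, not part of the file; the actual
 * hookup lives in the driver's modeset init code, outside this patch
 * excerpt). The encoder is created first, then panel and encoder are
 * both handed to the connector:
 *
 *	encoder = mdp4_lcdc_encoder_init(dev, panel);
 *	if (IS_ERR(encoder))
 *		return PTR_ERR(encoder);
 *
 *	connector = mdp4_lvds_connector_init(dev, panel, encoder);
 *	if (IS_ERR(connector))
 *		return PTR_ERR(connector);
 *
 * The connector keeps the panel pointer for detect()/get_modes() and
 * the encoder pointer for best_encoder(), as defined above.
 */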
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c
new file mode 100644
index 000000000000..ce4245971673
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/clk.h>
19#include <linux/clk-provider.h>
20
21#include "mdp4_kms.h"
22
23struct mdp4_lvds_pll {
24 struct clk_hw pll_hw;
25 struct drm_device *dev;
26 unsigned long pixclk;
27};
28#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw)
29
30static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll)
31{
32 struct msm_drm_private *priv = lvds_pll->dev->dev_private;
33 return to_mdp4_kms(to_mdp_kms(priv->kms));
34}
35
36struct pll_rate {
37 unsigned long rate;
38 struct {
39 uint32_t val;
40 uint32_t reg;
41 } conf[32];
42};
43
44/* NOTE: keep sorted highest freq to lowest: */
45static const struct pll_rate freqtbl[] = {
46 { 72000000, {
47 { 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 },
48 { 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 },
49 { 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 },
50 { 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 },
51 { 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 },
52 { 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 },
53 { 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 },
54 { 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 },
55 { 0, 0 } }
56 },
57};
58
59static const struct pll_rate *find_rate(unsigned long rate)
60{
61 int i;
62 for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
63 if (rate > freqtbl[i].rate)
64 return &freqtbl[i-1];
65 return &freqtbl[i-1];
66}
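
/*
 * Annotation (not part of the file): with the table sorted highest to
 * lowest, find_rate() returns the smallest entry whose rate is still
 * >= the request, clamping to the first (highest) entry for requests
 * above the table. E.g. with a hypothetical second entry
 * { 65000000, {...} } appended: find_rate(70000000) selects 72000000,
 * find_rate(60000000) selects 65000000, and find_rate(80000000)
 * selects 72000000. With the single 72 MHz entry every request maps to
 * 72000000; note the function can never return NULL.
 */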
67
68static int mpd4_lvds_pll_enable(struct clk_hw *hw)
69{
70 struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
71 struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
72 const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk);
73 int i;
74
75	if (WARN_ON(!pll_rate))
76		return -EINVAL;
77
78	DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate);
79
80 mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33);
81
82 for (i = 0; pll_rate->conf[i].reg; i++)
83 mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val);
84
85 mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01);
86
87 /* Wait until LVDS PLL is locked and ready */
88 while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED))
89 cpu_relax();
90
91 return 0;
92}
93
94static void mpd4_lvds_pll_disable(struct clk_hw *hw)
95{
96 struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
97 struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
98
99 DBG("");
100
101 mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0);
102 mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
103}
104
105static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
106 unsigned long parent_rate)
107{
108 struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
109 return lvds_pll->pixclk;
110}
111
112static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
113 unsigned long *parent_rate)
114{
115 const struct pll_rate *pll_rate = find_rate(rate);
116 return pll_rate->rate;
117}
118
119static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
120 unsigned long parent_rate)
121{
122 struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
123 lvds_pll->pixclk = rate;
124 return 0;
125}
126
127
128static const struct clk_ops mpd4_lvds_pll_ops = {
129 .enable = mpd4_lvds_pll_enable,
130 .disable = mpd4_lvds_pll_disable,
131 .recalc_rate = mpd4_lvds_pll_recalc_rate,
132 .round_rate = mpd4_lvds_pll_round_rate,
133 .set_rate = mpd4_lvds_pll_set_rate,
134};
135
136static const char *mpd4_lvds_pll_parents[] = {
137 "pxo",
138};
139
140static struct clk_init_data pll_init = {
141 .name = "mpd4_lvds_pll",
142 .ops = &mpd4_lvds_pll_ops,
143 .parent_names = mpd4_lvds_pll_parents,
144 .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
145};
146
147struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
148{
149 struct mdp4_lvds_pll *lvds_pll;
150 struct clk *clk;
151 int ret;
152
153 lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
154 if (!lvds_pll) {
155 ret = -ENOMEM;
156 goto fail;
157 }
158
159 lvds_pll->dev = dev;
160
161 lvds_pll->pll_hw.init = &pll_init;
162 clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
163 if (IS_ERR(clk)) {
164 ret = PTR_ERR(clk);
165 goto fail;
166 }
167
168 return clk;
169
170fail:
171 return ERR_PTR(ret);
172}
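
/*
 * Annotation (not part of the file): the clock registered above is an
 * ordinary clk provider, so the encoder drives it through the common
 * clk consumer API. A minimal sketch of the expected usage (the real
 * enable path lives in mdp4_lcdc_encoder_dpms(), outside this excerpt):
 *
 *	unsigned long pc = mode->clock * 1000;
 *
 *	clk_set_rate(lcdc_clk, pc);	 // latches pixclk via .set_rate
 *	clk_prepare_enable(lcdc_clk);	 // programs the PHY PLL, polls lock
 *	...
 *	clk_disable_unprepare(lcdc_clk); // clears PHY CFG0 and PLL CTRL 0
 */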
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fcf95680413d..b67ef5985125 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -280,7 +280,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 	dev->mode_config.max_height = 2048;
 	dev->mode_config.funcs = &mode_config_funcs;
 
-	ret = drm_vblank_init(dev, 1);
+	ret = drm_vblank_init(dev, priv->num_crtcs);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize vblank\n");
 		goto fail;
@@ -315,39 +315,12 @@ static void load_gpu(struct drm_device *dev)
 {
 	static DEFINE_MUTEX(init_lock);
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu;
 
 	mutex_lock(&init_lock);
 
-	if (priv->gpu)
-		goto out;
-
-	gpu = a3xx_gpu_init(dev);
-	if (IS_ERR(gpu)) {
-		dev_warn(dev->dev, "failed to load a3xx gpu\n");
-		gpu = NULL;
-		/* not fatal */
-	}
-
-	if (gpu) {
-		int ret;
-		mutex_lock(&dev->struct_mutex);
-		gpu->funcs->pm_resume(gpu);
-		mutex_unlock(&dev->struct_mutex);
-		ret = gpu->funcs->hw_init(gpu);
-		if (ret) {
-			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
-			gpu->funcs->destroy(gpu);
-			gpu = NULL;
-		} else {
-			/* give inactive pm a chance to kick in: */
-			msm_gpu_retire(gpu);
-		}
-	}
-
-	priv->gpu = gpu;
+	if (!priv->gpu)
+		priv->gpu = adreno_load_gpu(dev);
 
-out:
 	mutex_unlock(&init_lock);
 }
 
@@ -836,6 +809,7 @@ static struct drm_driver msm_driver = {
 	.open = msm_open,
 	.preclose = msm_preclose,
 	.lastclose = msm_lastclose,
+	.set_busid = drm_platform_set_busid,
 	.irq_handler = msm_irq,
 	.irq_preinstall = msm_irq_preinstall,
 	.irq_postinstall = msm_irq_postinstall,
@@ -1025,7 +999,7 @@ static int __init msm_drm_register(void)
 {
 	DBG("init");
 	hdmi_register();
-	a3xx_register();
+	adreno_register();
 	return platform_driver_register(&msm_platform_driver);
 }
 
@@ -1034,7 +1008,7 @@ static void __exit msm_drm_unregister(void)
 	DBG("fini");
 	platform_driver_unregister(&msm_platform_driver);
 	hdmi_unregister();
-	a3xx_unregister();
+	adreno_unregister();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8a2c5fd0893e..67f9d0a2332c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -51,6 +51,7 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/msm_drm.h>
+#include <drm/drm_gem.h>
 
 struct msm_kms;
 struct msm_gpu;
@@ -170,7 +171,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
-		size_t size, struct sg_table *sg);
+		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index d48f9fc5129b..ad772fe36115 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -18,6 +18,7 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 
+#include <linux/dma-buf.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -37,9 +38,9 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
-		size_t size, struct sg_table *sg)
+		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
-	return msm_gem_import(dev, size, sg);
+	return msm_gem_import(dev, attach->dmabuf->size, sg);
 }
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 9b579b792840..fd1e4b4a6d40 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -166,8 +166,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		const char *name, const char *ioname, const char *irqname, int ringsz);
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-void __init a3xx_register(void);
-void __exit a3xx_unregister(void);
+struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+void __init adreno_register(void);
+void __exit adreno_unregister(void);
 
 #endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index f5d7f7ce4bc6..12c24c8abf7f 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -38,6 +38,7 @@ nouveau-y += core/subdev/bios/dcb.o
 nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
 nouveau-y += core/subdev/bios/extdev.o
+nouveau-y += core/subdev/bios/fan.o
 nouveau-y += core/subdev/bios/gpio.o
 nouveau-y += core/subdev/bios/i2c.o
 nouveau-y += core/subdev/bios/init.o
@@ -51,6 +52,8 @@ nouveau-y += core/subdev/bios/therm.o
 nouveau-y += core/subdev/bios/vmap.o
 nouveau-y += core/subdev/bios/volt.o
 nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bios/M0205.o
+nouveau-y += core/subdev/bios/M0209.o
 nouveau-y += core/subdev/bios/P0260.o
 nouveau-y += core/subdev/bus/hwsq.o
 nouveau-y += core/subdev/bus/nv04.o
@@ -124,12 +127,17 @@ nouveau-y += core/subdev/fb/ramnvc0.o
 nouveau-y += core/subdev/fb/ramnve0.o
 nouveau-y += core/subdev/fb/ramgk20a.o
 nouveau-y += core/subdev/fb/ramgm107.o
+nouveau-y += core/subdev/fb/sddr2.o
 nouveau-y += core/subdev/fb/sddr3.o
 nouveau-y += core/subdev/fb/gddr5.o
+nouveau-y += core/subdev/fuse/base.o
+nouveau-y += core/subdev/fuse/g80.o
+nouveau-y += core/subdev/fuse/gf100.o
+nouveau-y += core/subdev/fuse/gm107.o
 nouveau-y += core/subdev/gpio/base.o
 nouveau-y += core/subdev/gpio/nv10.o
 nouveau-y += core/subdev/gpio/nv50.o
-nouveau-y += core/subdev/gpio/nv92.o
+nouveau-y += core/subdev/gpio/nv94.o
 nouveau-y += core/subdev/gpio/nvd0.o
 nouveau-y += core/subdev/gpio/nve0.o
 nouveau-y += core/subdev/i2c/base.o
@@ -190,6 +198,7 @@ nouveau-y += core/subdev/therm/nv50.o
 nouveau-y += core/subdev/therm/nv84.o
 nouveau-y += core/subdev/therm/nva3.o
 nouveau-y += core/subdev/therm/nvd0.o
+nouveau-y += core/subdev/therm/gm107.o
 nouveau-y += core/subdev/timer/base.o
 nouveau-y += core/subdev/timer/nv04.o
 nouveau-y += core/subdev/timer/gk20a.o
@@ -252,6 +261,7 @@ nouveau-y += core/engine/disp/hdanvd0.o
 nouveau-y += core/engine/disp/hdminv84.o
 nouveau-y += core/engine/disp/hdminva3.o
 nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/hdminve0.o
 nouveau-y += core/engine/disp/piornv50.o
 nouveau-y += core/engine/disp/sornv50.o
 nouveau-y += core/engine/disp/sornv94.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 68bf06768123..e962433294c3 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -91,9 +91,10 @@ nvkm_client_notify_del(struct nouveau_client *client, int index)
 }
 
 int
-nvkm_client_notify_new(struct nouveau_client *client,
+nvkm_client_notify_new(struct nouveau_object *object,
 		       struct nvkm_event *event, void *data, u32 size)
 {
+	struct nouveau_client *client = nouveau_client(object);
 	struct nvkm_client_notify *notify;
 	union {
 		struct nvif_notify_req_v0 v0;
@@ -127,8 +128,8 @@ nvkm_client_notify_new(struct nouveau_client *client,
 	}
 
 	if (ret == 0) {
-		ret = nvkm_notify_init(event, nvkm_client_notify, false,
-				       data, size, reply, &notify->n);
+		ret = nvkm_notify_init(object, event, nvkm_client_notify,
+				       false, data, size, reply, &notify->n);
 		if (ret == 0) {
 			client->notify[index] = notify;
 			notify->client = client;
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 0540a48c5678..ff2b434b3db4 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -20,7 +20,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <core/os.h>
+#include <core/object.h>
 #include <core/event.h>
 
 void
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 560b2214cf1c..daee87702502 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -115,7 +115,7 @@ nouveau_gpuobj_create_(struct nouveau_object *parent,
 	gpuobj->size = size;
 
 	if (heap) {
-		ret = nouveau_mm_head(heap, 1, size, size,
+		ret = nouveau_mm_head(heap, 0, 1, size, size,
 				      max(align, (u32)1), &gpuobj->node);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/core/core/ioctl.c
index f7e19bfb489c..692aa92dd850 100644
--- a/drivers/gpu/drm/nouveau/core/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/core/core/ioctl.c
@@ -349,7 +349,6 @@ nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
 static int
 nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
 {
-	struct nouveau_client *client = nouveau_client(handle->object);
 	struct nouveau_object *object = handle->object;
 	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
@@ -365,7 +364,7 @@ nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
 	if (ret = -ENODEV, ofuncs->ntfy)
 		ret = ofuncs->ntfy(object, args->v0.event, &event);
 	if (ret == 0) {
-		ret = nvkm_client_notify_new(client, event, data, size);
+		ret = nvkm_client_notify_new(object, event, data, size);
 		if (ret >= 0) {
 			args->v0.index = ret;
 			ret = 0;
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index 7a4e0891c5f8..b4f5db66d5b5 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -28,6 +28,24 @@
 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :         \
 	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
+static void
+nouveau_mm_dump(struct nouveau_mm *mm, const char *header)
+{
+	struct nouveau_mm_node *node;
+
+	printk(KERN_ERR "nouveau: %s\n", header);
+	printk(KERN_ERR "nouveau: node list:\n");
+	list_for_each_entry(node, &mm->nodes, nl_entry) {
+		printk(KERN_ERR "nouveau: \t%08x %08x %d\n",
+		       node->offset, node->length, node->type);
+	}
+	printk(KERN_ERR "nouveau: free list:\n");
+	list_for_each_entry(node, &mm->free, fl_entry) {
+		printk(KERN_ERR "nouveau: \t%08x %08x %d\n",
+		       node->offset, node->length, node->type);
+	}
+}
+
 void
 nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
 {
@@ -37,29 +55,29 @@ nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
 	struct nouveau_mm_node *prev = node(this, prev);
 	struct nouveau_mm_node *next = node(this, next);
 
-	if (prev && prev->type == 0) {
+	if (prev && prev->type == NVKM_MM_TYPE_NONE) {
 		prev->length += this->length;
 		list_del(&this->nl_entry);
 		kfree(this); this = prev;
 	}
 
-	if (next && next->type == 0) {
+	if (next && next->type == NVKM_MM_TYPE_NONE) {
 		next->offset = this->offset;
 		next->length += this->length;
-		if (this->type == 0)
+		if (this->type == NVKM_MM_TYPE_NONE)
 			list_del(&this->fl_entry);
 		list_del(&this->nl_entry);
 		kfree(this); this = NULL;
 	}
 
-	if (this && this->type != 0) {
+	if (this && this->type != NVKM_MM_TYPE_NONE) {
 		list_for_each_entry(prev, &mm->free, fl_entry) {
 			if (this->offset < prev->offset)
 				break;
 		}
 
 		list_add_tail(&this->fl_entry, &prev->fl_entry);
-		this->type = 0;
+		this->type = NVKM_MM_TYPE_NONE;
 	}
 }
 
@@ -80,27 +98,32 @@ region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 
 	b->offset = a->offset;
 	b->length = size;
+	b->heap = a->heap;
 	b->type = a->type;
 	a->offset += size;
 	a->length -= size;
 	list_add_tail(&b->nl_entry, &a->nl_entry);
-	if (b->type == 0)
+	if (b->type == NVKM_MM_TYPE_NONE)
 		list_add_tail(&b->fl_entry, &a->fl_entry);
 	return b;
 }
 
 int
-nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
-		u32 align, struct nouveau_mm_node **pnode)
+nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,
+		u32 size_min, u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;
 	u32 splitoff;
 	u32 s, e;
 
-	BUG_ON(!type);
+	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
 
 	list_for_each_entry(this, &mm->free, fl_entry) {
+		if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
+			if (this->heap != heap)
+				continue;
+		}
 		e = this->offset + this->length;
 		s = this->offset;
 
@@ -149,27 +172,32 @@ region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 	a->length -= size;
 	b->offset = a->offset + a->length;
 	b->length = size;
+	b->heap = a->heap;
 	b->type = a->type;
 
 	list_add(&b->nl_entry, &a->nl_entry);
-	if (b->type == 0)
+	if (b->type == NVKM_MM_TYPE_NONE)
 		list_add(&b->fl_entry, &a->fl_entry);
 	return b;
 }
 
 int
-nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
-		u32 align, struct nouveau_mm_node **pnode)
+nouveau_mm_tail(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,
+		u32 size_min, u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;
 
-	BUG_ON(!type);
+	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
 
 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
 		u32 e = this->offset + this->length;
 		u32 s = this->offset;
 		u32 c = 0, a;
+		if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
+			if (this->heap != heap)
+				continue;
+		}
 
 		prev = node(this, prev);
 		if (prev && prev->type != type)
@@ -209,9 +237,23 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 int
 nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-	struct nouveau_mm_node *node;
+	struct nouveau_mm_node *node, *prev;
+	u32 next;
 
-	if (block) {
+	if (nouveau_mm_initialised(mm)) {
+		prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
+		next = prev->offset + prev->length;
+		if (next != offset) {
+			BUG_ON(next > offset);
+			if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+				return -ENOMEM;
+			node->type = NVKM_MM_TYPE_HOLE;
+			node->offset = next;
+			node->length = offset - next;
+			list_add_tail(&node->nl_entry, &mm->nodes);
+		}
+		BUG_ON(block != mm->block_size);
+	} else {
 		INIT_LIST_HEAD(&mm->nodes);
 		INIT_LIST_HEAD(&mm->free);
 		mm->block_size = block;
@@ -230,25 +272,32 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
-	mm->heap_nodes++;
+	node->heap = ++mm->heap_nodes;
 	return 0;
 }
 
 int
 nouveau_mm_fini(struct nouveau_mm *mm)
 {
-	if (nouveau_mm_initialised(mm)) {
-		struct nouveau_mm_node *node, *heap =
-			list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
-		int nodes = 0;
+	struct nouveau_mm_node *node, *temp;
+	int nodes = 0;
 
-		list_for_each_entry(node, &mm->nodes, nl_entry) {
-			if (WARN_ON(nodes++ == mm->heap_nodes))
+	if (!nouveau_mm_initialised(mm))
+		return 0;
+
+	list_for_each_entry(node, &mm->nodes, nl_entry) {
+		if (node->type != NVKM_MM_TYPE_HOLE) {
+			if (++nodes > mm->heap_nodes) {
+				nouveau_mm_dump(mm, "mm not clean!");
 				return -EBUSY;
+			}
 		}
-
-		kfree(heap);
 	}
 
+	list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
+		list_del(&node->nl_entry);
+		kfree(node);
+	}
+	mm->heap_nodes = 0;
 	return 0;
 }
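
/*
 * Annotation (not part of the patch): both allocators now take a heap
 * selector ahead of the type. nouveau_mm_init() tags each initialised
 * range with node->heap = ++mm->heap_nodes, and callers pass either
 * that tag or NVKM_MM_HEAP_ANY to search every heap. A minimal sketch,
 * assuming a struct nouveau_mm *mm set up elsewhere:
 *
 *	struct nouveau_mm_node *node;
 *	int ret;
 *
 *	// any heap, type 1, exactly 16 units, 1-unit alignment
 *	ret = nouveau_mm_head(mm, NVKM_MM_HEAP_ANY, 1, 16, 16, 1, &node);
 *	if (ret == 0)
 *		nouveau_mm_free(mm, &node);
 */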
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
index 76adb81bdea2..d1bcde55e9d7 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -134,14 +134,15 @@ nvkm_notify_fini(struct nvkm_notify *notify)
 }
 
 int
-nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *),
-		 bool work, void *data, u32 size, u32 reply,
+nvkm_notify_init(struct nouveau_object *object, struct nvkm_event *event,
+		 int (*func)(struct nvkm_notify *), bool work,
+		 void *data, u32 size, u32 reply,
 		 struct nvkm_notify *notify)
 {
 	unsigned long flags;
 	int ret = -ENODEV;
 	if ((notify->event = event), event->refs) {
-		ret = event->func->ctor(data, size, notify);
+		ret = event->func->ctor(object, data, size, notify);
 		if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
 			notify->flags = 0;
 			notify->block = 1;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 8928f7981d4a..0ef5a5713182 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -505,7 +505,8 @@ nouveau_device_sclass[] = {
 };
 
 static int
-nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
+nouveau_device_event_ctor(struct nouveau_object *object, void *data, u32 size,
+			  struct nvkm_notify *notify)
 {
 	if (!WARN_ON(size != 0)) {
 		notify->size = 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index 377ec0b8851e..6295668e29a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -26,6 +26,7 @@
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
+#include <subdev/fuse.h>
 #include <subdev/clock.h>
 #include <subdev/therm.h>
 #include <subdev/mxm.h>
@@ -62,10 +63,9 @@ gm100_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
-#endif
+		device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
@@ -77,8 +77,9 @@ gm100_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
-#if 0
 		device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+
+#if 0
 		device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
 #endif
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 932f84fae459..96f568d1321b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -26,6 +26,7 @@
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
+#include <subdev/fuse.h>
 #include <subdev/clock.h>
 #include <subdev/therm.h>
 #include <subdev/mxm.h>
@@ -62,6 +63,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -87,6 +89,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -115,6 +118,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -141,8 +145,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0x92:
 		device->cname = "G92";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -169,8 +174,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0x94:
 		device->cname = "G94";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -197,8 +203,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0x96:
 		device->cname = "G96";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -225,8 +232,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0x98:
 		device->cname = "G98";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -253,8 +261,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa0:
 		device->cname = "G200";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -281,8 +290,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0xaa:
 		device->cname = "MCP77/MCP78";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -309,8 +319,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0xac:
 		device->cname = "MCP79/MCP7A";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -337,8 +348,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa3:
 		device->cname = "GT215";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -367,8 +379,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa5:
 		device->cname = "GT216";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -396,8 +409,9 @@ nv50_identify(struct nouveau_device *device)
	case 0xa8:
 		device->cname = "GT218";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -425,8 +439,9 @@ nv50_identify(struct nouveau_device *device)
 	case 0xaf:
 		device->cname = "MCP89";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &g80_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index b4a2917ce555..cd05677ad4b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -26,6 +26,7 @@
 #include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
+#include <subdev/fuse.h>
 #include <subdev/clock.h>
 #include <subdev/therm.h>
 #include <subdev/mxm.h>
@@ -60,8 +61,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc0:
 		device->cname = "GF100";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -92,8 +94,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc4:
 		device->cname = "GF104";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -124,8 +127,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc3:
 		device->cname = "GF106";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -155,8 +159,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xce:
 		device->cname = "GF114";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -187,8 +192,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xcf:
 		device->cname = "GF116";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -219,8 +225,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc1:
 		device->cname = "GF108";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -250,8 +257,9 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc8:
 		device->cname = "GF110";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -284,6 +292,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -315,6 +324,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index cdf9147f32a1..b1b2e484ecfa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -26,6 +26,7 @@
26#include <subdev/bus.h> 26#include <subdev/bus.h>
27#include <subdev/gpio.h> 27#include <subdev/gpio.h>
28#include <subdev/i2c.h> 28#include <subdev/i2c.h>
29#include <subdev/fuse.h>
29#include <subdev/clock.h> 30#include <subdev/clock.h>
30#include <subdev/therm.h> 31#include <subdev/therm.h>
31#include <subdev/mxm.h> 32#include <subdev/mxm.h>
@@ -62,6 +63,7 @@ nve0_identify(struct nouveau_device *device)
62 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 63 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
63 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 64 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
64 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 65 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
66 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
65 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 67 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
66 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 68 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
67 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 69 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -95,6 +97,7 @@ nve0_identify(struct nouveau_device *device)
95 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 97 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
96 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 98 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
97 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 99 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
100 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
98 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 101 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
99 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 102 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
100 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 103 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -128,6 +131,7 @@ nve0_identify(struct nouveau_device *device)
128 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 131 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
129 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 132 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
130 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 133 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
134 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
131 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 135 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
132 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 136 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
133 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 137 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -161,6 +165,7 @@ nve0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass; 165 device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass;
162 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; 166 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
163 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 167 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
168 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
164 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; 169 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
165 device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass; 170 device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
166 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; 171 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
@@ -180,6 +185,7 @@ nve0_identify(struct nouveau_device *device)
180 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 185 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
181 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 186 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
182 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 187 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
188 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
183 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 189 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
184 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 190 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
185 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 191 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -213,6 +219,7 @@ nve0_identify(struct nouveau_device *device)
213 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 219 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
214 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 220 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
215 device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass; 221 device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
222 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
216 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 223 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
217 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 224 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
218 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 225 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -246,6 +253,7 @@ nve0_identify(struct nouveau_device *device)
246 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 253 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
247 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 254 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
248 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 255 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
256 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
249 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 257 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
250 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 258 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
251 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 259 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
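
Every identify table touched above gains the same one-liner binding the new FUSE subdev to gf100_fuse_oclass; the core/device.h hunk further down inserts NVDEV_SUBDEV_FUSE ahead of MXM so the fuse block is initialised before any subdev that might read it. A minimal sketch of the hook-up, assuming the declarations pulled in by the added <subdev/fuse.h> include (the function name is illustrative, not part of the patch):

    #include <core/device.h>
    #include <subdev/fuse.h>

    /* Illustrative: the registration each chipset case gains. */
    static void
    example_enable_fuse(struct nouveau_device *device)
    {
            device->oclass[NVDEV_SUBDEV_FUSE] = &gf100_fuse_oclass;
    }
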
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 22d55f6cde50..64b84667f3a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -32,7 +32,8 @@
32#include "conn.h" 32#include "conn.h"
33 33
34int 34int
35nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *notify) 35nouveau_disp_vblank_ctor(struct nouveau_object *object, void *data, u32 size,
36 struct nvkm_notify *notify)
36{ 37{
37 struct nouveau_disp *disp = 38 struct nouveau_disp *disp =
38 container_of(notify->event, typeof(*disp), vblank); 39 container_of(notify->event, typeof(*disp), vblank);
@@ -61,7 +62,8 @@ nouveau_disp_vblank(struct nouveau_disp *disp, int head)
61} 62}
62 63
63static int 64static int
64nouveau_disp_hpd_ctor(void *data, u32 size, struct nvkm_notify *notify) 65nouveau_disp_hpd_ctor(struct nouveau_object *object, void *data, u32 size,
66 struct nvkm_notify *notify)
65{ 67{
66 struct nouveau_disp *disp = 68 struct nouveau_disp *disp =
67 container_of(notify->event, typeof(*disp), hpd); 69 container_of(notify->event, typeof(*disp), hpd);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
index 3d1070228977..1496b567dd4a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -126,8 +126,8 @@ nvkm_connector_create_(struct nouveau_object *parent,
126 return 0; 126 return 0;
127 } 127 }
128 128
129 ret = nvkm_notify_init(&gpio->event, nvkm_connector_hpd, true, 129 ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd,
130 &(struct nvkm_gpio_ntfy_req) { 130 true, &(struct nvkm_gpio_ntfy_req) {
131 .mask = NVKM_GPIO_TOGGLED, 131 .mask = NVKM_GPIO_TOGGLED,
132 .line = func.line, 132 .line = func.line,
133 }, 133 },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index d54da8b5f87e..b3df3fe2dc09 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -68,6 +68,10 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
68 if (ret) 68 if (ret)
69 return ret; 69 return ret;
70 70
71 ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
72 if (ret)
73 return ret;
74
71 nv_engine(priv)->sclass = gm107_disp_base_oclass; 75 nv_engine(priv)->sclass = gm107_disp_base_oclass;
72 nv_engine(priv)->cclass = &nv50_disp_cclass; 76 nv_engine(priv)->cclass = &nv50_disp_cclass;
73 nv_subdev(priv)->intr = nvd0_disp_intr; 77 nv_subdev(priv)->intr = nvd0_disp_intr;
@@ -80,7 +84,7 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
80 priv->dac.sense = nv50_dac_sense; 84 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power; 85 priv->sor.power = nv50_sor_power;
82 priv->sor.hda_eld = nvd0_hda_eld; 86 priv->sor.hda_eld = nvd0_hda_eld;
83 priv->sor.hdmi = nvd0_hdmi_ctrl; 87 priv->sor.hdmi = nve0_hdmi_ctrl;
84 return 0; 88 return 0;
85} 89}
86 90
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 8b4e06abe533..fe9ef5894dd4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -26,6 +26,8 @@
26#include <nvif/unpack.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29#include <subdev/timer.h>
30
29#include "nv50.h" 31#include "nv50.h"
30 32
31int 33int
@@ -46,16 +48,21 @@ nva3_hda_eld(NV50_DISP_MTHD_V1)
46 return ret; 48 return ret;
47 49
48 if (size && args->v0.data[0]) { 50 if (size && args->v0.data[0]) {
51 if (outp->info.type == DCB_OUTPUT_DP) {
52 nv_mask(priv, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
53 nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
54 }
 49 for (i = 0; i < size; i++) 55 for (i = 0; i < size; i++)
 50 nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[i]); 56 nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
51 for (; i < 0x60; i++) 57 for (; i < 0x60; i++)
52 nv_wr32(priv, 0x61c440 + soff, (i << 8)); 58 nv_wr32(priv, 0x61c440 + soff, (i << 8));
53 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); 59 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
54 } else
55 if (size) {
56 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
57 } else { 60 } else {
58 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); 61 if (outp->info.type == DCB_OUTPUT_DP) {
62 nv_mask(priv, 0x61c1e0 + soff, 0x80000001, 0x80000000);
63 nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
64 }
65 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
59 } 66 }
60 67
61 return 0; 68 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index baf558fc12fb..1d4e8432d857 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -26,10 +26,7 @@
26#include <nvif/unpack.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29#include <subdev/bios.h> 29#include <subdev/timer.h>
30#include <subdev/bios/dcb.h>
31#include <subdev/bios/dp.h>
32#include <subdev/bios/init.h>
33 30
34#include "nv50.h" 31#include "nv50.h"
35 32
@@ -40,6 +37,7 @@ nvd0_hda_eld(NV50_DISP_MTHD_V1)
40 struct nv50_disp_sor_hda_eld_v0 v0; 37 struct nv50_disp_sor_hda_eld_v0 v0;
41 } *args = data; 38 } *args = data;
42 const u32 soff = outp->or * 0x030; 39 const u32 soff = outp->or * 0x030;
40 const u32 hoff = head * 0x800;
43 int ret, i; 41 int ret, i;
44 42
45 nv_ioctl(object, "disp sor hda eld size %d\n", size); 43 nv_ioctl(object, "disp sor hda eld size %d\n", size);
@@ -51,16 +49,22 @@ nvd0_hda_eld(NV50_DISP_MTHD_V1)
51 return ret; 49 return ret;
52 50
53 if (size && args->v0.data[0]) { 51 if (size && args->v0.data[0]) {
52 if (outp->info.type == DCB_OUTPUT_DP) {
53 nv_mask(priv, 0x616618 + hoff, 0x8000000c, 0x80000001);
54 nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
55 }
56 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
54 for (i = 0; i < size; i++) 57 for (i = 0; i < size; i++)
55 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]); 58 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
56 for (; i < 0x60; i++) 59 for (; i < 0x60; i++)
57 nv_wr32(priv, 0x10ec00 + soff, (i << 8)); 60 nv_wr32(priv, 0x10ec00 + soff, (i << 8));
58 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); 61 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
59 } else
60 if (size) {
61 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
62 } else { 62 } else {
63 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); 63 if (outp->info.type == DCB_OUTPUT_DP) {
64 nv_mask(priv, 0x616618 + hoff, 0x80000001, 0x80000000);
65 nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
66 }
67 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
64 } 68 }
65 69
66 return 0; 70 return 0;
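
Both ELD paths stream the buffer one byte per register write, with the byte index carried in bits 8 and up of the data word, then zero-pad out to 0x60 entries before flipping the enable bits. A standalone sketch of that upload pattern, outside the kernel (a printf stub stands in for nv_wr32()):

    #include <stdint.h>
    #include <stdio.h>

    static void
    wr32(uint32_t reg, uint32_t val)          /* stands in for nv_wr32() */
    {
            printf("wr 0x%06x <- 0x%08x\n", reg, val);
    }

    /* Mirror the ELD upload loops in nva3_hda_eld()/nvd0_hda_eld():
     * one byte per write, index in bits 8+, zero-padded to 0x60 entries. */
    static void
    eld_upload(uint32_t base, const uint8_t *eld, uint32_t size)
    {
            uint32_t i;
            for (i = 0; i < size; i++)
                    wr32(base, (i << 8) | eld[i]);
            for (; i < 0x60; i++)
                    wr32(base, (i << 8));
    }

    int main(void)
    {
            const uint8_t eld[4] = { 0x10, 0x00, 0x08, 0x65 };
            eld_upload(0x10ec00, eld, sizeof(eld));
            return 0;
    }
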
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
index 3106d295b48d..bac4fc4570f0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -75,8 +75,5 @@ nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1)
75 75
76 /* HDMI_CTRL */ 76 /* HDMI_CTRL */
77 nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl); 77 nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
78
79 /* NFI, audio doesn't work without it though.. */
80 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
81 return 0; 78 return 0;
82} 79}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c
new file mode 100644
index 000000000000..528d14ec2f7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminve0.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/client.h>
26#include <nvif/unpack.h>
27#include <nvif/class.h>
28
29#include "nv50.h"
30
31int
32nve0_hdmi_ctrl(NV50_DISP_MTHD_V1)
33{
34 const u32 hoff = (head * 0x800);
35 const u32 hdmi = (head * 0x400);
36 union {
37 struct nv50_disp_sor_hdmi_pwr_v0 v0;
38 } *args = data;
39 u32 ctrl;
40 int ret;
41
42 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) {
44 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
45 "max_ac_packet %d rekey %d\n",
46 args->v0.version, args->v0.state,
47 args->v0.max_ac_packet, args->v0.rekey);
48 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
49 return -EINVAL;
50 ctrl = 0x40000000 * !!args->v0.state;
51 ctrl |= args->v0.max_ac_packet << 16;
52 ctrl |= args->v0.rekey;
53 } else
54 return ret;
55
56 if (!(ctrl & 0x40000000)) {
57 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
58 nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
59 nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
60 return 0;
61 }
62
63 /* AVI InfoFrame */
64 nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
65 nv_wr32(priv, 0x690008 + hdmi, 0x000d0282);
66 nv_wr32(priv, 0x69000c + hdmi, 0x0000006f);
67 nv_wr32(priv, 0x690010 + hdmi, 0x00000000);
68 nv_wr32(priv, 0x690014 + hdmi, 0x00000000);
69 nv_wr32(priv, 0x690018 + hdmi, 0x00000000);
70 nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000001);
71
72 /* ??? InfoFrame? */
73 nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
74 nv_wr32(priv, 0x6900cc + hdmi, 0x00000010);
75 nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
76
77 /* ??? */
78 nv_wr32(priv, 0x690080 + hdmi, 0x82000000);
79
80 /* HDMI_CTRL */
81 nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
82 return 0;
83}
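
The control word written to HDMI_CTRL packs the method arguments exactly as new-file lines 48-52 show: bit 30 enables HDMI, bits 16-20 carry max_ac_packet, and the low bits carry rekey, with out-of-range values rejected up front. A standalone sketch of that packing (plain C, outside the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the HDMI_CTRL word the way nve0_hdmi_ctrl() does. */
    static uint32_t
    hdmi_ctrl_pack(int state, uint32_t max_ac_packet, uint32_t rekey)
    {
            uint32_t ctrl;
            if (max_ac_packet > 0x1f || rekey > 0x7f)
                    return 0;       /* the method returns -EINVAL here */
            ctrl  = 0x40000000 * !!state;
            ctrl |= max_ac_packet << 16;
            ctrl |= rekey;
            return ctrl;
    }

    int main(void)
    {
            printf("0x%08x\n", hdmi_ctrl_pack(1, 0x10, 0x38)); /* 0x40100038 */
            return 0;
    }
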
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index f8cbb512132f..2df3a937037d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -29,6 +29,7 @@
29#include <core/enum.h> 29#include <core/enum.h>
30#include <nvif/unpack.h> 30#include <nvif/unpack.h>
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/event.h>
32 33
33#include <subdev/bios.h> 34#include <subdev/bios.h>
34#include <subdev/bios/dcb.h> 35#include <subdev/bios/dcb.h>
@@ -82,6 +83,71 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
82 nouveau_namedb_destroy(&chan->base); 83 nouveau_namedb_destroy(&chan->base);
83} 84}
84 85
86static void
87nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
88{
89 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
90 nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
91}
92
93static void
94nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
95{
96 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
97 nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
98}
99
100void
101nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
102{
103 struct nvif_notify_uevent_rep {
104 } rep;
105
106 nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
107}
108
109int
110nv50_disp_chan_uevent_ctor(struct nouveau_object *object, void *data, u32 size,
111 struct nvkm_notify *notify)
112{
113 struct nv50_disp_dmac *dmac = (void *)object;
114 union {
115 struct nvif_notify_uevent_req none;
116 } *args = data;
117 int ret;
118
119 if (nvif_unvers(args->none)) {
120 notify->size = sizeof(struct nvif_notify_uevent_rep);
121 notify->types = 1;
122 notify->index = dmac->base.chid;
123 return 0;
124 }
125
126 return ret;
127}
128
129const struct nvkm_event_func
130nv50_disp_chan_uevent = {
131 .ctor = nv50_disp_chan_uevent_ctor,
132 .init = nv50_disp_chan_uevent_init,
133 .fini = nv50_disp_chan_uevent_fini,
134};
135
136int
137nv50_disp_chan_ntfy(struct nouveau_object *object, u32 type,
138 struct nvkm_event **pevent)
139{
140 struct nv50_disp_priv *priv = (void *)object->engine;
141 switch (type) {
142 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
143 *pevent = &priv->uevent;
144 return 0;
145 default:
146 break;
147 }
148 return -EINVAL;
149}
150
85int 151int
86nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size) 152nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
87{ 153{
@@ -195,7 +261,7 @@ nv50_disp_dmac_init(struct nouveau_object *object)
195 return ret; 261 return ret;
196 262
197 /* enable error reporting */ 263 /* enable error reporting */
198 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid); 264 nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
199 265
200 /* initialise channel for dma command submission */ 266 /* initialise channel for dma command submission */
201 nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push); 267 nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
@@ -232,7 +298,7 @@ nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
232 return -EBUSY; 298 return -EBUSY;
233 } 299 }
234 300
235 /* disable error reporting */ 301 /* disable error reporting and completion notifications */
236 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid); 302 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
237 303
238 return nv50_disp_chan_fini(&dmac->base, suspend); 304 return nv50_disp_chan_fini(&dmac->base, suspend);
@@ -454,7 +520,7 @@ nv50_disp_mast_init(struct nouveau_object *object)
454 return ret; 520 return ret;
455 521
456 /* enable error reporting */ 522 /* enable error reporting */
457 nv_mask(priv, 0x610028, 0x00010001, 0x00010001); 523 nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
458 524
459 /* attempt to unstick channel from some unknown state */ 525 /* attempt to unstick channel from some unknown state */
460 if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000) 526 if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
@@ -494,7 +560,7 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
494 return -EBUSY; 560 return -EBUSY;
495 } 561 }
496 562
497 /* disable error reporting */ 563 /* disable error reporting and completion notifications */
498 nv_mask(priv, 0x610028, 0x00010001, 0x00000000); 564 nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
499 565
500 return nv50_disp_chan_fini(&mast->base, suspend); 566 return nv50_disp_chan_fini(&mast->base, suspend);
@@ -507,6 +573,7 @@ nv50_disp_mast_ofuncs = {
507 .base.init = nv50_disp_mast_init, 573 .base.init = nv50_disp_mast_init,
508 .base.fini = nv50_disp_mast_fini, 574 .base.fini = nv50_disp_mast_fini,
509 .base.map = nv50_disp_chan_map, 575 .base.map = nv50_disp_chan_map,
576 .base.ntfy = nv50_disp_chan_ntfy,
510 .base.rd32 = nv50_disp_chan_rd32, 577 .base.rd32 = nv50_disp_chan_rd32,
511 .base.wr32 = nv50_disp_chan_wr32, 578 .base.wr32 = nv50_disp_chan_wr32,
512 .chid = 0, 579 .chid = 0,
@@ -607,6 +674,7 @@ nv50_disp_sync_ofuncs = {
607 .base.dtor = nv50_disp_dmac_dtor, 674 .base.dtor = nv50_disp_dmac_dtor,
608 .base.init = nv50_disp_dmac_init, 675 .base.init = nv50_disp_dmac_init,
609 .base.fini = nv50_disp_dmac_fini, 676 .base.fini = nv50_disp_dmac_fini,
677 .base.ntfy = nv50_disp_chan_ntfy,
610 .base.map = nv50_disp_chan_map, 678 .base.map = nv50_disp_chan_map,
611 .base.rd32 = nv50_disp_chan_rd32, 679 .base.rd32 = nv50_disp_chan_rd32,
612 .base.wr32 = nv50_disp_chan_wr32, 680 .base.wr32 = nv50_disp_chan_wr32,
@@ -696,6 +764,7 @@ nv50_disp_ovly_ofuncs = {
696 .base.dtor = nv50_disp_dmac_dtor, 764 .base.dtor = nv50_disp_dmac_dtor,
697 .base.init = nv50_disp_dmac_init, 765 .base.init = nv50_disp_dmac_init,
698 .base.fini = nv50_disp_dmac_fini, 766 .base.fini = nv50_disp_dmac_fini,
767 .base.ntfy = nv50_disp_chan_ntfy,
699 .base.map = nv50_disp_chan_map, 768 .base.map = nv50_disp_chan_map,
700 .base.rd32 = nv50_disp_chan_rd32, 769 .base.rd32 = nv50_disp_chan_rd32,
701 .base.wr32 = nv50_disp_chan_wr32, 770 .base.wr32 = nv50_disp_chan_wr32,
@@ -813,6 +882,7 @@ nv50_disp_oimm_ofuncs = {
813 .base.dtor = nv50_disp_pioc_dtor, 882 .base.dtor = nv50_disp_pioc_dtor,
814 .base.init = nv50_disp_pioc_init, 883 .base.init = nv50_disp_pioc_init,
815 .base.fini = nv50_disp_pioc_fini, 884 .base.fini = nv50_disp_pioc_fini,
885 .base.ntfy = nv50_disp_chan_ntfy,
816 .base.map = nv50_disp_chan_map, 886 .base.map = nv50_disp_chan_map,
817 .base.rd32 = nv50_disp_chan_rd32, 887 .base.rd32 = nv50_disp_chan_rd32,
818 .base.wr32 = nv50_disp_chan_wr32, 888 .base.wr32 = nv50_disp_chan_wr32,
@@ -860,6 +930,7 @@ nv50_disp_curs_ofuncs = {
860 .base.dtor = nv50_disp_pioc_dtor, 930 .base.dtor = nv50_disp_pioc_dtor,
861 .base.init = nv50_disp_pioc_init, 931 .base.init = nv50_disp_pioc_init,
862 .base.fini = nv50_disp_pioc_fini, 932 .base.fini = nv50_disp_pioc_fini,
933 .base.ntfy = nv50_disp_chan_ntfy,
863 .base.map = nv50_disp_chan_map, 934 .base.map = nv50_disp_chan_map,
864 .base.rd32 = nv50_disp_chan_rd32, 935 .base.rd32 = nv50_disp_chan_rd32,
865 .base.wr32 = nv50_disp_chan_wr32, 936 .base.wr32 = nv50_disp_chan_wr32,
@@ -1559,7 +1630,7 @@ nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
1559} 1630}
1560 1631
1561static void 1632static void
1562nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, 1633nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
1563 struct dcb_output *outp, u32 pclk) 1634 struct dcb_output *outp, u32 pclk)
1564{ 1635{
1565 const int link = !(outp->sorconf.link & 1); 1636 const int link = !(outp->sorconf.link & 1);
@@ -1568,24 +1639,36 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
1568 const u32 loff = (link * 0x080) + soff; 1639 const u32 loff = (link * 0x080) + soff;
1569 const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8)); 1640 const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
1570 const u32 symbol = 100000; 1641 const u32 symbol = 100000;
1571 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000; 1642 const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff;
1643 const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff;
1644 const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff;
1645 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
1572 u32 clksor = nv_rd32(priv, 0x614300 + soff); 1646 u32 clksor = nv_rd32(priv, 0x614300 + soff);
1573 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; 1647 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
1574 int TU, VTUi, VTUf, VTUa; 1648 int TU, VTUi, VTUf, VTUa;
1575 u64 link_data_rate, link_ratio, unk; 1649 u64 link_data_rate, link_ratio, unk;
1576 u32 best_diff = 64 * symbol; 1650 u32 best_diff = 64 * symbol;
1577 u32 link_nr, link_bw, bits; 1651 u32 link_nr, link_bw, bits;
1578 1652 u64 value;
1579 /* calculate packed data rate for each lane */ 1653
1580 if (dpctrl > 0x00030000) link_nr = 4; 1654 link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
1581 else if (dpctrl > 0x00010000) link_nr = 2; 1655 link_nr = hweight32(dpctrl & 0x000f0000);
1582 else link_nr = 1; 1656
1583 1657 /* symbols/hblank - algorithm taken from comments in tegra driver */
1584 if (clksor & 0x000c0000) 1658 value = vblanke + vactive - vblanks - 7;
1585 link_bw = 270000; 1659 value = value * link_bw;
1586 else 1660 do_div(value, pclk);
1587 link_bw = 162000; 1661 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
1588 1662 nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value);
1663
1664 /* symbols/vblank - algorithm taken from comments in tegra driver */
1665 value = vblanks - vblanke - 25;
1666 value = value * link_bw;
1667 do_div(value, pclk);
1668 value = value - ((36 / link_nr) + 3) - 1;
1669 nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value);
1670
1671 /* watermark / activesym */
1589 if ((ctrl & 0xf0000) == 0x60000) bits = 30; 1672 if ((ctrl & 0xf0000) == 0x60000) bits = 30;
1590 else if ((ctrl & 0xf0000) == 0x50000) bits = 24; 1673 else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
1591 else bits = 18; 1674 else bits = 18;
@@ -1731,7 +1814,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1731 } else 1814 } else
1732 if (!outp->info.location) { 1815 if (!outp->info.location) {
1733 if (outp->info.type == DCB_OUTPUT_DP) 1816 if (outp->info.type == DCB_OUTPUT_DP)
1734 nv50_disp_intr_unk20_2_dp(priv, &outp->info, pclk); 1817 nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk);
1735 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800; 1818 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
1736 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000; 1819 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1737 hval = 0x00000000; 1820 hval = 0x00000000;
@@ -1847,6 +1930,12 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
1847 intr0 &= ~(0x00010000 << chid); 1930 intr0 &= ~(0x00010000 << chid);
1848 } 1931 }
1849 1932
1933 while (intr0 & 0x0000001f) {
1934 u32 chid = __ffs(intr0 & 0x0000001f);
1935 nv50_disp_chan_uevent_send(priv, chid);
1936 intr0 &= ~(0x00000001 << chid);
1937 }
1938
1850 if (intr1 & 0x00000004) { 1939 if (intr1 & 0x00000004) {
1851 nouveau_disp_vblank(&priv->base, 0); 1940 nouveau_disp_vblank(&priv->base, 0);
1852 nv_wr32(priv, 0x610024, 0x00000004); 1941 nv_wr32(priv, 0x610024, 0x00000004);
@@ -1881,6 +1970,10 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1881 if (ret) 1970 if (ret)
1882 return ret; 1971 return ret;
1883 1972
1973 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
1974 if (ret)
1975 return ret;
1976
1884 nv_engine(priv)->sclass = nv50_disp_base_oclass; 1977 nv_engine(priv)->sclass = nv50_disp_base_oclass;
1885 nv_engine(priv)->cclass = &nv50_disp_cclass; 1978 nv_engine(priv)->cclass = &nv50_disp_cclass;
1886 nv_subdev(priv)->intr = nv50_disp_intr; 1979 nv_subdev(priv)->intr = nv50_disp_intr;
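
The reworked DP path above now programs symbols-per-hblank and symbols-per-vblank using the formulas credited to comments in the Tegra driver, before falling through to the existing watermark/activesym search; the kernel masks the results into the 16-bit and 24-bit fields at 0x61c1e8/0x61c1ec (and 0x616620/0x616624 in the nvd0 variant below). A standalone sketch of just that arithmetic, with do_div() replaced by plain 64-bit division and made-up timings (degenerate rasters are not handled here, as in the original):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the symbol-count math added to nv50_disp_intr_unk20_2_dp()
     * and nvd0_disp_intr_unk2_2_tu(). link_bw and pclk are in kHz;
     * enhanced_framing mirrors the dpctrl & 0x00004000 test. */
    static uint32_t
    dp_hblank_symbols(int32_t vactive, int32_t vblanke, int32_t vblanks,
                      uint32_t link_bw, uint32_t pclk, int enhanced_framing,
                      uint32_t link_nr)
    {
            uint64_t value = (uint64_t)(vblanke + vactive - vblanks - 7) * link_bw;
            value /= pclk;                  /* do_div(value, pclk) in the kernel */
            return value - (3 * !!enhanced_framing) - (12 / link_nr);
    }

    static uint32_t
    dp_vblank_symbols(int32_t vblanke, int32_t vblanks,
                      uint32_t link_bw, uint32_t pclk, uint32_t link_nr)
    {
            uint64_t value = (uint64_t)(vblanks - vblanke - 25) * link_bw;
            value /= pclk;
            return value - ((36 / link_nr) + 3) - 1;
    }

    int main(void)
    {
            /* made-up 1125-line raster, 2-lane 2.70Gb/s link, 148.5MHz pclk */
            printf("hblank symbols: %u\n",
                   dp_hblank_symbols(1125, 30, 1110, 270000, 148500, 1, 2));
            printf("vblank symbols: %u\n",
                   dp_vblank_symbols(30, 1110, 270000, 148500, 2));
            return 0;
    }
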
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 8ab14461f70c..5279feefec06 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -26,6 +26,8 @@ struct nv50_disp_priv {
26 struct work_struct supervisor; 26 struct work_struct supervisor;
27 u32 super; 27 u32 super;
28 28
29 struct nvkm_event uevent;
30
29 struct { 31 struct {
30 int nr; 32 int nr;
31 } head; 33 } head;
@@ -75,6 +77,7 @@ int nvd0_hda_eld(NV50_DISP_MTHD_V1);
75int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1); 77int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1);
76int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1); 78int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1);
77int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1); 79int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1);
80int nve0_hdmi_ctrl(NV50_DISP_MTHD_V1);
78 81
79int nv50_sor_power(NV50_DISP_MTHD_V1); 82int nv50_sor_power(NV50_DISP_MTHD_V1);
80 83
@@ -116,9 +119,16 @@ struct nv50_disp_chan {
116 int chid; 119 int chid;
117}; 120};
118 121
122int nv50_disp_chan_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
119int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *); 123int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *);
120u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); 124u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
121void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); 125void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
126extern const struct nvkm_event_func nv50_disp_chan_uevent;
127int nv50_disp_chan_uevent_ctor(struct nouveau_object *, void *, u32,
128 struct nvkm_notify *);
129void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
130
131extern const struct nvkm_event_func nvd0_disp_chan_uevent;
122 132
123#define nv50_disp_chan_init(a) \ 133#define nv50_disp_chan_init(a) \
124 nouveau_namedb_init(&(a)->base) 134 nouveau_namedb_init(&(a)->base)
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index 788ced1b6182..d36284715b2a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -236,6 +236,10 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
236 if (ret) 236 if (ret)
237 return ret; 237 return ret;
238 238
239 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
240 if (ret)
241 return ret;
242
239 nv_engine(priv)->sclass = nv84_disp_base_oclass; 243 nv_engine(priv)->sclass = nv84_disp_base_oclass;
240 nv_engine(priv)->cclass = &nv50_disp_cclass; 244 nv_engine(priv)->cclass = &nv50_disp_cclass;
241 nv_subdev(priv)->intr = nv50_disp_intr; 245 nv_subdev(priv)->intr = nv50_disp_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index fa79de906eae..a117064002b1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -95,6 +95,10 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
95 if (ret) 95 if (ret)
96 return ret; 96 return ret;
97 97
98 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
99 if (ret)
100 return ret;
101
98 nv_engine(priv)->sclass = nv94_disp_base_oclass; 102 nv_engine(priv)->sclass = nv94_disp_base_oclass;
99 nv_engine(priv)->cclass = &nv50_disp_cclass; 103 nv_engine(priv)->cclass = &nv50_disp_cclass;
100 nv_subdev(priv)->intr = nv50_disp_intr; 104 nv_subdev(priv)->intr = nv50_disp_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 7af15f5d48dc..c67e68aadd45 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -112,6 +112,10 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
112 if (ret) 112 if (ret)
113 return ret; 113 return ret;
114 114
115 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
116 if (ret)
117 return ret;
118
115 nv_engine(priv)->sclass = nva0_disp_base_oclass; 119 nv_engine(priv)->sclass = nva0_disp_base_oclass;
116 nv_engine(priv)->cclass = &nv50_disp_cclass; 120 nv_engine(priv)->cclass = &nv50_disp_cclass;
117 nv_subdev(priv)->intr = nv50_disp_intr; 121 nv_subdev(priv)->intr = nv50_disp_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 6bd39448f8da..22969f355aae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -67,6 +67,10 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
67 if (ret) 67 if (ret)
68 return ret; 68 return ret;
69 69
70 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
71 if (ret)
72 return ret;
73
70 nv_engine(priv)->sclass = nva3_disp_base_oclass; 74 nv_engine(priv)->sclass = nva3_disp_base_oclass;
71 nv_engine(priv)->cclass = &nv50_disp_cclass; 75 nv_engine(priv)->cclass = &nv50_disp_cclass;
72 nv_subdev(priv)->intr = nv50_disp_intr; 76 nv_subdev(priv)->intr = nv50_disp_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index a4bb3c774ee1..747e64bb9c06 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -43,6 +43,31 @@
43#include "nv50.h" 43#include "nv50.h"
44 44
45/******************************************************************************* 45/*******************************************************************************
46 * EVO channel base class
47 ******************************************************************************/
48
49static void
50nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
51{
52 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
53 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
54}
55
56static void
57nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
58{
59 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
60 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
61}
62
63const struct nvkm_event_func
64nvd0_disp_chan_uevent = {
65 .ctor = nv50_disp_chan_uevent_ctor,
66 .init = nvd0_disp_chan_uevent_init,
67 .fini = nvd0_disp_chan_uevent_fini,
68};
69
70/*******************************************************************************
46 * EVO DMA channel base class 71 * EVO DMA channel base class
47 ******************************************************************************/ 72 ******************************************************************************/
48 73
@@ -77,7 +102,6 @@ nvd0_disp_dmac_init(struct nouveau_object *object)
77 return ret; 102 return ret;
78 103
79 /* enable error reporting */ 104 /* enable error reporting */
80 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
81 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); 105 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
82 106
83 /* initialise channel for dma command submission */ 107 /* initialise channel for dma command submission */
@@ -115,7 +139,7 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
115 return -EBUSY; 139 return -EBUSY;
116 } 140 }
117 141
118 /* disable error reporting */ 142 /* disable error reporting and completion notification */
119 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); 143 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
120 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); 144 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
121 145
@@ -278,7 +302,6 @@ nvd0_disp_mast_init(struct nouveau_object *object)
278 return ret; 302 return ret;
279 303
280 /* enable error reporting */ 304 /* enable error reporting */
281 nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
282 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001); 305 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
283 306
284 /* initialise channel for dma command submission */ 307 /* initialise channel for dma command submission */
@@ -313,7 +336,7 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
313 return -EBUSY; 336 return -EBUSY;
314 } 337 }
315 338
316 /* disable error reporting */ 339 /* disable error reporting and completion notification */
317 nv_mask(priv, 0x610090, 0x00000001, 0x00000000); 340 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
318 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000); 341 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
319 342
@@ -326,6 +349,7 @@ nvd0_disp_mast_ofuncs = {
326 .base.dtor = nv50_disp_dmac_dtor, 349 .base.dtor = nv50_disp_dmac_dtor,
327 .base.init = nvd0_disp_mast_init, 350 .base.init = nvd0_disp_mast_init,
328 .base.fini = nvd0_disp_mast_fini, 351 .base.fini = nvd0_disp_mast_fini,
352 .base.ntfy = nv50_disp_chan_ntfy,
329 .base.map = nv50_disp_chan_map, 353 .base.map = nv50_disp_chan_map,
330 .base.rd32 = nv50_disp_chan_rd32, 354 .base.rd32 = nv50_disp_chan_rd32,
331 .base.wr32 = nv50_disp_chan_wr32, 355 .base.wr32 = nv50_disp_chan_wr32,
@@ -419,6 +443,7 @@ nvd0_disp_sync_ofuncs = {
419 .base.dtor = nv50_disp_dmac_dtor, 443 .base.dtor = nv50_disp_dmac_dtor,
420 .base.init = nvd0_disp_dmac_init, 444 .base.init = nvd0_disp_dmac_init,
421 .base.fini = nvd0_disp_dmac_fini, 445 .base.fini = nvd0_disp_dmac_fini,
446 .base.ntfy = nv50_disp_chan_ntfy,
422 .base.map = nv50_disp_chan_map, 447 .base.map = nv50_disp_chan_map,
423 .base.rd32 = nv50_disp_chan_rd32, 448 .base.rd32 = nv50_disp_chan_rd32,
424 .base.wr32 = nv50_disp_chan_wr32, 449 .base.wr32 = nv50_disp_chan_wr32,
@@ -499,6 +524,7 @@ nvd0_disp_ovly_ofuncs = {
499 .base.dtor = nv50_disp_dmac_dtor, 524 .base.dtor = nv50_disp_dmac_dtor,
500 .base.init = nvd0_disp_dmac_init, 525 .base.init = nvd0_disp_dmac_init,
501 .base.fini = nvd0_disp_dmac_fini, 526 .base.fini = nvd0_disp_dmac_fini,
527 .base.ntfy = nv50_disp_chan_ntfy,
502 .base.map = nv50_disp_chan_map, 528 .base.map = nv50_disp_chan_map,
503 .base.rd32 = nv50_disp_chan_rd32, 529 .base.rd32 = nv50_disp_chan_rd32,
504 .base.wr32 = nv50_disp_chan_wr32, 530 .base.wr32 = nv50_disp_chan_wr32,
@@ -524,7 +550,6 @@ nvd0_disp_pioc_init(struct nouveau_object *object)
524 return ret; 550 return ret;
525 551
526 /* enable error reporting */ 552 /* enable error reporting */
527 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
528 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); 553 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
529 554
530 /* activate channel */ 555 /* activate channel */
@@ -553,7 +578,7 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
553 return -EBUSY; 578 return -EBUSY;
554 } 579 }
555 580
556 /* disable error reporting */ 581 /* disable error reporting and completion notification */
557 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); 582 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
558 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); 583 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
559 584
@@ -570,6 +595,7 @@ nvd0_disp_oimm_ofuncs = {
570 .base.dtor = nv50_disp_pioc_dtor, 595 .base.dtor = nv50_disp_pioc_dtor,
571 .base.init = nvd0_disp_pioc_init, 596 .base.init = nvd0_disp_pioc_init,
572 .base.fini = nvd0_disp_pioc_fini, 597 .base.fini = nvd0_disp_pioc_fini,
598 .base.ntfy = nv50_disp_chan_ntfy,
573 .base.map = nv50_disp_chan_map, 599 .base.map = nv50_disp_chan_map,
574 .base.rd32 = nv50_disp_chan_rd32, 600 .base.rd32 = nv50_disp_chan_rd32,
575 .base.wr32 = nv50_disp_chan_wr32, 601 .base.wr32 = nv50_disp_chan_wr32,
@@ -586,6 +612,7 @@ nvd0_disp_curs_ofuncs = {
586 .base.dtor = nv50_disp_pioc_dtor, 612 .base.dtor = nv50_disp_pioc_dtor,
587 .base.init = nvd0_disp_pioc_init, 613 .base.init = nvd0_disp_pioc_init,
588 .base.fini = nvd0_disp_pioc_fini, 614 .base.fini = nvd0_disp_pioc_fini,
615 .base.ntfy = nv50_disp_chan_ntfy,
589 .base.map = nv50_disp_chan_map, 616 .base.map = nv50_disp_chan_map,
590 .base.rd32 = nv50_disp_chan_rd32, 617 .base.rd32 = nv50_disp_chan_rd32,
591 .base.wr32 = nv50_disp_chan_wr32, 618 .base.wr32 = nv50_disp_chan_wr32,
@@ -949,6 +976,9 @@ nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
949 const int or = ffs(outp->or) - 1; 976 const int or = ffs(outp->or) - 1;
950 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020)); 977 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
951 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300)); 978 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
979 const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
980 const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
981 const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
952 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; 982 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
953 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1; 983 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
954 const u32 hoff = (head * 0x800); 984 const u32 hoff = (head * 0x800);
@@ -956,23 +986,35 @@ nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
956 const u32 loff = (link * 0x080) + soff; 986 const u32 loff = (link * 0x080) + soff;
957 const u32 symbol = 100000; 987 const u32 symbol = 100000;
958 const u32 TU = 64; 988 const u32 TU = 64;
959 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000; 989 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
960 u32 clksor = nv_rd32(priv, 0x612300 + soff); 990 u32 clksor = nv_rd32(priv, 0x612300 + soff);
961 u32 datarate, link_nr, link_bw, bits; 991 u32 datarate, link_nr, link_bw, bits;
962 u64 ratio, value; 992 u64 ratio, value;
963 993
994 link_nr = hweight32(dpctrl & 0x000f0000);
995 link_bw = (clksor & 0x007c0000) >> 18;
996 link_bw *= 27000;
997
998 /* symbols/hblank - algorithm taken from comments in tegra driver */
999 value = vblanke + vactive - vblanks - 7;
1000 value = value * link_bw;
1001 do_div(value, pclk);
1002 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
1003 nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
1004
1005 /* symbols/vblank - algorithm taken from comments in tegra driver */
1006 value = vblanks - vblanke - 25;
1007 value = value * link_bw;
1008 do_div(value, pclk);
1009 value = value - ((36 / link_nr) + 3) - 1;
1010 nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
1011
1012 /* watermark */
964 if ((conf & 0x3c0) == 0x180) bits = 30; 1013 if ((conf & 0x3c0) == 0x180) bits = 30;
965 else if ((conf & 0x3c0) == 0x140) bits = 24; 1014 else if ((conf & 0x3c0) == 0x140) bits = 24;
966 else bits = 18; 1015 else bits = 18;
967 datarate = (pclk * bits) / 8; 1016 datarate = (pclk * bits) / 8;
968 1017
969 if (dpctrl > 0x00030000) link_nr = 4;
970 else if (dpctrl > 0x00010000) link_nr = 2;
971 else link_nr = 1;
972
973 link_bw = (clksor & 0x007c0000) >> 18;
974 link_bw *= 27000;
975
976 ratio = datarate; 1018 ratio = datarate;
977 ratio *= symbol; 1019 ratio *= symbol;
978 do_div(ratio, link_nr * link_bw); 1020 do_div(ratio, link_nr * link_bw);
@@ -1153,7 +1195,11 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
1153 1195
1154 if (intr & 0x00000001) { 1196 if (intr & 0x00000001) {
1155 u32 stat = nv_rd32(priv, 0x61008c); 1197 u32 stat = nv_rd32(priv, 0x61008c);
1156 nv_wr32(priv, 0x61008c, stat); 1198 while (stat) {
1199 int chid = __ffs(stat); stat &= ~(1 << chid);
1200 nv50_disp_chan_uevent_send(priv, chid);
1201 nv_wr32(priv, 0x61008c, 1 << chid);
1202 }
1157 intr &= ~0x00000001; 1203 intr &= ~0x00000001;
1158 } 1204 }
1159 1205
@@ -1209,6 +1255,10 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1209 if (ret) 1255 if (ret)
1210 return ret; 1256 return ret;
1211 1257
1258 ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
1259 if (ret)
1260 return ret;
1261
1212 nv_engine(priv)->sclass = nvd0_disp_base_oclass; 1262 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
1213 nv_engine(priv)->cclass = &nv50_disp_cclass; 1263 nv_engine(priv)->cclass = &nv50_disp_cclass;
1214 nv_subdev(priv)->intr = nvd0_disp_intr; 1264 nv_subdev(priv)->intr = nvd0_disp_intr;
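
Rather than acking 0x61008c wholesale, nvd0_disp_intr() now peels completion bits off one at a time, sending a uevent for each channel before acking just that bit; the nv50 path above does the same over intr0 & 0x0000001f. A standalone sketch of the find-first-set drain (printf stands in for the uevent send and register ack):

    #include <stdint.h>
    #include <stdio.h>

    /* Mimic the completion-drain loop: peel off the lowest set bit,
     * notify that channel, then ack only that bit. */
    static void
    drain_completions(uint32_t stat)
    {
            while (stat) {
                    int chid = __builtin_ctz(stat); /* __ffs() in the kernel */
                    stat &= ~(1u << chid);
                    printf("uevent + ack for channel %d\n", chid);
            }
    }

    int main(void)
    {
            drain_completions(0x00000013);  /* channels 0, 1 and 4 */
            return 0;
    }
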
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 47fef1e398c4..db144b2cf06b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -233,6 +233,10 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
233 if (ret) 233 if (ret)
234 return ret; 234 return ret;
235 235
236 ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
237 if (ret)
238 return ret;
239
236 nv_engine(priv)->sclass = nve0_disp_base_oclass; 240 nv_engine(priv)->sclass = nve0_disp_base_oclass;
237 nv_engine(priv)->cclass = &nv50_disp_cclass; 241 nv_engine(priv)->cclass = &nv50_disp_cclass;
238 nv_subdev(priv)->intr = nvd0_disp_intr; 242 nv_subdev(priv)->intr = nvd0_disp_intr;
@@ -245,7 +249,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
245 priv->dac.sense = nv50_dac_sense; 249 priv->dac.sense = nv50_dac_sense;
246 priv->sor.power = nv50_sor_power; 250 priv->sor.power = nv50_sor_power;
247 priv->sor.hda_eld = nvd0_hda_eld; 251 priv->sor.hda_eld = nvd0_hda_eld;
248 priv->sor.hdmi = nvd0_hdmi_ctrl; 252 priv->sor.hdmi = nve0_hdmi_ctrl;
249 return 0; 253 return 0;
250} 254}
251 255
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 04bda4ac4ed3..402d7d67d806 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -68,6 +68,10 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
68 if (ret) 68 if (ret)
69 return ret; 69 return ret;
70 70
71 ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
72 if (ret)
73 return ret;
74
71 nv_engine(priv)->sclass = nvf0_disp_base_oclass; 75 nv_engine(priv)->sclass = nvf0_disp_base_oclass;
72 nv_engine(priv)->cclass = &nv50_disp_cclass; 76 nv_engine(priv)->cclass = &nv50_disp_cclass;
73 nv_subdev(priv)->intr = nvd0_disp_intr; 77 nv_subdev(priv)->intr = nvd0_disp_intr;
@@ -80,7 +84,7 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
80 priv->dac.sense = nv50_dac_sense; 84 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power; 85 priv->sor.power = nv50_sor_power;
82 priv->sor.hda_eld = nvd0_hda_eld; 86 priv->sor.hda_eld = nvd0_hda_eld;
83 priv->sor.hdmi = nvd0_hdmi_ctrl; 87 priv->sor.hdmi = nve0_hdmi_ctrl;
84 return 0; 88 return 0;
85} 89}
86 90
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index 6f6e2a898270..667a9070e006 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -254,7 +254,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
254 atomic_set(&outp->lt.done, 0); 254 atomic_set(&outp->lt.done, 0);
255 255
256 /* link maintenance */ 256 /* link maintenance */
257 ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_irq, true, 257 ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
258 &(struct nvkm_i2c_ntfy_req) { 258 &(struct nvkm_i2c_ntfy_req) {
259 .mask = NVKM_I2C_IRQ, 259 .mask = NVKM_I2C_IRQ,
260 .port = outp->base.edid->index, 260 .port = outp->base.edid->index,
@@ -268,7 +268,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
268 } 268 }
269 269
270 /* hotplug detect, replaces gpio-based mechanism with aux events */ 270 /* hotplug detect, replaces gpio-based mechanism with aux events */
271 ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_hpd, true, 271 ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
272 &(struct nvkm_i2c_ntfy_req) { 272 &(struct nvkm_i2c_ntfy_req) {
273 .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, 273 .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
274 .port = outp->base.edid->index, 274 .port = outp->base.edid->index,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
index dbd43ae9df81..6a0511d54ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -40,7 +40,8 @@ int _nouveau_disp_fini(struct nouveau_object *, bool);
40extern struct nouveau_oclass *nvkm_output_oclass; 40extern struct nouveau_oclass *nvkm_output_oclass;
41extern struct nouveau_oclass *nvkm_connector_oclass; 41extern struct nouveau_oclass *nvkm_connector_oclass;
42 42
43int nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *); 43int nouveau_disp_vblank_ctor(struct nouveau_object *, void *data, u32 size,
44 struct nvkm_notify *);
44void nouveau_disp_vblank(struct nouveau_disp *, int head); 45void nouveau_disp_vblank(struct nouveau_disp *, int head);
45int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **); 46int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
46 47
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 0f999fc45ab9..ac8375cf4eef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -34,7 +34,8 @@
34#include <engine/fifo.h> 34#include <engine/fifo.h>
35 35
36static int 36static int
37nouveau_fifo_event_ctor(void *data, u32 size, struct nvkm_notify *notify) 37nouveau_fifo_event_ctor(struct nouveau_object *object, void *data, u32 size,
38 struct nvkm_notify *notify)
38{ 39{
39 if (size == 0) { 40 if (size == 0) {
40 notify->size = 0; 41 notify->size = 0;
@@ -170,7 +171,8 @@ _nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
170} 171}
171 172
172int 173int
173nouveau_fifo_uevent_ctor(void *data, u32 size, struct nvkm_notify *notify) 174nouveau_fifo_uevent_ctor(struct nouveau_object *object, void *data, u32 size,
175 struct nvkm_notify *notify)
174{ 176{
175 union { 177 union {
176 struct nvif_notify_uevent_req none; 178 struct nvif_notify_uevent_req none;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index 4d2994d8cc32..a0fec205f9db 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -175,7 +175,8 @@ nv50_software_context_ctor(struct nouveau_object *parent,
175 return ret; 175 return ret;
176 176
177 for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) { 177 for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
178 ret = nvkm_notify_init(&pdisp->vblank, pclass->vblank, false, 178 ret = nvkm_notify_init(NULL, &pdisp->vblank, pclass->vblank,
179 false,
179 &(struct nvif_notify_head_req_v0) { 180 &(struct nvif_notify_head_req_v0) {
180 .head = i, 181 .head = i,
181 }, 182 },
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 1794a05205d8..b0ce9f6680b5 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -48,7 +48,7 @@ int nouveau_client_init(struct nouveau_client *);
48int nouveau_client_fini(struct nouveau_client *, bool suspend); 48int nouveau_client_fini(struct nouveau_client *, bool suspend);
49const char *nouveau_client_name(void *obj); 49const char *nouveau_client_name(void *obj);
50 50
51int nvkm_client_notify_new(struct nouveau_client *, struct nvkm_event *, 51int nvkm_client_notify_new(struct nouveau_object *, struct nvkm_event *,
52 void *data, u32 size); 52 void *data, u32 size);
53int nvkm_client_notify_del(struct nouveau_client *, int index); 53int nvkm_client_notify_del(struct nouveau_client *, int index);
54int nvkm_client_notify_get(struct nouveau_client *, int index); 54int nvkm_client_notify_get(struct nouveau_client *, int index);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 8743766454a5..1d9d893929bb 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -24,6 +24,7 @@ enum nv_subdev_type {
 	 * been created, and are allowed to assume any subdevs in the
 	 * list above them exist and have been initialised.
 	 */
+	NVDEV_SUBDEV_FUSE,
 	NVDEV_SUBDEV_MXM,
 	NVDEV_SUBDEV_MC,
 	NVDEV_SUBDEV_BUS,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 51e55d03330a..92876528972f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -4,7 +4,8 @@
 #include <core/notify.h>
 
 struct nvkm_event_func {
-	int (*ctor)(void *data, u32 size, struct nvkm_notify *);
+	int (*ctor)(struct nouveau_object *, void *data, u32 size,
+		    struct nvkm_notify *);
 	void (*send)(void *data, u32 size, struct nvkm_notify *);
 	void (*init)(struct nvkm_event *, int type, int index);
 	void (*fini)(struct nvkm_event *, int type, int index);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 2bf7d0e32261..bfe6931544fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -6,6 +6,10 @@ struct nouveau_mm_node {
 	struct list_head fl_entry;
 	struct list_head rl_entry;
 
+#define NVKM_MM_HEAP_ANY 0x00
+	u8 heap;
+#define NVKM_MM_TYPE_NONE 0x00
+#define NVKM_MM_TYPE_HOLE 0xff
 	u8 type;
 	u32 offset;
 	u32 length;
@@ -27,10 +31,10 @@ nouveau_mm_initialised(struct nouveau_mm *mm)
 
 int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
 int nouveau_mm_fini(struct nouveau_mm *);
-int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
-		    u32 align, struct nouveau_mm_node **);
-int nouveau_mm_tail(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
-		    u32 align, struct nouveau_mm_node **);
+int nouveau_mm_head(struct nouveau_mm *, u8 heap, u8 type, u32 size_max,
+		    u32 size_min, u32 align, struct nouveau_mm_node **);
+int nouveau_mm_tail(struct nouveau_mm *, u8 heap, u8 type, u32 size_max,
+		    u32 size_min, u32 align, struct nouveau_mm_node **);
 void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
 
 #endif
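
The allocator nodes gain a heap identifier, and nouveau_mm_head()/nouveau_mm_tail() grow a matching heap argument ahead of the existing type. A minimal caller sketch (not part of the patch; the wrapper name is hypothetical), passing NVKM_MM_HEAP_ANY to keep the old any-heap behaviour:

static int
example_mm_alloc(struct nouveau_mm *mm, struct nouveau_mm_node **pnode)
{
	/* heap selector now comes first; NVKM_MM_HEAP_ANY matches any heap */
	return nouveau_mm_head(mm, NVKM_MM_HEAP_ANY, 0x01 /* type */,
			       0x1000 /* size_max */, 0x1000 /* size_min */,
			       1 /* align */, pnode);
}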
diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/core/include/core/notify.h
index 1262d8f020f3..a7c3c5f578cc 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/notify.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/notify.h
@@ -25,8 +25,9 @@ struct nvkm_notify {
 	const void *data;
 };
 
-int nvkm_notify_init(struct nvkm_event *, int (*func)(struct nvkm_notify *),
-		     bool work, void *data, u32 size, u32 reply,
+int nvkm_notify_init(struct nouveau_object *, struct nvkm_event *,
+		     int (*func)(struct nvkm_notify *), bool work,
+		     void *data, u32 size, u32 reply,
 		     struct nvkm_notify *);
 void nvkm_notify_fini(struct nvkm_notify *);
 void nvkm_notify_get(struct nvkm_notify *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index e5e4d930b2c2..2007453f6fce 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -116,7 +116,8 @@ extern struct nouveau_oclass *nve0_fifo_oclass;
 extern struct nouveau_oclass *gk20a_fifo_oclass;
 extern struct nouveau_oclass *nv108_fifo_oclass;
 
-int nouveau_fifo_uevent_ctor(void *, u32, struct nvkm_notify *);
+int nouveau_fifo_uevent_ctor(struct nouveau_object *, void *, u32,
+			     struct nvkm_notify *);
 void nouveau_fifo_uevent(struct nouveau_fifo *);
 
 void nv04_fifo_intr(struct nouveau_subdev *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index be037fac534c..257ddf6d36d4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -12,7 +12,6 @@ struct nouveau_bar {
 
 	int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
 		     struct nouveau_mem *, struct nouveau_object **);
-	void __iomem *iomem;
 
 	int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
 		    u32 flags, struct nouveau_vma *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h
new file mode 100644
index 000000000000..e171120cec81
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0205.h
@@ -0,0 +1,32 @@
+#ifndef __NVBIOS_M0205_H__
+#define __NVBIOS_M0205_H__
+
+struct nvbios_M0205T {
+	u16 freq;
+};
+
+u32 nvbios_M0205Te(struct nouveau_bios *,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u32 nvbios_M0205Tp(struct nouveau_bios *,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz,
+		   struct nvbios_M0205T *);
+
+struct nvbios_M0205E {
+	u8 type;
+};
+
+u32 nvbios_M0205Ee(struct nouveau_bios *, int idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0205Ep(struct nouveau_bios *, int idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_M0205E *);
+
+struct nvbios_M0205S {
+	u8 data;
+};
+
+u32 nvbios_M0205Se(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0205Sp(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr,
+		   struct nvbios_M0205S *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h
new file mode 100644
index 000000000000..67dc50d837bc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0209.h
@@ -0,0 +1,30 @@
+#ifndef __NVBIOS_M0209_H__
+#define __NVBIOS_M0209_H__
+
+u32 nvbios_M0209Te(struct nouveau_bios *,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+struct nvbios_M0209E {
+	u8 v00_40;
+	u8 bits;
+	u8 modulo;
+	u8 v02_40;
+	u8 v02_07;
+	u8 v03;
+};
+
+u32 nvbios_M0209Ee(struct nouveau_bios *, int idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0209Ep(struct nouveau_bios *, int idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_M0209E *);
+
+struct nvbios_M0209S {
+	u32 data[0x200];
+};
+
+u32 nvbios_M0209Se(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0209Sp(struct nouveau_bios *, int ent, int idx, u8 *ver, u8 *hdr,
+		   struct nvbios_M0209S *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h
new file mode 100644
index 000000000000..119d0874e041
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/fan.h
@@ -0,0 +1,8 @@
+#ifndef __NVBIOS_FAN_H__
+#define __NVBIOS_FAN_H__
+
+#include <subdev/bios/therm.h>
+
+u16 nvbios_fan_parse(struct nouveau_bios *bios, struct nvbios_therm_fan *fan);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
index c086ac6d677d..a685bbd04568 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -4,60 +4,118 @@
 struct nouveau_bios;
 
 struct nvbios_ramcfg {
-	unsigned rammap_11_08_01:1;
-	unsigned rammap_11_08_0c:2;
-	unsigned rammap_11_08_10:1;
-	unsigned rammap_11_11_0c:2;
+	unsigned rammap_ver;
+	unsigned rammap_hdr;
+	unsigned rammap_min;
+	unsigned rammap_max;
+	union {
+		struct {
+			unsigned rammap_10_04_02:1;
+			unsigned rammap_10_04_08:1;
+		};
+		struct {
+			unsigned rammap_11_08_01:1;
+			unsigned rammap_11_08_0c:2;
+			unsigned rammap_11_08_10:1;
+			unsigned rammap_11_09_01ff:9;
+			unsigned rammap_11_0a_03fe:9;
+			unsigned rammap_11_0a_0400:1;
+			unsigned rammap_11_0a_0800:1;
+			unsigned rammap_11_0b_01f0:5;
+			unsigned rammap_11_0b_0200:1;
+			unsigned rammap_11_0b_0400:1;
+			unsigned rammap_11_0b_0800:1;
+			unsigned rammap_11_0d:8;
+			unsigned rammap_11_0e:8;
+			unsigned rammap_11_0f:8;
+			unsigned rammap_11_11_0c:2;
+		};
+	};
 
-	unsigned ramcfg_11_01_01:1;
-	unsigned ramcfg_11_01_02:1;
-	unsigned ramcfg_11_01_04:1;
-	unsigned ramcfg_11_01_08:1;
-	unsigned ramcfg_11_01_10:1;
-	unsigned ramcfg_11_01_20:1;
-	unsigned ramcfg_11_01_40:1;
-	unsigned ramcfg_11_01_80:1;
-	unsigned ramcfg_11_02_03:2;
-	unsigned ramcfg_11_02_04:1;
-	unsigned ramcfg_11_02_08:1;
-	unsigned ramcfg_11_02_10:1;
-	unsigned ramcfg_11_02_40:1;
-	unsigned ramcfg_11_02_80:1;
-	unsigned ramcfg_11_03_0f:4;
-	unsigned ramcfg_11_03_30:2;
-	unsigned ramcfg_11_03_c0:2;
-	unsigned ramcfg_11_03_f0:4;
-	unsigned ramcfg_11_04:8;
-	unsigned ramcfg_11_06:8;
-	unsigned ramcfg_11_07_02:1;
-	unsigned ramcfg_11_07_04:1;
-	unsigned ramcfg_11_07_08:1;
-	unsigned ramcfg_11_07_10:1;
-	unsigned ramcfg_11_07_40:1;
-	unsigned ramcfg_11_07_80:1;
-	unsigned ramcfg_11_08_01:1;
-	unsigned ramcfg_11_08_02:1;
-	unsigned ramcfg_11_08_04:1;
-	unsigned ramcfg_11_08_08:1;
-	unsigned ramcfg_11_08_10:1;
-	unsigned ramcfg_11_08_20:1;
-	unsigned ramcfg_11_09:8;
+	unsigned ramcfg_ver;
+	unsigned ramcfg_hdr;
+	unsigned ramcfg_timing;
+	union {
+		struct {
+			unsigned ramcfg_10_02_01:1;
+			unsigned ramcfg_10_02_02:1;
+			unsigned ramcfg_10_02_04:1;
+			unsigned ramcfg_10_02_08:1;
+			unsigned ramcfg_10_02_10:1;
+			unsigned ramcfg_10_02_20:1;
+			unsigned ramcfg_10_02_40:1;
+			unsigned ramcfg_10_03_0f:4;
+			unsigned ramcfg_10_05:8;
+			unsigned ramcfg_10_06:8;
+			unsigned ramcfg_10_07:8;
+			unsigned ramcfg_10_08:8;
+			unsigned ramcfg_10_09_0f:4;
+			unsigned ramcfg_10_09_f0:4;
+		};
+		struct {
+			unsigned ramcfg_11_01_01:1;
+			unsigned ramcfg_11_01_02:1;
+			unsigned ramcfg_11_01_04:1;
+			unsigned ramcfg_11_01_08:1;
+			unsigned ramcfg_11_01_10:1;
+			unsigned ramcfg_11_01_20:1;
+			unsigned ramcfg_11_01_40:1;
+			unsigned ramcfg_11_01_80:1;
+			unsigned ramcfg_11_02_03:2;
+			unsigned ramcfg_11_02_04:1;
+			unsigned ramcfg_11_02_08:1;
+			unsigned ramcfg_11_02_10:1;
+			unsigned ramcfg_11_02_40:1;
+			unsigned ramcfg_11_02_80:1;
+			unsigned ramcfg_11_03_0f:4;
+			unsigned ramcfg_11_03_30:2;
+			unsigned ramcfg_11_03_c0:2;
+			unsigned ramcfg_11_03_f0:4;
+			unsigned ramcfg_11_04:8;
+			unsigned ramcfg_11_06:8;
+			unsigned ramcfg_11_07_02:1;
+			unsigned ramcfg_11_07_04:1;
+			unsigned ramcfg_11_07_08:1;
+			unsigned ramcfg_11_07_10:1;
+			unsigned ramcfg_11_07_40:1;
+			unsigned ramcfg_11_07_80:1;
+			unsigned ramcfg_11_08_01:1;
+			unsigned ramcfg_11_08_02:1;
+			unsigned ramcfg_11_08_04:1;
+			unsigned ramcfg_11_08_08:1;
+			unsigned ramcfg_11_08_10:1;
+			unsigned ramcfg_11_08_20:1;
+			unsigned ramcfg_11_09:8;
+		};
+	};
 
+	unsigned timing_ver;
+	unsigned timing_hdr;
 	unsigned timing[11];
-	unsigned timing_20_2e_03:2;
-	unsigned timing_20_2e_30:2;
-	unsigned timing_20_2e_c0:2;
-	unsigned timing_20_2f_03:2;
-	unsigned timing_20_2c_003f:6;
-	unsigned timing_20_2c_1fc0:7;
-	unsigned timing_20_30_f8:5;
-	unsigned timing_20_30_07:3;
-	unsigned timing_20_31_0007:3;
-	unsigned timing_20_31_0078:4;
-	unsigned timing_20_31_0780:4;
-	unsigned timing_20_31_0800:1;
-	unsigned timing_20_31_7000:3;
-	unsigned timing_20_31_8000:1;
+	union {
+		struct {
+			unsigned timing_10_WR:8;
+			unsigned timing_10_CL:8;
+			unsigned timing_10_ODT:3;
+			unsigned timing_10_CWL:8;
+		};
+		struct {
+			unsigned timing_20_2e_03:2;
+			unsigned timing_20_2e_30:2;
+			unsigned timing_20_2e_c0:2;
+			unsigned timing_20_2f_03:2;
+			unsigned timing_20_2c_003f:6;
+			unsigned timing_20_2c_1fc0:7;
+			unsigned timing_20_30_f8:5;
+			unsigned timing_20_30_07:3;
+			unsigned timing_20_31_0007:3;
+			unsigned timing_20_31_0078:4;
+			unsigned timing_20_31_0780:4;
+			unsigned timing_20_31_0800:1;
+			unsigned timing_20_31_7000:3;
+			unsigned timing_20_31_8000:1;
+		};
+	};
 };
 
 u8 nvbios_ramcfg_count(struct nouveau_bios *);
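
With the version 0x10 and 0x11 fields overlaid in anonymous unions, the new rammap_ver/ramcfg_ver/timing_ver tags are what tell a consumer which union member is live. A dispatch sketch (not from the patch; the field names literally encode byte and bit, e.g. *_02_40 is bit 0x40 of byte 0x02, so the two arms read the same raw bit for either table revision):

static bool
ramcfg_byte2_bit40(const struct nvbios_ramcfg *c)
{
	/* which union member is valid follows from ramcfg_ver */
	switch (c->ramcfg_ver) {
	case 0x10: return c->ramcfg_10_02_40;
	case 0x11: return c->ramcfg_11_02_40;
	default:   return false;
	}
}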
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
index 5bdf8e4db40a..47e021d3e20d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -8,9 +8,10 @@ u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr,
 
 u32 nvbios_rammapEe(struct nouveau_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp(struct nouveau_bios *, int idx,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		    struct nvbios_ramcfg *);
 u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz,
-		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u32 nvbios_rammapEp(struct nouveau_bios *, u16 mhz,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		    struct nvbios_ramcfg *);
 
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index 8dc5051df55d..295d093f3b30 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -23,6 +23,12 @@ struct nvbios_therm_sensor {
 	struct nvbios_therm_threshold thrs_shutdown;
 };
 
+enum nvbios_therm_fan_type {
+	NVBIOS_THERM_FAN_UNK = 0,
+	NVBIOS_THERM_FAN_TOGGLE = 1,
+	NVBIOS_THERM_FAN_PWM = 2,
+};
+
 /* no vbios have more than 6 */
 #define NOUVEAU_TEMP_FAN_TRIP_MAX 10
 struct nouveau_therm_trip_point {
@@ -38,7 +44,9 @@ enum nvbios_therm_fan_mode {
 };
 
 struct nvbios_therm_fan {
-	u16 pwm_freq;
+	enum nvbios_therm_fan_type type;
+
+	u32 pwm_freq;
 
 	u8 min_duty;
 	u8 max_duty;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index a5ca00dd2f61..36ed035d4d42 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -29,6 +29,7 @@ enum nv_clk_src {
 	nv_clk_src_mdiv,
 
 	nv_clk_src_core,
+	nv_clk_src_core_intm,
 	nv_clk_src_shader,
 
 	nv_clk_src_mem,
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 871e73914b24..8d0032f15205 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -111,6 +111,7 @@ extern struct nouveau_oclass *gm107_fb_oclass;
 #include <subdev/bios/ramcfg.h>
 
 struct nouveau_ram_data {
+	struct list_head head;
 	struct nvbios_ramcfg bios;
 	u32 freq;
 };
@@ -136,6 +137,7 @@ struct nouveau_ram {
 
 	int ranks;
 	int parts;
+	int part_mask;
 
 	int (*get)(struct nouveau_fb *, u64 size, u32 align,
 		   u32 size_nc, u32 type, struct nouveau_mem **);
@@ -144,11 +146,6 @@ struct nouveau_ram {
 	int (*calc)(struct nouveau_fb *, u32 freq);
 	int (*prog)(struct nouveau_fb *);
 	void (*tidy)(struct nouveau_fb *);
-	struct {
-		u8 version;
-		u32 data;
-		u8 size;
-	} rammap, ramcfg, timing;
 	u32 freq;
 	u32 mr[16];
 	u32 mr1_nuts;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h
new file mode 100644
index 000000000000..0f7fc0c52ab2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb/regsnv04.h
@@ -0,0 +1,21 @@
+#ifndef __NOUVEAU_FB_REGS_04_H__
+#define __NOUVEAU_FB_REGS_04_H__
+
+#define NV04_PFB_BOOT_0						0x00100000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB			0x00000000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB			0x00000001
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB			0x00000002
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_WIDTH_128			0x00000004
+#	define NV04_PFB_BOOT_0_RAM_TYPE				0x00000028
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT		0x00000000
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT		0x00000008
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK	0x00000010
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT		0x00000018
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT		0x00000020
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16	0x00000028
+#	define NV04_PFB_BOOT_0_UMA_ENABLE			0x00000100
+#	define NV04_PFB_BOOT_0_UMA_SIZE				0x0000f000
+
+#endif
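
The low bits of BOOT_0 encode the installed RAM size. A decoding sketch (hypothetical helper, assuming boot0 was already read from the register at 0x100000):

static u32
nv04_ram_amount_mb(u32 boot0)
{
	switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
	case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:  return 4;
	case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:  return 8;
	case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: return 16;
	default:                              return 32; /* _32MB == 0 */
	}
}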
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fuse.h b/drivers/gpu/drm/nouveau/core/include/subdev/fuse.h
new file mode 100644
index 000000000000..2b1ddb2a9a7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fuse.h
@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_FUSE_H__
+#define __NOUVEAU_FUSE_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_fuse {
+	struct nouveau_subdev base;
+};
+
+static inline struct nouveau_fuse *
+nouveau_fuse(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FUSE];
+}
+
+#define nouveau_fuse_create(p, e, o, d) \
+	nouveau_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
+
+int nouveau_fuse_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int, void **);
+void _nouveau_fuse_dtor(struct nouveau_object *);
+int _nouveau_fuse_init(struct nouveau_object *);
+#define _nouveau_fuse_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass g80_fuse_oclass;
+extern struct nouveau_oclass gf100_fuse_oclass;
+extern struct nouveau_oclass gm107_fuse_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index b73733d21cc7..f855140dbcb7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -40,7 +40,7 @@ nouveau_gpio(void *obj)
 
 extern struct nouveau_oclass *nv10_gpio_oclass;
 extern struct nouveau_oclass *nv50_gpio_oclass;
-extern struct nouveau_oclass *nv92_gpio_oclass;
+extern struct nouveau_oclass *nv94_gpio_oclass;
 extern struct nouveau_oclass *nvd0_gpio_oclass;
 extern struct nouveau_oclass *nve0_gpio_oclass;
 
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index f73feec151db..bf3d1f611333 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -47,5 +47,8 @@ void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
 void nouveau_memx_wait(struct nouveau_memx *,
 		       u32 addr, u32 mask, u32 data, u32 nsec);
 void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
+void nouveau_memx_wait_vblank(struct nouveau_memx *);
+void nouveau_memx_block(struct nouveau_memx *);
+void nouveau_memx_unblock(struct nouveau_memx *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index d4a68179e586..a437597dcafc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -78,5 +78,6 @@ extern struct nouveau_oclass nv50_therm_oclass;
 extern struct nouveau_oclass nv84_therm_oclass;
 extern struct nouveau_oclass nva3_therm_oclass;
 extern struct nouveau_oclass nvd0_therm_oclass;
+extern struct nouveau_oclass gm107_therm_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index 8bcbdf39cfb2..b1adc69efd88 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -38,10 +38,12 @@ struct nouveau_barobj {
 static int
 nouveau_barobj_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
-		    struct nouveau_oclass *oclass, void *mem, u32 size,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
 		    struct nouveau_object **pobject)
 {
+	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_bar *bar = (void *)engine;
+	struct nouveau_mem *mem = data;
 	struct nouveau_barobj *barobj;
 	int ret;
 
@@ -54,7 +56,13 @@ nouveau_barobj_ctor(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
+	barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
+				(u32)barobj->vma.offset, mem->size << 12);
+	if (!barobj->iomem) {
+		nv_warn(bar, "PRAMIN ioremap failed\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -63,8 +71,11 @@ nouveau_barobj_dtor(struct nouveau_object *object)
 {
 	struct nouveau_bar *bar = (void *)object->engine;
 	struct nouveau_barobj *barobj = (void *)object;
-	if (barobj->vma.node)
+	if (barobj->vma.node) {
+		if (barobj->iomem)
+			iounmap(barobj->iomem);
 		bar->unmap(bar, &barobj->vma);
+	}
 	nouveau_object_destroy(&barobj->base);
 }
 
@@ -99,12 +110,11 @@ nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
 		  struct nouveau_mem *mem, struct nouveau_object **pobject)
 {
 	struct nouveau_object *engine = nv_object(bar);
-	int ret = -ENOMEM;
-	if (bar->iomem) {
-		ret = nouveau_object_ctor(parent, engine,
-					  &nouveau_barobj_oclass,
-					  mem, 0, pobject);
-	}
+	struct nouveau_object *gpuobj;
+	int ret = nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
+				      mem, 0, &gpuobj);
+	if (ret == 0)
+		*pobject = gpuobj;
 	return ret;
 }
 
@@ -113,7 +123,6 @@ nouveau_bar_create_(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
 		    struct nouveau_oclass *oclass, int length, void **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_bar *bar;
 	int ret;
 
@@ -123,21 +132,12 @@ nouveau_bar_create_(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	if (nv_device_resource_len(device, 3) != 0) {
-		bar->iomem = ioremap(nv_device_resource_start(device, 3),
-				     nv_device_resource_len(device, 3));
-		if (!bar->iomem)
-			nv_warn(bar, "PRAMIN ioremap failed\n");
-	}
-
 	return 0;
 }
 
 void
 nouveau_bar_destroy(struct nouveau_bar *bar)
 {
-	if (bar->iomem)
-		iounmap(bar->iomem);
 	nouveau_subdev_destroy(&bar->base);
 }
 
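
In other words, the rework replaces the single whole-BAR3 mapping created at subdev creation with one ioremap() per object: the window starts at the object's VMA offset inside BAR 3 and spans mem->size GPU pages, so the byte length is mem->size << 12. As a worked case (numbers hypothetical), a 16-page object at vma.offset 0x30000 maps exactly 16 << 12 = 64 KiB starting at BAR3 base + 0x30000, rather than the entire aperture.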
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c
new file mode 100644
index 000000000000..ac9617c5fc2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/M0205.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/M0205.h>
+
+u32
+nvbios_M0205Te(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+	struct bit_entry bit_M;
+	u32 data = 0x00000000;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 2 && bit_M.length > 0x08)
+			data = nv_ro32(bios, bit_M.offset + 0x05);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0x00);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, data + 0x01);
+				*len = nv_ro08(bios, data + 0x02);
+				*ssz = nv_ro08(bios, data + 0x03);
+				*snr = nv_ro08(bios, data + 0x04);
+				*cnt = nv_ro08(bios, data + 0x05);
+				return data;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x00000000;
+}
+
+u32
+nvbios_M0205Tp(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz,
+	       struct nvbios_M0205T *info)
+{
+	u32 data = nvbios_M0205Te(bios, ver, hdr, cnt, len, snr, ssz);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x10:
+		info->freq = nv_ro16(bios, data + 0x06);
+		break;
+	default:
+		break;
+	}
+	return data;
+}
+
+u32
+nvbios_M0205Ee(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8 snr, ssz;
+	u32 data = nvbios_M0205Te(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (data && idx < *cnt) {
+		data = data + *hdr + idx * (*len + (snr * ssz));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return data;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_M0205Ep(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	       struct nvbios_M0205E *info)
+{
+	u32 data = nvbios_M0205Ee(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x10:
+		info->type = nv_ro08(bios, data + 0x00) & 0x0f;
+		return data;
+	default:
+		break;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_M0205Se(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
+{
+
+	u8 cnt, len;
+	u32 data = nvbios_M0205Ee(bios, ent, ver, hdr, &cnt, &len);
+	if (data && idx < cnt) {
+		data = data + *hdr + idx * len;
+		*hdr = len;
+		return data;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_M0205Sp(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
+	       struct nvbios_M0205S *info)
+{
+	u32 data = nvbios_M0205Se(bios, ent, idx, ver, hdr);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x10:
+		info->data = nv_ro08(bios, data + 0x00);
+		return data;
+	default:
+		break;
+	}
+	return 0x00000000;
+}
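
These accessors follow the usual three-level BIT-table convention: Te/Tp parse the table header, Ee/Ep return entry N while re-purposing *hdr/*cnt/*len to describe that entry's sub-entry layout, and Se/Sp return sub-entry M of entry N. A walking sketch under those assumptions (helper name hypothetical, not part of the patch):

static void
m0205_walk(struct nouveau_bios *bios)
{
	struct nvbios_M0205T table;
	struct nvbios_M0205S sub;
	u8 ver, hdr, cnt, len, snr, ssz;
	int ent, idx;

	if (!nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &table))
		return;
	for (ent = 0; ent < cnt; ent++) {          /* cnt entries ...      */
		for (idx = 0; idx < snr; idx++) {  /* ... snr sub-entries  */
			u8 sver = ver, shdr = hdr; /* re-parsed internally */
			if (nvbios_M0205Sp(bios, ent, idx, &sver, &shdr, &sub))
				; /* sub.data holds the sub-entry byte */
		}
	}
}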
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c
new file mode 100644
index 000000000000..b142a510e89f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/M0209.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/M0209.h>
+
+u32
+nvbios_M0209Te(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+	struct bit_entry bit_M;
+	u32 data = 0x00000000;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 2 && bit_M.length > 0x0c)
+			data = nv_ro32(bios, bit_M.offset + 0x09);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0x00);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, data + 0x01);
+				*len = nv_ro08(bios, data + 0x02);
+				*ssz = nv_ro08(bios, data + 0x03);
+				*snr = 1;
+				*cnt = nv_ro08(bios, data + 0x04);
+				return data;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x00000000;
+}
+
+u32
+nvbios_M0209Ee(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8 snr, ssz;
+	u32 data = nvbios_M0209Te(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (data && idx < *cnt) {
+		data = data + *hdr + idx * (*len + (snr * ssz));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return data;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_M0209Ep(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	       struct nvbios_M0209E *info)
+{
+	u32 data = nvbios_M0209Ee(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x10:
+		info->v00_40 = (nv_ro08(bios, data + 0x00) & 0x40) >> 6;
+		info->bits = nv_ro08(bios, data + 0x00) & 0x3f;
+		info->modulo = nv_ro08(bios, data + 0x01);
+		info->v02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+		info->v02_07 = nv_ro08(bios, data + 0x02) & 0x07;
+		info->v03 = nv_ro08(bios, data + 0x03);
+		return data;
+	default:
+		break;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_M0209Se(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
+{
+
+	u8 cnt, len;
+	u32 data = nvbios_M0209Ee(bios, ent, ver, hdr, &cnt, &len);
+	if (data && idx < cnt) {
+		data = data + *hdr + idx * len;
+		*hdr = len;
+		return data;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_M0209Sp(struct nouveau_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
+	       struct nvbios_M0209S *info)
+{
+	struct nvbios_M0209E M0209E;
+	u8 cnt, len;
+	u32 data = nvbios_M0209Ep(bios, ent, ver, hdr, &cnt, &len, &M0209E);
+	if (data) {
+		u32 i, data = nvbios_M0209Se(bios, ent, idx, ver, hdr);
+		memset(info, 0x00, sizeof(*info));
+		switch (!!data * *ver) {
+		case 0x10:
+			for (i = 0; i < ARRAY_SIZE(info->data); i++) {
+				u32 bits = (i % M0209E.modulo) * M0209E.bits;
+				u32 mask = (1ULL << M0209E.bits) - 1;
+				u16 off = bits / 8;
+				u8 mod = bits % 8;
+				info->data[i] = nv_ro32(bios, data + off);
+				info->data[i] = info->data[i] >> mod;
+				info->data[i] = info->data[i] & mask;
+			}
+			return data;
+		default:
+			break;
+		}
+	}
+	return 0x00000000;
+}
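
nvbios_M0209Sp() treats the sub-entry as a packed bitstream: element i occupies M0209E.bits bits starting at bit (i % M0209E.modulo) * M0209E.bits. Pulled out as a standalone helper for clarity (a sketch, not code from the patch), with a worked case: bits = 2 and modulo = 4 put element 5 at bit offset (5 % 4) * 2 = 2, i.e. byte 0, shift 2, mask 0x3:

static u32
m0209_unpack_one(struct nouveau_bios *bios, u32 base,
		 u8 bits, u8 modulo, u32 i)
{
	u32 bit = (i % modulo) * bits;        /* absolute bit offset */
	u32 mask = (1ULL << bits) - 1;        /* field-width mask */
	return (nv_ro32(bios, base + (bit / 8)) >> (bit % 8)) & mask;
}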
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 88606bfaf847..bd8d348385b3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
 		   struct dcb_output *outp)
 {
 	u16 dcb = dcb_outp(bios, idx, ver, len);
+	memset(outp, 0x00, sizeof(*outp));
 	if (dcb) {
 		if (*ver >= 0x20) {
 			u32 conn = nv_ro32(bios, dcb + 0x00);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/core/subdev/bios/fan.c
new file mode 100644
index 000000000000..e419892240f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/fan.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2014 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/fan.h>
+
+u16
+nvbios_fan_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 fan = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2 && bit_P.length >= 0x5a)
+			fan = nv_ro16(bios, bit_P.offset + 0x58);
+
+		if (fan) {
+			*ver = nv_ro08(bios, fan + 0);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, fan + 1);
+				*len = nv_ro08(bios, fan + 2);
+				*cnt = nv_ro08(bios, fan + 3);
+				return fan;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_fan_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+		 u8 *cnt, u8 *len)
+{
+	u16 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
+	if (data && idx < *cnt)
+		return data + *hdr + (idx * (*len));
+	return 0x0000;
+}
+
+u16
+nvbios_fan_parse(struct nouveau_bios *bios, struct nvbios_therm_fan *fan)
+{
+	u8 ver, hdr, cnt, len;
+
+	u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
+	if (data) {
+		u8 type = nv_ro08(bios, data + 0x00);
+		switch (type) {
+		case 0:
+			fan->type = NVBIOS_THERM_FAN_TOGGLE;
+			break;
+		case 1:
+		case 2:
+			/* TODO: Understand the difference between the two! */
+			fan->type = NVBIOS_THERM_FAN_PWM;
+			break;
+		default:
+			fan->type = NVBIOS_THERM_FAN_UNK;
+		}
+
+		fan->min_duty = nv_ro08(bios, data + 0x02);
+		fan->max_duty = nv_ro08(bios, data + 0x03);
+
+		fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff;
+	}
+	return data;
+}
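
A consumer only needs the parsed nvbios_therm_fan to program the controller. How the result would be used is sketched below as an assumption, not code from the patch:

static void
therm_apply_fan_table(struct nouveau_bios *bios)
{
	struct nvbios_therm_fan fan = { 0 };

	if (!nvbios_fan_parse(bios, &fan))
		return; /* no fan table: fall back to the legacy path */
	if (fan.type == NVBIOS_THERM_FAN_PWM)
		; /* program PWM at fan.pwm_freq, duty clamped to
		   * [fan.min_duty, fan.max_duty] */
}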
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 1811b2cb0472..585e69331ccc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -75,31 +75,39 @@ nvbios_rammapEe(struct nouveau_bios *bios, int idx,
 }
 
 u32
-nvbios_rammapEm(struct nouveau_bios *bios, u16 khz,
-		u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
-	int idx = 0;
-	u32 data;
-	while ((data = nvbios_rammapEe(bios, idx++, ver, hdr, cnt, len))) {
-		if (khz >= nv_ro16(bios, data + 0x00) &&
-		    khz <= nv_ro16(bios, data + 0x02))
-			break;
-	}
-	return data;
-}
-
-u32
-nvbios_rammapEp(struct nouveau_bios *bios, u16 khz,
+nvbios_rammapEp(struct nouveau_bios *bios, int idx,
 		u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		struct nvbios_ramcfg *p)
 {
-	u32 data = nvbios_rammapEm(bios, khz, ver, hdr, cnt, len);
+	u32 data = nvbios_rammapEe(bios, idx, ver, hdr, cnt, len), temp;
 	memset(p, 0x00, sizeof(*p));
+	p->rammap_ver = *ver;
+	p->rammap_hdr = *hdr;
 	switch (!!data * *ver) {
+	case 0x10:
+		p->rammap_min = nv_ro16(bios, data + 0x00);
+		p->rammap_max = nv_ro16(bios, data + 0x02);
+		p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1;
+		p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3;
+		break;
 	case 0x11:
+		p->rammap_min = nv_ro16(bios, data + 0x00);
+		p->rammap_max = nv_ro16(bios, data + 0x02);
 		p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
 		p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
 		p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+		temp = nv_ro32(bios, data + 0x09);
+		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
+		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
+		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
+		p->rammap_11_0a_0800 = (temp & 0x00080000) >> 19;
+		p->rammap_11_0b_01f0 = (temp & 0x01f00000) >> 20;
+		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
+		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
+		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
+		p->rammap_11_0d = nv_ro08(bios, data + 0x0d);
+		p->rammap_11_0e = nv_ro08(bios, data + 0x0e);
+		p->rammap_11_0f = nv_ro08(bios, data + 0x0f);
 		p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
 		break;
 	default:
@@ -110,6 +118,20 @@ nvbios_rammapEp(struct nouveau_bios *bios, u16 khz,
 }
 
 u32
+nvbios_rammapEm(struct nouveau_bios *bios, u16 mhz,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		struct nvbios_ramcfg *info)
+{
+	int idx = 0;
+	u32 data;
+	while ((data = nvbios_rammapEp(bios, idx++, ver, hdr, cnt, len, info))) {
+		if (mhz >= info->rammap_min && mhz <= info->rammap_max)
+			break;
+	}
+	return data;
+}
+
+u32
 nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
 		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		u8 *ver, u8 *hdr)
@@ -129,8 +151,28 @@ nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
 		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
 {
 	data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr);
+	p->ramcfg_ver = *ver;
+	p->ramcfg_hdr = *hdr;
 	switch (!!data * *ver) {
+	case 0x10:
+		p->ramcfg_timing = nv_ro08(bios, data + 0x01);
+		p->ramcfg_10_02_01 = (nv_ro08(bios, data + 0x02) & 0x01) >> 0;
+		p->ramcfg_10_02_02 = (nv_ro08(bios, data + 0x02) & 0x02) >> 1;
+		p->ramcfg_10_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
+		p->ramcfg_10_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
+		p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
+		p->ramcfg_10_08 = (nv_ro08(bios, data + 0x08) & 0xff) >> 0;
+		p->ramcfg_10_09_0f = (nv_ro08(bios, data + 0x09) & 0x0f) >> 0;
+		p->ramcfg_10_09_f0 = (nv_ro08(bios, data + 0x09) & 0xf0) >> 4;
+		break;
 	case 0x11:
+		p->ramcfg_timing = nv_ro08(bios, data + 0x00);
 		p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
 		p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
 		p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
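
With the match moved after parsing, frequency selection reduces to looping over nvbios_rammapEp() until the target falls inside [rammap_min, rammap_max], which is exactly what the reworked nvbios_rammapEm() does. A caller sketch (helper name and target value hypothetical; units follow the u16 mhz parameter):

static u32
pick_rammap(struct nouveau_bios *bios, struct nvbios_ramcfg *info)
{
	u8 ver, hdr, cnt, len;
	/* first entry whose [rammap_min, rammap_max] window contains 900 */
	return nvbios_rammapEm(bios, 900, &ver, &hdr, &cnt, &len, info);
}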
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 350d44ab2ba2..46d955eb51eb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -89,7 +89,15 @@ nvbios_timingEp(struct nouveau_bios *bios, int idx,
 		struct nvbios_ramcfg *p)
 {
 	u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+	p->timing_ver = *ver;
+	p->timing_hdr = *hdr;
 	switch (!!data * *ver) {
+	case 0x10:
+		p->timing_10_WR = nv_ro08(bios, data + 0x00);
+		p->timing_10_CL = nv_ro08(bios, data + 0x02);
+		p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
+		p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+		break;
 	case 0x20:
 		p->timing[0] = nv_ro32(bios, data + 0x00);
 		p->timing[1] = nv_ro32(bios, data + 0x04);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index a276a711294a..e51b72d47129 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -573,7 +573,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
573 573
574 clk->allow_reclock = allow_reclock; 574 clk->allow_reclock = allow_reclock;
575 575
576 ret = nvkm_notify_init(&device->event, nouveau_clock_pwrsrc, true, 576 ret = nvkm_notify_init(NULL, &device->event, nouveau_clock_pwrsrc, true,
577 NULL, 0, 0, &clk->pwrsrc_ntfy); 577 NULL, 0, 0, &clk->pwrsrc_ntfy);
578 if (ret) 578 if (ret)
579 return ret; 579 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 087012b18956..094551d8ad9b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -20,8 +20,10 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 * Roy Spliet
23 */ 24 */
24 25
26#include <engine/fifo.h>
25#include <subdev/bios.h> 27#include <subdev/bios.h>
26#include <subdev/bios/pll.h> 28#include <subdev/bios/pll.h>
27#include <subdev/timer.h> 29#include <subdev/timer.h>
@@ -42,9 +44,17 @@ static u32
42read_vco(struct nva3_clock_priv *priv, int clk) 44read_vco(struct nva3_clock_priv *priv, int clk)
43{ 45{
44 u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4)); 46 u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
45 if ((sctl & 0x00000030) != 0x00000030) 47
48 switch (sctl & 0x00000030) {
49 case 0x00000000:
50 return nv_device(priv)->crystal;
51 case 0x00000020:
46 return read_pll(priv, 0x41, 0x00e820); 52 return read_pll(priv, 0x41, 0x00e820);
47 return read_pll(priv, 0x42, 0x00e8a0); 53 case 0x00000030:
54 return read_pll(priv, 0x42, 0x00e8a0);
55 default:
56 return 0;
57 }
48} 58}
49 59
50static u32 60static u32
@@ -66,14 +76,25 @@ read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
66 if (!ignore_en && !(sctl & 0x00000100)) 76 if (!ignore_en && !(sctl & 0x00000100))
67 return 0; 77 return 0;
68 78
79 /* out_alt */
80 if (sctl & 0x00000400)
81 return 108000;
82
83 /* vco_out */
69 switch (sctl & 0x00003000) { 84 switch (sctl & 0x00003000) {
70 case 0x00000000: 85 case 0x00000000:
71 return nv_device(priv)->crystal; 86 if (!(sctl & 0x00000200))
87 return nv_device(priv)->crystal;
88 return 0;
72 case 0x00002000: 89 case 0x00002000:
73 if (sctl & 0x00000040) 90 if (sctl & 0x00000040)
74 return 108000; 91 return 108000;
75 return 100000; 92 return 100000;
76 case 0x00003000: 93 case 0x00003000:
94 /* vco_enable */
95 if (!(sctl & 0x00000001))
96 return 0;
97
77 sclk = read_vco(priv, clk); 98 sclk = read_vco(priv, clk);
78 sdiv = ((sctl & 0x003f0000) >> 16) + 2; 99 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
79 return (sclk * 2) / sdiv; 100 return (sclk * 2) / sdiv;
@@ -95,7 +116,9 @@ read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
95 N = (coef & 0x0000ff00) >> 8; 116 N = (coef & 0x0000ff00) >> 8;
96 P = (coef & 0x003f0000) >> 16; 117 P = (coef & 0x003f0000) >> 16;
97 118
98 /* no post-divider on these.. */ 119 /* no post-divider on these..
120 * XXX: it looks more like two post-"dividers" that
121 * cross each other out in the default RPLL config */
99 if ((pll & 0x00ff00) == 0x00e800) 122 if ((pll & 0x00ff00) == 0x00e800)
100 P = 1; 123 P = 1;
101 124
@@ -114,13 +137,13 @@ static int
114nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src) 137nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
115{ 138{
116 struct nva3_clock_priv *priv = (void *)clk; 139 struct nva3_clock_priv *priv = (void *)clk;
140 u32 hsrc;
117 141
118 switch (src) { 142 switch (src) {
119 case nv_clk_src_crystal: 143 case nv_clk_src_crystal:
120 return nv_device(priv)->crystal; 144 return nv_device(priv)->crystal;
121 case nv_clk_src_href:
122 return 100000;
123 case nv_clk_src_core: 145 case nv_clk_src_core:
146 case nv_clk_src_core_intm:
124 return read_pll(priv, 0x00, 0x4200); 147 return read_pll(priv, 0x00, 0x4200);
125 case nv_clk_src_shader: 148 case nv_clk_src_shader:
126 return read_pll(priv, 0x01, 0x4220); 149 return read_pll(priv, 0x01, 0x4220);
@@ -132,24 +155,33 @@ nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
132 return read_clk(priv, 0x21, false); 155 return read_clk(priv, 0x21, false);
133 case nv_clk_src_daemon: 156 case nv_clk_src_daemon:
134 return read_clk(priv, 0x25, false); 157 return read_clk(priv, 0x25, false);
158 case nv_clk_src_host:
159 hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
160 switch (hsrc) {
161 case 0:
162 return read_clk(priv, 0x1d, false);
163 case 2:
164 case 3:
165 return 277000;
166 default:
167 nv_error(clk, "unknown HOST clock source %d\n", hsrc);
168 return -EINVAL;
169 }
135 default: 170 default:
136 nv_error(clk, "invalid clock source %d\n", src); 171 nv_error(clk, "invalid clock source %d\n", src);
137 return -EINVAL; 172 return -EINVAL;
138 } 173 }
174
175 return 0;
139} 176}
140 177
141int 178int
142nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz, 179nva3_clk_info(struct nouveau_clock *clock, int clk, u32 khz,
143 struct nva3_clock_info *info) 180 struct nva3_clock_info *info)
144{ 181{
145 struct nouveau_bios *bios = nouveau_bios(clock);
146 struct nva3_clock_priv *priv = (void *)clock; 182 struct nva3_clock_priv *priv = (void *)clock;
147 struct nvbios_pll limits; 183 u32 oclk, sclk, sdiv, diff;
148 u32 oclk, sclk, sdiv;
149 int P, N, M, diff;
150 int ret;
151 184
152 info->pll = 0;
153 info->clk = 0; 185 info->clk = 0;
154 186
155 switch (khz) { 187 switch (khz) {
@@ -164,43 +196,69 @@ nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
164 return khz; 196 return khz;
165 default: 197 default:
166 sclk = read_vco(priv, clk); 198 sclk = read_vco(priv, clk);
167 sdiv = min((sclk * 2) / (khz - 2999), (u32)65); 199 sdiv = min((sclk * 2) / khz, (u32)65);
168 /* if the clock has a PLL attached, and we can get a within 200 oclk = (sclk * 2) / sdiv;
169 * [-2, 3) MHz of a divider, we'll disable the PLL and use 201 diff = ((khz + 3000) - oclk);
170 * the divider instead. 202
171 * 203 /* When imprecise, play it safe and aim for a clock lower than
172 * divider can go as low as 2, limited here because NVIDIA 204 * desired rather than higher */
205 if (diff < 0) {
206 sdiv++;
207 oclk = (sclk * 2) / sdiv;
208 }
209
210 /* divider can go as low as 2, limited here because NVIDIA
173 * and the VBIOS on my NVA8 seem to prefer using the PLL 211 * and the VBIOS on my NVA8 seem to prefer using the PLL
174 * for 810MHz - is there a good reason? 212 * for 810MHz - is there a good reason?
175 */ 213 * XXX: PLLs with refclk 810MHz? */
176 if (sdiv > 4) { 214 if (sdiv > 4) {
177 oclk = (sclk * 2) / sdiv; 215 info->clk = (((sdiv - 2) << 16) | 0x00003100);
178 diff = khz - oclk; 216 return oclk;
179 if (!pll || (diff >= -2000 && diff < 3000)) {
180 info->clk = (((sdiv - 2) << 16) | 0x00003100);
181 return oclk;
182 }
183 } 217 }
184 218
185 if (!pll)
186 return -ERANGE;
187 break; 219 break;
188 } 220 }
189 221
222 return -ERANGE;
223}
224
225int
226nva3_pll_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
227 struct nva3_clock_info *info)
228{
229 struct nouveau_bios *bios = nouveau_bios(clock);
230 struct nva3_clock_priv *priv = (void *)clock;
231 struct nvbios_pll limits;
232 int P, N, M, diff;
233 int ret;
234
235 info->pll = 0;
236
237 /* If we can get a within [-2, 3) MHz of a divider, we'll disable the
238 * PLL and use the divider instead. */
239 ret = nva3_clk_info(clock, clk, khz, info);
240 diff = khz - ret;
241 if (!pll || (diff >= -2000 && diff < 3000)) {
242 goto out;
243 }
244
245 /* Try with PLL */
190 ret = nvbios_pll_parse(bios, pll, &limits); 246 ret = nvbios_pll_parse(bios, pll, &limits);
191 if (ret) 247 if (ret)
192 return ret; 248 return ret;
193 249
194 limits.refclk = read_clk(priv, clk - 0x10, true); 250 ret = nva3_clk_info(clock, clk - 0x10, limits.refclk, info);
195 if (!limits.refclk) 251 if (ret != limits.refclk)
196 return -EINVAL; 252 return -EINVAL;
197 253
198 ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P); 254 ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
199 if (ret >= 0) { 255 if (ret >= 0) {
200 info->clk = nv_rd32(priv, 0x4120 + (clk * 4));
201 info->pll = (P << 16) | (N << 8) | M; 256 info->pll = (P << 16) | (N << 8) | M;
202 } 257 }
203 258
259out:
260 info->fb_delay = max(((khz + 7566) / 15133), (u32) 18);
261
204 return ret ? ret : -ERANGE; 262 return ret ? ret : -ERANGE;
205} 263}
206 264
@@ -208,13 +266,76 @@ static int
208calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate, 266calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
209 int clk, u32 pll, int idx) 267 int clk, u32 pll, int idx)
210{ 268{
211 int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx], 269 int ret = nva3_pll_info(&priv->base, clk, pll, cstate->domain[idx],
212 &priv->eng[idx]); 270 &priv->eng[idx]);
213 if (ret >= 0) 271 if (ret >= 0)
214 return 0; 272 return 0;
215 return ret; 273 return ret;
216} 274}
217 275
276static int
277calc_host(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate)
278{
279 int ret = 0;
280 u32 kHz = cstate->domain[nv_clk_src_host];
281 struct nva3_clock_info *info = &priv->eng[nv_clk_src_host];
282
283 if (kHz == 277000) {
284 info->clk = 0;
285 info->host_out = NVA3_HOST_277;
286 return 0;
287 }
288
289 info->host_out = NVA3_HOST_CLK;
290
291 ret = nva3_clk_info(&priv->base, 0x1d, kHz, info);
292 if (ret >= 0)
293 return 0;
294 return ret;
295}
296
297int
298nva3_clock_pre(struct nouveau_clock *clk, unsigned long *flags)
299{
300 struct nouveau_fifo *pfifo = nouveau_fifo(clk);
301
302 /* halt and idle execution engines */
303 nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
304 nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
305 /* Wait until the interrupt handler is finished */
306 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
307 return -EBUSY;
308
309 if (pfifo)
310 pfifo->pause(pfifo, flags);
311
312 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
313 return -EIO;
314 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
315 return -EIO;
316
317 return 0;
318}
319
320void
321nva3_clock_post(struct nouveau_clock *clk, unsigned long *flags)
322{
323 struct nouveau_fifo *pfifo = nouveau_fifo(clk);
324
325 if (pfifo && flags)
326 pfifo->start(pfifo, flags);
327
328 nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
329 nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
330}
331
332static void
333disable_clk_src(struct nva3_clock_priv *priv, u32 src)
334{
335 nv_mask(priv, src, 0x00000100, 0x00000000);
336 nv_mask(priv, src, 0x00000001, 0x00000000);
337}
338
218static void 339static void
219prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx) 340prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
220{ 341{
@@ -223,24 +344,35 @@ prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
223 const u32 src1 = 0x004160 + (clk * 4); 344 const u32 src1 = 0x004160 + (clk * 4);
224 const u32 ctrl = pll + 0; 345 const u32 ctrl = pll + 0;
225 const u32 coef = pll + 4; 346 const u32 coef = pll + 4;
347 u32 bypass;
226 348
227 if (info->pll) { 349 if (info->pll) {
228 nv_mask(priv, src0, 0x00000101, 0x00000101); 350 /* Always start from a non-PLL clock */
351 bypass = nv_rd32(priv, ctrl) & 0x00000008;
352 if (!bypass) {
353 nv_mask(priv, src1, 0x00000101, 0x00000101);
354 nv_mask(priv, ctrl, 0x00000008, 0x00000008);
355 udelay(20);
356 }
357
358 nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
229 nv_wr32(priv, coef, info->pll); 359 nv_wr32(priv, coef, info->pll);
230 nv_mask(priv, ctrl, 0x00000015, 0x00000015); 360 nv_mask(priv, ctrl, 0x00000015, 0x00000015);
231 nv_mask(priv, ctrl, 0x00000010, 0x00000000); 361 nv_mask(priv, ctrl, 0x00000010, 0x00000000);
232 nv_wait(priv, ctrl, 0x00020000, 0x00020000); 362 if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
363 nv_mask(priv, ctrl, 0x00000010, 0x00000010);
364 nv_mask(priv, src0, 0x00000101, 0x00000000);
365 return;
366 }
233 nv_mask(priv, ctrl, 0x00000010, 0x00000010); 367 nv_mask(priv, ctrl, 0x00000010, 0x00000010);
234 nv_mask(priv, ctrl, 0x00000008, 0x00000000); 368 nv_mask(priv, ctrl, 0x00000008, 0x00000000);
235 nv_mask(priv, src1, 0x00000100, 0x00000000); 369 disable_clk_src(priv, src1);
236 nv_mask(priv, src1, 0x00000001, 0x00000000);
237 } else { 370 } else {
238 nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk); 371 nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
239 nv_mask(priv, ctrl, 0x00000018, 0x00000018); 372 nv_mask(priv, ctrl, 0x00000018, 0x00000018);
240 udelay(20); 373 udelay(20);
241 nv_mask(priv, ctrl, 0x00000001, 0x00000000); 374 nv_mask(priv, ctrl, 0x00000001, 0x00000000);
242 nv_mask(priv, src0, 0x00000100, 0x00000000); 375 disable_clk_src(priv, src0);
243 nv_mask(priv, src0, 0x00000001, 0x00000000);
244 } 376 }
245} 377}
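
The reworked prog_pll() now forces the clock onto the bypass source (ctrl bit 3) before the coefficients are rewritten, and the lock wait on bit 17 gains a failure path: on timeout it re-engages bypass and backs out rather than switching onto an unlocked PLL. A sketch of that ordering with the registers replaced by a small state struct; the bit meanings are read off the code above, not taken from documentation:

#include <stdbool.h>
#include <stdio.h>

struct pll { bool bypass; bool locked; unsigned coef; };

/* will_lock stands in for the hardware's lock detect (0x00020000) */
static int prog(struct pll *p, unsigned coef, bool will_lock)
{
        if (!p->bypass) {
                p->bypass = true;       /* never rewrite a live PLL */
                /* udelay(20) here in the real code */
        }
        p->coef = coef;
        p->locked = will_lock;
        if (!p->locked)
                return -1;              /* timeout path: stay on bypass */
        p->bypass = false;              /* switch back onto the PLL */
        return 0;
}

int main(void)
{
        struct pll p = { .bypass = false };

        if (prog(&p, 0x00020055, true) == 0)
                printf("running from PLL, coef 0x%08x\n", p.coef);
        if (prog(&p, 0x00030077, false) < 0)
                printf("lock timed out, still on bypass: %d\n", p.bypass);
        return 0;
}
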
246 378
@@ -251,18 +383,72 @@ prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
251 nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk); 383 nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
252} 384}
253 385
386static void
387prog_host(struct nva3_clock_priv *priv)
388{
389 struct nva3_clock_info *info = &priv->eng[nv_clk_src_host];
390 u32 hsrc = (nv_rd32(priv, 0xc040));
391
392 switch (info->host_out) {
393 case NVA3_HOST_277:
394 if ((hsrc & 0x30000000) == 0) {
395 nv_wr32(priv, 0xc040, hsrc | 0x20000000);
396 disable_clk_src(priv, 0x4194);
397 }
398 break;
399 case NVA3_HOST_CLK:
400 prog_clk(priv, 0x1d, nv_clk_src_host);
401 if ((hsrc & 0x30000000) >= 0x20000000) {
402 nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
403 }
404 break;
405 default:
406 break;
407 }
408
 409 /* This seems to be a clock gating factor on idle, always set to 0x3e */
410 nv_wr32(priv, 0xc044, 0x3e);
411}
412
413static void
414prog_core(struct nva3_clock_priv *priv, int idx)
415{
416 struct nva3_clock_info *info = &priv->eng[idx];
417 u32 fb_delay = nv_rd32(priv, 0x10002c);
418
419 if (fb_delay < info->fb_delay)
420 nv_wr32(priv, 0x10002c, info->fb_delay);
421
422 prog_pll(priv, 0x00, 0x004200, idx);
423
424 if (fb_delay > info->fb_delay)
425 nv_wr32(priv, 0x10002c, info->fb_delay);
426}
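
prog_core() applies a raise-early/lower-late rule to the 0x10002c delay: the value is only ever increased before the PLL switch and only decreased after it, so the delay in effect is never smaller than what either the old or the new clock needs. The same pattern in a standalone sketch:

#include <stdio.h>

static unsigned hw_delay = 4;   /* stand-in for nv_rd32(priv, 0x10002c) */

static void reclock(unsigned new_delay)
{
        if (hw_delay < new_delay)       /* need more margin: raise first */
                hw_delay = new_delay;

        printf("reprogramming PLL with delay %u\n", hw_delay);

        if (hw_delay > new_delay)       /* excess margin: lower afterwards */
                hw_delay = new_delay;
}

int main(void)
{
        reclock(7);     /* upclock: delay raised before the switch */
        reclock(3);     /* downclock: delay lowered after the switch */
        return 0;
}
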
427
254static int 428static int
255nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate) 429nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
256{ 430{
257 struct nva3_clock_priv *priv = (void *)clk; 431 struct nva3_clock_priv *priv = (void *)clk;
432 struct nva3_clock_info *core = &priv->eng[nv_clk_src_core];
258 int ret; 433 int ret;
259 434
260 if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) || 435 if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
261 (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) || 436 (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
262 (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) || 437 (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
263 (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec))) 438 (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
439 (ret = calc_host(priv, cstate)))
264 return ret; 440 return ret;
265 441
442 /* XXX: Should be reading the highest bit in the VBIOS clock to decide
443 * whether to use a PLL or not... but using a PLL defeats the purpose */
444 if (core->pll) {
445 ret = nva3_clk_info(clk, 0x10,
446 cstate->domain[nv_clk_src_core_intm],
447 &priv->eng[nv_clk_src_core_intm]);
448 if (ret < 0)
449 return ret;
450 }
451
266 return 0; 452 return 0;
267} 453}
268 454
@@ -270,11 +456,31 @@ static int
270nva3_clock_prog(struct nouveau_clock *clk) 456nva3_clock_prog(struct nouveau_clock *clk)
271{ 457{
272 struct nva3_clock_priv *priv = (void *)clk; 458 struct nva3_clock_priv *priv = (void *)clk;
273 prog_pll(priv, 0x00, 0x004200, nv_clk_src_core); 459 struct nva3_clock_info *core = &priv->eng[nv_clk_src_core];
460 int ret = 0;
461 unsigned long flags;
462 unsigned long *f = &flags;
463
464 ret = nva3_clock_pre(clk, f);
465 if (ret)
466 goto out;
467
468 if (core->pll)
469 prog_core(priv, nv_clk_src_core_intm);
470
471 prog_core(priv, nv_clk_src_core);
274 prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader); 472 prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
275 prog_clk(priv, 0x20, nv_clk_src_disp); 473 prog_clk(priv, 0x20, nv_clk_src_disp);
276 prog_clk(priv, 0x21, nv_clk_src_vdec); 474 prog_clk(priv, 0x21, nv_clk_src_vdec);
277 return 0; 475 prog_host(priv);
476
477out:
478 if (ret == -EBUSY)
479 f = NULL;
480
481 nva3_clock_post(clk, f);
482
483 return ret;
278} 484}
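
When the target core clock uses the PLL, nva3_clock_prog() first moves the core onto the intermediate clock (nv_clk_src_core_intm, computed in nva3_clock_calc() above) and only then reprograms the core PLL, so the core never runs from a PLL whose coefficients are being rewritten. A sketch of the two-step switch; the constraint is inferred from the code, not from hardware documentation:

#include <stdio.h>

enum src { SRC_DIV, SRC_PLL };

static enum src core_src = SRC_PLL;

static void switch_core(enum src s)
{
        core_src = s;
        printf("core running from %s\n", s == SRC_PLL ? "PLL" : "divider");
}

static void write_pll_coef(unsigned coef)
{
        if (core_src == SRC_PLL)
                printf("BUG: rewriting the PLL we run from!\n");
        printf("coef <- 0x%08x\n", coef);
}

int main(void)
{
        switch_core(SRC_DIV);   /* prog_core(priv, nv_clk_src_core_intm) */
        write_pll_coef(0x00020055);
        switch_core(SRC_PLL);   /* prog_core(priv, nv_clk_src_core) */
        return 0;
}
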
279 485
280static void 486static void
@@ -284,13 +490,14 @@ nva3_clock_tidy(struct nouveau_clock *clk)
284 490
285static struct nouveau_clocks 491static struct nouveau_clocks
286nva3_domain[] = { 492nva3_domain[] = {
287 { nv_clk_src_crystal, 0xff }, 493 { nv_clk_src_crystal , 0xff },
288 { nv_clk_src_href , 0xff }, 494 { nv_clk_src_core , 0x00, 0, "core", 1000 },
289 { nv_clk_src_core , 0x00, 0, "core", 1000 }, 495 { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
290 { nv_clk_src_shader , 0x01, 0, "shader", 1000 }, 496 { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
291 { nv_clk_src_mem , 0x02, 0, "memory", 1000 }, 497 { nv_clk_src_vdec , 0x03 },
292 { nv_clk_src_vdec , 0x03 }, 498 { nv_clk_src_disp , 0x04 },
293 { nv_clk_src_disp , 0x04 }, 499 { nv_clk_src_host , 0x05 },
500 { nv_clk_src_core_intm, 0x06 },
294 { nv_clk_src_max } 501 { nv_clk_src_max }
295}; 502};
296 503
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
index 6229a509b42e..a45a1038b12f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
@@ -6,9 +6,15 @@
6struct nva3_clock_info { 6struct nva3_clock_info {
7 u32 clk; 7 u32 clk;
8 u32 pll; 8 u32 pll;
9 enum {
10 NVA3_HOST_277,
11 NVA3_HOST_CLK,
12 } host_out;
13 u32 fb_delay;
9}; 14};
10 15
11int nva3_clock_info(struct nouveau_clock *, int, u32, u32, 16int nva3_pll_info(struct nouveau_clock *, int, u32, u32,
12 struct nva3_clock_info *); 17 struct nva3_clock_info *);
13 18int nva3_clock_pre(struct nouveau_clock *clk, unsigned long *flags);
19void nva3_clock_post(struct nouveau_clock *clk, unsigned long *flags);
14#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
index 74e19731b1b7..54aeab8005a0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -28,6 +28,7 @@
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29#include <subdev/clock.h> 29#include <subdev/clock.h>
30 30
31#include "nva3.h"
31#include "pll.h" 32#include "pll.h"
32 33
33struct nvaa_clock_priv { 34struct nvaa_clock_priv {
@@ -299,25 +300,14 @@ static int
299nvaa_clock_prog(struct nouveau_clock *clk) 300nvaa_clock_prog(struct nouveau_clock *clk)
300{ 301{
301 struct nvaa_clock_priv *priv = (void *)clk; 302 struct nvaa_clock_priv *priv = (void *)clk;
302 struct nouveau_fifo *pfifo = nouveau_fifo(clk); 303 u32 pllmask = 0, mast;
303 unsigned long flags; 304 unsigned long flags;
304 u32 pllmask = 0, mast, ptherm_gate; 305 unsigned long *f = &flags;
305 int ret = -EBUSY; 306 int ret = 0;
306
307 /* halt and idle execution engines */
308 ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
309 nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
310 /* Wait until the interrupt handler is finished */
311 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
312 goto resume;
313
314 if (pfifo)
315 pfifo->pause(pfifo, &flags);
316 307
317 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010)) 308 ret = nva3_clock_pre(clk, f);
318 goto resume; 309 if (ret)
319 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f)) 310 goto out;
320 goto resume;
321 311
322 /* First switch to safe clocks: href */ 312 /* First switch to safe clocks: href */
323 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640); 313 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
@@ -375,15 +365,8 @@ nvaa_clock_prog(struct nouveau_clock *clk)
375 } 365 }
376 366
377 nv_wr32(clk, 0xc054, mast); 367 nv_wr32(clk, 0xc054, mast);
378 ret = 0;
379 368
380resume: 369resume:
381 if (pfifo)
382 pfifo->start(pfifo, &flags);
383
384 nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
385 nv_wr32(clk, 0x020060, ptherm_gate);
386
387 /* Disable some PLLs and dividers when unused */ 370 /* Disable some PLLs and dividers when unused */
388 if (priv->csrc != nv_clk_src_core) { 371 if (priv->csrc != nv_clk_src_core) {
389 nv_wr32(clk, 0x4040, 0x00000000); 372 nv_wr32(clk, 0x4040, 0x00000000);
@@ -395,6 +378,12 @@ resume:
395 nv_mask(clk, 0x4020, 0x80000000, 0x00000000); 378 nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
396 } 379 }
397 380
381out:
382 if (ret == -EBUSY)
383 f = NULL;
384
385 nva3_clock_post(clk, f);
386
398 return ret; 387 return ret;
399} 388}
400 389
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
index 4fe49cf4c99a..6103484fea72 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -26,22 +26,8 @@
26 26
27#include <core/device.h> 27#include <core/device.h>
28 28
29#define NV04_PFB_BOOT_0 0x00100000 29#include <subdev/fb/regsnv04.h>
30# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003 30
31# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
32# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
33# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
34# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
35# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
36# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
37# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
38# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
39# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
40# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
41# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
42# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
43# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
44# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
45#define NV04_PFB_DEBUG_0 0x00100080 31#define NV04_PFB_DEBUG_0 0x00100080
46# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001 32# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001
47# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010 33# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
index 66fe959b4f74..7fbbe05d5c60 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -40,7 +40,7 @@ nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
40 int WL, CL, WR, at[2], dt, ds; 40 int WL, CL, WR, at[2], dt, ds;
41 int rq = ram->freq < 1000000; /* XXX */ 41 int rq = ram->freq < 1000000; /* XXX */
42 42
43 switch (ram->ramcfg.version) { 43 switch (ram->next->bios.ramcfg_ver) {
44 case 0x11: 44 case 0x11:
45 pd = ram->next->bios.ramcfg_11_01_80; 45 pd = ram->next->bios.ramcfg_11_01_80;
46 lf = ram->next->bios.ramcfg_11_01_40; 46 lf = ram->next->bios.ramcfg_11_01_40;
@@ -54,7 +54,7 @@ nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
54 return -ENOSYS; 54 return -ENOSYS;
55 } 55 }
56 56
57 switch (ram->timing.version) { 57 switch (ram->next->bios.timing_ver) {
58 case 0x20: 58 case 0x20:
59 WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7; 59 WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
60 CL = (ram->next->bios.timing[1] & 0x0000001f); 60 CL = (ram->next->bios.timing[1] & 0x0000001f);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index f003c1b1893f..2209ade63339 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
45{ 45{
46 u32 tiles = DIV_ROUND_UP(size, 0x40); 46 u32 tiles = DIV_ROUND_UP(size, 0x40);
47 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 47 u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
48 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { 48 if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
49 if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */ 49 if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
50 else tile->zcomp = 0x04000000; /* Z24S8 */ 50 else tile->zcomp = 0x04000000; /* Z24S8 */
51 tile->zcomp |= tile->tag->offset; 51 tile->zcomp |= tile->tag->offset;
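
Every nouveau_mm_head()/nouveau_mm_tail() call in this series grows a new second argument; going by the 0 passed at all of these call sites, it looks like a heap selector with 0 meaning "any heap". That reading is an inference from the diff, not from the mm API itself. A hypothetical sketch of the idea:

#include <stdint.h>
#include <stdio.h>

struct node { uint8_t heap; uint32_t offset; uint32_t length; };

static struct node pool[] = {
        { 1, 0x0000, 0x100 },
        { 2, 0x0100, 0x200 },
};

/* heap == 0 matches any heap; otherwise only nodes from that heap */
static struct node *mm_head(uint8_t heap, uint32_t size)
{
        for (unsigned i = 0; i < sizeof(pool) / sizeof(pool[0]); i++) {
                if (heap && pool[i].heap != heap)
                        continue;
                if (pool[i].length >= size)
                        return &pool[i];
        }
        return NULL;
}

int main(void)
{
        struct node *n = mm_head(0, 0x180);     /* any heap, as above */

        if (n)
                printf("offset 0x%04x from heap %u\n", n->offset, n->heap);
        return 0;
}
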
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
index f34f4223210b..e2a66c355c50 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
32{ 32{
33 u32 tiles = DIV_ROUND_UP(size, 0x40); 33 u32 tiles = DIV_ROUND_UP(size, 0x40);
34 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 34 u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
35 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { 35 if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
36 if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ 36 if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
37 else tile->zcomp = 0x00200000; /* Z24S8 */ 37 else tile->zcomp = 0x00200000; /* Z24S8 */
38 tile->zcomp |= tile->tag->offset; 38 tile->zcomp |= tile->tag->offset;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index 69093f7151f0..cbec402ba5b9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
51{ 51{
52 u32 tiles = DIV_ROUND_UP(size, 0x40); 52 u32 tiles = DIV_ROUND_UP(size, 0x40);
53 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 53 u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
54 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { 54 if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
55 if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ 55 if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
56 else tile->zcomp |= 0x02000000; /* Z24S8 */ 56 else tile->zcomp |= 0x02000000; /* Z24S8 */
57 tile->zcomp |= ((tile->tag->offset ) >> 6); 57 tile->zcomp |= ((tile->tag->offset ) >> 6);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
index 161b06e8fc3f..b2cf8c69fb2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
32{ 32{
33 u32 tiles = DIV_ROUND_UP(size, 0x40); 33 u32 tiles = DIV_ROUND_UP(size, 0x40);
34 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 34 u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
35 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { 35 if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
36 if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */ 36 if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
37 else tile->zcomp |= 0x08000000; /* Z24S8 */ 37 else tile->zcomp |= 0x08000000; /* Z24S8 */
38 tile->zcomp |= ((tile->tag->offset ) >> 6); 38 tile->zcomp |= ((tile->tag->offset ) >> 6);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
index 2dd3d0aab6bb..b4cdae2a3b2f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
32{ 32{
33 u32 tiles = DIV_ROUND_UP(size, 0x40); 33 u32 tiles = DIV_ROUND_UP(size, 0x40);
34 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 34 u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
35 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { 35 if (!nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
36 if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */ 36 if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
37 else tile->zcomp |= 0x20000000; /* Z24S8 */ 37 else tile->zcomp |= 0x20000000; /* Z24S8 */
38 tile->zcomp |= ((tile->tag->offset ) >> 6); 38 tile->zcomp |= ((tile->tag->offset ) >> 6);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 95a115ab0c86..52814258c212 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
33 u32 tiles = DIV_ROUND_UP(size, 0x80); 33 u32 tiles = DIV_ROUND_UP(size, 0x80);
34 u32 tags = round_up(tiles / pfb->ram->parts, 0x100); 34 u32 tags = round_up(tiles / pfb->ram->parts, 0x100);
35 if ( (flags & 2) && 35 if ( (flags & 2) &&
36 !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { 36 !nouveau_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
37 tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */ 37 tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
38 tile->zcomp |= ((tile->tag->offset ) >> 8); 38 tile->zcomp |= ((tile->tag->offset ) >> 8);
39 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; 39 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 82273f832e42..60322e906dd4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -35,6 +35,7 @@ extern struct nouveau_oclass nve0_ram_oclass;
35extern struct nouveau_oclass gk20a_ram_oclass; 35extern struct nouveau_oclass gk20a_ram_oclass;
36extern struct nouveau_oclass gm107_ram_oclass; 36extern struct nouveau_oclass gm107_ram_oclass;
37 37
38int nouveau_sddr2_calc(struct nouveau_ram *ram);
38int nouveau_sddr3_calc(struct nouveau_ram *ram); 39int nouveau_sddr3_calc(struct nouveau_ram *ram);
39int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts); 40int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
40 41
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 2af9cfd2c60f..d1fbbe4b00a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -12,16 +12,32 @@ struct ramfuc {
12struct ramfuc_reg { 12struct ramfuc_reg {
13 int sequence; 13 int sequence;
14 bool force; 14 bool force;
15 u32 addr[2]; 15 u32 addr;
16 u32 stride; /* in bytes */
17 u32 mask;
16 u32 data; 18 u32 data;
17}; 19};
18 20
19static inline struct ramfuc_reg 21static inline struct ramfuc_reg
22ramfuc_stride(u32 addr, u32 stride, u32 mask)
23{
24 return (struct ramfuc_reg) {
25 .sequence = 0,
26 .addr = addr,
27 .stride = stride,
28 .mask = mask,
29 .data = 0xdeadbeef,
30 };
31}
32
33static inline struct ramfuc_reg
20ramfuc_reg2(u32 addr1, u32 addr2) 34ramfuc_reg2(u32 addr1, u32 addr2)
21{ 35{
22 return (struct ramfuc_reg) { 36 return (struct ramfuc_reg) {
23 .sequence = 0, 37 .sequence = 0,
24 .addr = { addr1, addr2 }, 38 .addr = addr1,
39 .stride = addr2 - addr1,
40 .mask = 0x3,
25 .data = 0xdeadbeef, 41 .data = 0xdeadbeef,
26 }; 42 };
27} 43}
@@ -29,7 +45,13 @@ ramfuc_reg2(u32 addr1, u32 addr2)
29static noinline struct ramfuc_reg 45static noinline struct ramfuc_reg
30ramfuc_reg(u32 addr) 46ramfuc_reg(u32 addr)
31{ 47{
32 return ramfuc_reg2(addr, addr); 48 return (struct ramfuc_reg) {
49 .sequence = 0,
50 .addr = addr,
51 .stride = 0,
52 .mask = 0x1,
53 .data = 0xdeadbeef,
54 };
33} 55}
34 56
35static inline int 57static inline int
@@ -62,18 +84,25 @@ static inline u32
62ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg) 84ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
63{ 85{
64 if (reg->sequence != ram->sequence) 86 if (reg->sequence != ram->sequence)
65 reg->data = nv_rd32(ram->pfb, reg->addr[0]); 87 reg->data = nv_rd32(ram->pfb, reg->addr);
66 return reg->data; 88 return reg->data;
67} 89}
68 90
69static inline void 91static inline void
70ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data) 92ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
71{ 93{
94 unsigned int mask, off = 0;
95
72 reg->sequence = ram->sequence; 96 reg->sequence = ram->sequence;
73 reg->data = data; 97 reg->data = data;
74 if (reg->addr[0] != reg->addr[1]) 98
75 nouveau_memx_wr32(ram->memx, reg->addr[1], reg->data); 99 for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
76 nouveau_memx_wr32(ram->memx, reg->addr[0], reg->data); 100 if (mask & 1) {
101 nouveau_memx_wr32(ram->memx, reg->addr+off, reg->data);
102 }
103
104 off += reg->stride;
105 }
77} 106}
78 107
79static inline void 108static inline void
@@ -105,14 +134,35 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
105 nouveau_memx_nsec(ram->memx, nsec); 134 nouveau_memx_nsec(ram->memx, nsec);
106} 135}
107 136
108#define ram_init(s,p) ramfuc_init(&(s)->base, (p)) 137static inline void
109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e)) 138ramfuc_wait_vblank(struct ramfuc *ram)
110#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000) 139{
111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r) 140 nouveau_memx_wait_vblank(ram->memx);
112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d)) 141}
113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r) 142
114#define ram_mask(s,r,m,d) ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d)) 143static inline void
115#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n)) 144ramfuc_block(struct ramfuc *ram)
116#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n)) 145{
146 nouveau_memx_block(ram->memx);
147}
148
149static inline void
150ramfuc_unblock(struct ramfuc *ram)
151{
152 nouveau_memx_unblock(ram->memx);
153}
154
155#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
156#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
157#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
158#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
159#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
160#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
161#define ram_mask(s,r,m,d) ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d))
162#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
163#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
164#define ram_wait_vblank(s) ramfuc_wait_vblank(&(s)->base)
165#define ram_block(s) ramfuc_block(&(s)->base)
166#define ram_unblock(s) ramfuc_unblock(&(s)->base)
117 167
118#endif 168#endif
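
The ramfuc_reg rework replaces the fixed two-address array with a base address, a stride and a bitmask: ramfuc_wr32() writes base + n*stride for every set bit n. ramfuc_reg() becomes mask 0x1 (a single write), ramfuc_reg2(a,b) becomes base a, stride b-a, mask 0x3 (both addresses), and the new ramfuc_stride() lets a per-partition register be written once per enabled partition simply by passing part_mask, as ramnva3.c does below. A standalone sketch of the expansion, printing instead of writing:

#include <stdint.h>
#include <stdio.h>

/* mirror of the loop in ramfuc_wr32(): one write per set bit in mask,
 * at base + bit_index * stride */
static void fuc_wr32(uint32_t base, uint32_t stride, uint32_t mask,
                     uint32_t data)
{
        uint32_t off = 0;

        for (; mask > 0; mask = (mask & ~1u) >> 1) {
                if (mask & 1)
                        printf("  wr32 0x%06x <- 0x%08x\n", base + off, data);
                off += stride;
        }
}

int main(void)
{
        /* ramfuc_reg(0x111100): mask 0x1, one write */
        fuc_wr32(0x111100, 0, 0x1, 0x4c020000);
        /* ramfuc_stride(0x100760, 4, part_mask) with partitions 0 and 2
         * enabled (mask 0x5): writes 0x100760 and 0x100768 only */
        fuc_wr32(0x100760, 4, 0x5, 0xdeadbeef);
        return 0;
}
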
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c
index e781080d3327..1972268d1410 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv04.c
@@ -22,22 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#define NV04_PFB_BOOT_0 0x00100000 25#include <subdev/fb/regsnv04.h>
26# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
27# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
28# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
29# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
30# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
31# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
32# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
33# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
34# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
35# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
36# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
37# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
38# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
39# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
40# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
41 26
42#include "priv.h" 27#include "priv.h"
43 28
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index e5d12c24cc43..64a983c96625 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -280,7 +280,7 @@ nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
280 if (align == 16) { 280 if (align == 16) {
281 int n = (max >> 4) * comp; 281 int n = (max >> 4) * comp;
282 282
283 ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag); 283 ret = nouveau_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
284 if (ret) 284 if (ret)
285 mem->tag = NULL; 285 mem->tag = NULL;
286 } 286 }
@@ -296,9 +296,9 @@ nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
296 type = nv50_fb_memtype[type]; 296 type = nv50_fb_memtype[type];
297 do { 297 do {
298 if (back) 298 if (back)
299 ret = nouveau_mm_tail(heap, type, max, min, align, &r); 299 ret = nouveau_mm_tail(heap, 0, type, max, min, align, &r);
300 else 300 else
301 ret = nouveau_mm_head(heap, type, max, min, align, &r); 301 ret = nouveau_mm_head(heap, 0, type, max, min, align, &r);
302 if (ret) { 302 if (ret) {
303 mutex_unlock(&pfb->base.mutex); 303 mutex_unlock(&pfb->base.mutex);
304 pfb->ram->put(pfb, &mem); 304 pfb->ram->put(pfb, &mem);
@@ -319,27 +319,22 @@ nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
319static u32 319static u32
320nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram) 320nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
321{ 321{
322 int i, parts, colbits, rowbitsa, rowbitsb, banks; 322 int colbits, rowbitsa, rowbitsb, banks;
323 u64 rowsize, predicted; 323 u64 rowsize, predicted;
324 u32 r0, r4, rt, ru, rblock_size; 324 u32 r0, r4, rt, rblock_size;
325 325
326 r0 = nv_rd32(pfb, 0x100200); 326 r0 = nv_rd32(pfb, 0x100200);
327 r4 = nv_rd32(pfb, 0x100204); 327 r4 = nv_rd32(pfb, 0x100204);
328 rt = nv_rd32(pfb, 0x100250); 328 rt = nv_rd32(pfb, 0x100250);
329 ru = nv_rd32(pfb, 0x001540); 329 nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt,
330 nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); 330 nv_rd32(pfb, 0x001540));
331
332 for (i = 0, parts = 0; i < 8; i++) {
333 if (ru & (0x00010000 << i))
334 parts++;
335 }
336 331
337 colbits = (r4 & 0x0000f000) >> 12; 332 colbits = (r4 & 0x0000f000) >> 12;
338 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 333 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
339 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; 334 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
340 banks = 1 << (((r4 & 0x03000000) >> 24) + 2); 335 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
341 336
342 rowsize = parts * banks * (1 << colbits) * 8; 337 rowsize = ram->parts * banks * (1 << colbits) * 8;
343 predicted = rowsize << rowbitsa; 338 predicted = rowsize << rowbitsa;
344 if (r0 & 0x00000004) 339 if (r0 & 0x00000004)
345 predicted += rowsize << rowbitsb; 340 predicted += rowsize << rowbitsb;
@@ -376,6 +371,9 @@ nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
376 ram->size = nv_rd32(pfb, 0x10020c); 371 ram->size = nv_rd32(pfb, 0x10020c);
377 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32); 372 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
378 373
374 ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
375 ram->parts = hweight8(ram->part_mask);
376
379 switch (nv_rd32(pfb, 0x100714) & 0x00000007) { 377 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
380 case 0: ram->type = NV_MEM_TYPE_DDR1; break; 378 case 0: ram->type = NV_MEM_TYPE_DDR1; break;
381 case 1: 379 case 1:
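
nv50_fb_vram_rblock() no longer re-derives the partition count from 0x001540: the constructor now stores the partition bits as ram->part_mask and counts them with hweight8(), which is exactly the population count the removed loop computed. A quick standalone check, using the compiler's popcount builtin in place of hweight8():

#include <stdint.h>
#include <stdio.h>

/* the loop removed from nv50_fb_vram_rblock() */
static int loop_count(uint32_t ru)
{
        int i, parts = 0;

        for (i = 0; i < 8; i++)
                if (ru & (0x00010000 << i))
                        parts++;
        return parts;
}

int main(void)
{
        uint32_t ru = 0x00550000;               /* partitions 0, 2, 4, 6 */
        uint8_t part_mask = (ru & 0x00ff0000) >> 16;

        printf("%d == %d\n", loop_count(ru), __builtin_popcount(part_mask));
        return 0;
}
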
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index 8076fb195dd5..3601deca0bd5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -79,20 +79,27 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
79 struct nva3_ram *ram = (void *)pfb->ram; 79 struct nva3_ram *ram = (void *)pfb->ram;
80 struct nva3_ramfuc *fuc = &ram->fuc; 80 struct nva3_ramfuc *fuc = &ram->fuc;
81 struct nva3_clock_info mclk; 81 struct nva3_clock_info mclk;
82 u8 ver, cnt, len, strap; 82 struct nouveau_ram_data *next;
83 u8 ver, hdr, cnt, len, strap;
83 u32 data; 84 u32 data;
84 struct {
85 u32 data;
86 u8 size;
87 } rammap, ramcfg, timing;
88 u32 r004018, r100760, ctrl; 85 u32 r004018, r100760, ctrl;
89 u32 unk714, unk718, unk71c; 86 u32 unk714, unk718, unk71c;
90 int ret; 87 int ret, i;
88
89 next = &ram->base.target;
90 next->freq = freq;
91 ram->base.next = next;
91 92
92 /* lookup memory config data relevant to the target frequency */ 93 /* lookup memory config data relevant to the target frequency */
93 rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size, 94 i = 0;
94 &cnt, &ramcfg.size); 95 while ((data = nvbios_rammapEp(bios, i++, &ver, &hdr, &cnt, &len,
95 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { 96 &next->bios))) {
97 if (freq / 1000 >= next->bios.rammap_min &&
98 freq / 1000 <= next->bios.rammap_max)
99 break;
100 }
101
102 if (!data || ver != 0x10 || hdr < 0x0e) {
96 nv_error(pfb, "invalid/missing rammap entry\n"); 103 nv_error(pfb, "invalid/missing rammap entry\n");
97 return -EINVAL; 104 return -EINVAL;
98 } 105 }
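
Instead of having nvbios_rammapEm() pick an entry by frequency, the code now walks the rammap itself with nvbios_rammapEp() and keeps the first entry whose [rammap_min, rammap_max] MHz window contains the target; the ver/hdr check afterwards also catches the case where the loop ran off the end of the table. The lookup, reduced to a standalone sketch with made-up window values:

#include <stdio.h>

struct entry { unsigned min_mhz, max_mhz; const char *name; };

static const struct entry table[] = {
        { 100, 400, "low" },
        { 401, 900, "mid" },
        { 901, 1250, "high" },
};

static const struct entry *lookup(unsigned khz)
{
        unsigned mhz = khz / 1000;

        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (mhz >= table[i].min_mhz && mhz <= table[i].max_mhz)
                        return &table[i];
        return NULL;
}

int main(void)
{
        const struct entry *e = lookup(750000);

        printf("%s\n", e ? e->name : "invalid/missing rammap entry");
        return 0;
}
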
@@ -104,26 +111,25 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
104 return -EINVAL; 111 return -EINVAL;
105 } 112 }
106 113
107 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size); 114 data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
108 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) { 115 &ver, &hdr, &next->bios);
116 if (!data || ver != 0x10 || hdr < 0x0e) {
109 nv_error(pfb, "invalid/missing ramcfg entry\n"); 117 nv_error(pfb, "invalid/missing ramcfg entry\n");
110 return -EINVAL; 118 return -EINVAL;
111 } 119 }
112 120
113 /* lookup memory timings, if bios says they're present */ 121 /* lookup memory timings, if bios says they're present */
114 strap = nv_ro08(bios, ramcfg.data + 0x01); 122 if (next->bios.ramcfg_timing != 0xff) {
115 if (strap != 0xff) { 123 data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
116 timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size, 124 &ver, &hdr, &cnt, &len,
117 &cnt, &len); 125 &next->bios);
118 if (!timing.data || ver != 0x10 || timing.size < 0x19) { 126 if (!data || ver != 0x10 || hdr < 0x19) {
119 nv_error(pfb, "invalid/missing timing entry\n"); 127 nv_error(pfb, "invalid/missing timing entry\n");
120 return -EINVAL; 128 return -EINVAL;
121 } 129 }
122 } else {
123 timing.data = 0;
124 } 130 }
125 131
126 ret = nva3_clock_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk); 132 ret = nva3_pll_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
127 if (ret < 0) { 133 if (ret < 0) {
128 nv_error(pfb, "failed mclk calculation\n"); 134 nv_error(pfb, "failed mclk calculation\n");
129 return ret; 135 return ret;
@@ -163,17 +169,17 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
163 ram_mask(fuc, 0x004168, 0x003f3141, ctrl); 169 ram_mask(fuc, 0x004168, 0x003f3141, ctrl);
164 } 170 }
165 171
166 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) { 172 if (next->bios.ramcfg_10_02_10) {
167 ram_mask(fuc, 0x111104, 0x00000600, 0x00000000); 173 ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
168 } else { 174 } else {
169 ram_mask(fuc, 0x111100, 0x40000000, 0x40000000); 175 ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
170 ram_mask(fuc, 0x111104, 0x00000180, 0x00000000); 176 ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
171 } 177 }
172 178
173 if (!(nv_ro08(bios, rammap.data + 0x04) & 0x02)) 179 if (!next->bios.rammap_10_04_02)
174 ram_mask(fuc, 0x100200, 0x00000800, 0x00000000); 180 ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
175 ram_wr32(fuc, 0x611200, 0x00003300); 181 ram_wr32(fuc, 0x611200, 0x00003300);
176 if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) 182 if (!next->bios.ramcfg_10_02_10)
177 ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/ 183 ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
178 184
179 ram_wr32(fuc, 0x1002d4, 0x00000001); 185 ram_wr32(fuc, 0x1002d4, 0x00000001);
@@ -202,17 +208,16 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
202 ram_wr32(fuc, 0x004018, 0x0000d000 | r004018); 208 ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
203 } 209 }
204 210
205 if ( (nv_ro08(bios, rammap.data + 0x04) & 0x08)) { 211 if (next->bios.rammap_10_04_08) {
206 u32 unk5a0 = (nv_ro16(bios, ramcfg.data + 0x05) << 8) | 212 ram_wr32(fuc, 0x1005a0, next->bios.ramcfg_10_06 << 16 |
207 nv_ro08(bios, ramcfg.data + 0x05); 213 next->bios.ramcfg_10_05 << 8 |
208 u32 unk5a4 = (nv_ro16(bios, ramcfg.data + 0x07)); 214 next->bios.ramcfg_10_05);
209 u32 unk804 = (nv_ro08(bios, ramcfg.data + 0x09) & 0xf0) << 16 | 215 ram_wr32(fuc, 0x1005a4, next->bios.ramcfg_10_08 << 8 |
210 (nv_ro08(bios, ramcfg.data + 0x03) & 0x0f) << 16 | 216 next->bios.ramcfg_10_07);
211 (nv_ro08(bios, ramcfg.data + 0x09) & 0x0f) | 217 ram_wr32(fuc, 0x10f804, next->bios.ramcfg_10_09_f0 << 20 |
212 0x80000000; 218 next->bios.ramcfg_10_03_0f << 16 |
213 ram_wr32(fuc, 0x1005a0, unk5a0); 219 next->bios.ramcfg_10_09_0f |
214 ram_wr32(fuc, 0x1005a4, unk5a4); 220 0x80000000);
215 ram_wr32(fuc, 0x10f804, unk804);
216 ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000); 221 ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
217 } else { 222 } else {
218 ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000); 223 ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
@@ -250,27 +255,26 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
250 ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000); 255 ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
251 ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000); 256 ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
252 257
253 data = (nv_ro08(bios, ramcfg.data + 0x02) & 0x08) ? 0x00000000 : 0x00001000; 258 ram_mask(fuc, 0x100200, 0x00001000, !next->bios.ramcfg_10_02_08 << 12);
254 ram_mask(fuc, 0x100200, 0x00001000, data);
255 259
256 unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010; 260 unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
257 unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100; 261 unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
258 unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100; 262 unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
259 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x20)) 263 if (next->bios.ramcfg_10_02_20)
260 unk714 |= 0xf0000000; 264 unk714 |= 0xf0000000;
261 if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x04)) 265 if (!next->bios.ramcfg_10_02_04)
262 unk714 |= 0x00000010; 266 unk714 |= 0x00000010;
263 ram_wr32(fuc, 0x100714, unk714); 267 ram_wr32(fuc, 0x100714, unk714);
264 268
265 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x01) 269 if (next->bios.ramcfg_10_02_01)
266 unk71c |= 0x00000100; 270 unk71c |= 0x00000100;
267 ram_wr32(fuc, 0x10071c, unk71c); 271 ram_wr32(fuc, 0x10071c, unk71c);
268 272
269 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x02) 273 if (next->bios.ramcfg_10_02_02)
270 unk718 |= 0x00000100; 274 unk718 |= 0x00000100;
271 ram_wr32(fuc, 0x100718, unk718); 275 ram_wr32(fuc, 0x100718, unk718);
272 276
273 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x10) 277 if (next->bios.ramcfg_10_02_10)
274 ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/ 278 ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
275 279
276 ram_mask(fuc, mr[0], 0x100, 0x100); 280 ram_mask(fuc, mr[0], 0x100, 0x100);
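
The one-to-one replacements in these hunks expose the naming scheme of the parsed ramcfg fields: ramcfg_10_02_10 reads as "ramcfg table version 0x10, byte 0x02, mask 0x10", so nv_ro08(bios, ramcfg.data + 0x02) & 0x10 becomes next->bios.ramcfg_10_02_10, already shifted down to bit 0. This reading is taken from the old/new columns of the diff itself. A sketch of such an accessor:

#include <stdint.h>
#include <stdio.h>

/* extract (tbl[byte] & mask), normalised so the field starts at bit 0 */
static uint8_t field(const uint8_t *tbl, unsigned byte, uint8_t mask)
{
        uint8_t v = tbl[byte] & mask;

        while (mask && !(mask & 1)) {
                mask >>= 1;
                v >>= 1;
        }
        return v;
}

int main(void)
{
        uint8_t ramcfg[16] = { [0x02] = 0x30 };

        printf("ramcfg_10_02_10 = %u\n", field(ramcfg, 0x02, 0x10));
        printf("ramcfg_10_02_20 = %u\n", field(ramcfg, 0x02, 0x20));
        return 0;
}
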
@@ -282,9 +286,9 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
282 ram_nsec(fuc, 12000); 286 ram_nsec(fuc, 12000);
283 287
284 ram_wr32(fuc, 0x611200, 0x00003330); 288 ram_wr32(fuc, 0x611200, 0x00003330);
285 if ( (nv_ro08(bios, rammap.data + 0x04) & 0x02)) 289 if (next->bios.rammap_10_04_02)
286 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800); 290 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
287 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) { 291 if (next->bios.ramcfg_10_02_10) {
288 ram_mask(fuc, 0x111104, 0x00000180, 0x00000180); 292 ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
289 ram_mask(fuc, 0x111100, 0x40000000, 0x00000000); 293 ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
290 } else { 294 } else {
@@ -404,11 +408,11 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
404 ram->fuc.r_0x100714 = ramfuc_reg(0x100714); 408 ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
405 ram->fuc.r_0x100718 = ramfuc_reg(0x100718); 409 ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
406 ram->fuc.r_0x10071c = ramfuc_reg(0x10071c); 410 ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
407 ram->fuc.r_0x100760 = ramfuc_reg(0x100760); 411 ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
408 ram->fuc.r_0x1007a0 = ramfuc_reg(0x1007a0); 412 ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
409 ram->fuc.r_0x1007e0 = ramfuc_reg(0x1007e0); 413 ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
410 ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804); 414 ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
411 ram->fuc.r_0x1110e0 = ramfuc_reg(0x1110e0); 415 ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
412 ram->fuc.r_0x111100 = ramfuc_reg(0x111100); 416 ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
413 ram->fuc.r_0x111104 = ramfuc_reg(0x111104); 417 ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
414 ram->fuc.r_0x611200 = ramfuc_reg(0x611200); 418 ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 2b284b192763..735cb9580abe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -133,6 +133,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
133 struct nouveau_bios *bios = nouveau_bios(pfb); 133 struct nouveau_bios *bios = nouveau_bios(pfb);
134 struct nvc0_ram *ram = (void *)pfb->ram; 134 struct nvc0_ram *ram = (void *)pfb->ram;
135 struct nvc0_ramfuc *fuc = &ram->fuc; 135 struct nvc0_ramfuc *fuc = &ram->fuc;
136 struct nvbios_ramcfg cfg;
136 u8 ver, cnt, len, strap; 137 u8 ver, cnt, len, strap;
137 struct { 138 struct {
138 u32 data; 139 u32 data;
@@ -145,7 +146,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
145 146
146 /* lookup memory config data relevant to the target frequency */ 147 /* lookup memory config data relevant to the target frequency */
147 rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size, 148 rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
148 &cnt, &ramcfg.size); 149 &cnt, &ramcfg.size, &cfg);
149 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { 150 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
150 nv_error(pfb, "invalid/missing rammap entry\n"); 151 nv_error(pfb, "invalid/missing rammap entry\n");
151 return -EINVAL; 152 return -EINVAL;
@@ -483,9 +484,9 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
483 484
484 do { 485 do {
485 if (back) 486 if (back)
486 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 487 ret = nouveau_mm_tail(mm, 0, 1, size, ncmin, align, &r);
487 else 488 else
488 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); 489 ret = nouveau_mm_head(mm, 0, 1, size, ncmin, align, &r);
489 if (ret) { 490 if (ret) {
490 mutex_unlock(&pfb->base.mutex); 491 mutex_unlock(&pfb->base.mutex);
491 pfb->ram->put(pfb, &mem); 492 pfb->ram->put(pfb, &mem);
@@ -562,7 +563,7 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
562 offset = (0x0200000000ULL >> 12) + (bsize << 8); 563 offset = (0x0200000000ULL >> 12) + (bsize << 8);
563 length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail; 564 length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
564 565
565 ret = nouveau_mm_init(&pfb->vram, offset, length, 0); 566 ret = nouveau_mm_init(&pfb->vram, offset, length, 1);
566 if (ret) 567 if (ret)
567 nouveau_mm_fini(&pfb->vram); 568 nouveau_mm_fini(&pfb->vram);
568 } 569 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index c5b46e302319..6bae474abb44 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -29,6 +29,8 @@
29#include <subdev/bios/init.h> 29#include <subdev/bios/init.h>
30#include <subdev/bios/rammap.h> 30#include <subdev/bios/rammap.h>
31#include <subdev/bios/timing.h> 31#include <subdev/bios/timing.h>
32#include <subdev/bios/M0205.h>
33#include <subdev/bios/M0209.h>
32 34
33#include <subdev/clock.h> 35#include <subdev/clock.h>
34#include <subdev/clock/pll.h> 36#include <subdev/clock/pll.h>
@@ -41,14 +43,6 @@
41 43
42#include "ramfuc.h" 44#include "ramfuc.h"
43 45
44/* binary driver only executes this path if the condition (a) is true
45 * for any configuration (combination of rammap+ramcfg+timing) that
46 * can be reached on a given card. for now, we will execute the branch
47 * unconditionally in the hope that a "false everywhere" in the bios
48 * tables doesn't actually mean "don't touch this".
49 */
50#define NOTE00(a) 1
51
52struct nve0_ramfuc { 46struct nve0_ramfuc {
53 struct ramfuc base; 47 struct ramfuc base;
54 48
@@ -134,10 +128,12 @@ struct nve0_ram {
134 struct nouveau_ram base; 128 struct nouveau_ram base;
135 struct nve0_ramfuc fuc; 129 struct nve0_ramfuc fuc;
136 130
131 struct list_head cfg;
137 u32 parts; 132 u32 parts;
138 u32 pmask; 133 u32 pmask;
139 u32 pnuts; 134 u32 pnuts;
140 135
136 struct nvbios_ramcfg diff;
141 int from; 137 int from;
142 int mode; 138 int mode;
143 int N1, fN1, M1, P1; 139 int N1, fN1, M1, P1;
@@ -241,7 +237,7 @@ nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
241{ 237{
242 struct nve0_fb_priv *priv = (void *)nouveau_fb(ram); 238 struct nve0_fb_priv *priv = (void *)nouveau_fb(ram);
243 struct ramfuc *fuc = &ram->fuc.base; 239 struct ramfuc *fuc = &ram->fuc.base;
244 u32 addr = 0x110000 + (reg->addr[0] & 0xfff); 240 u32 addr = 0x110000 + (reg->addr & 0xfff);
245 u32 mask = _mask | _copy; 241 u32 mask = _mask | _copy;
246 u32 data = (_data & _mask) | (reg->data & _copy); 242 u32 data = (_data & _mask) | (reg->data & _copy);
247 u32 i; 243 u32 i;
@@ -268,6 +264,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
268 u32 mask, data; 264 u32 mask, data;
269 265
270 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); 266 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
267 ram_block(fuc);
271 ram_wr32(fuc, 0x62c000, 0x0f0f0000); 268 ram_wr32(fuc, 0x62c000, 0x0f0f0000);
272 269
273 /* MR1: turn termination on early, for some reason.. */ 270 /* MR1: turn termination on early, for some reason.. */
@@ -478,7 +475,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
478 ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]); 475 ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
479 476
480 data = mask = 0x00000000; 477 data = mask = 0x00000000;
481 if (NOTE00(ramcfg_08_20)) { 478 if (ram->diff.ramcfg_11_08_20) {
482 if (next->bios.ramcfg_11_08_20) 479 if (next->bios.ramcfg_11_08_20)
483 data |= 0x01000000; 480 data |= 0x01000000;
484 mask |= 0x01000000; 481 mask |= 0x01000000;
@@ -486,11 +483,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
486 ram_mask(fuc, 0x10f200, mask, data); 483 ram_mask(fuc, 0x10f200, mask, data);
487 484
488 data = mask = 0x00000000; 485 data = mask = 0x00000000;
489 if (NOTE00(ramcfg_02_03 != 0)) { 486 if (ram->diff.ramcfg_11_02_03) {
490 data |= next->bios.ramcfg_11_02_03 << 8; 487 data |= next->bios.ramcfg_11_02_03 << 8;
491 mask |= 0x00000300; 488 mask |= 0x00000300;
492 } 489 }
493 if (NOTE00(ramcfg_01_10)) { 490 if (ram->diff.ramcfg_11_01_10) {
494 if (next->bios.ramcfg_11_01_10) 491 if (next->bios.ramcfg_11_01_10)
495 data |= 0x70000000; 492 data |= 0x70000000;
496 mask |= 0x70000000; 493 mask |= 0x70000000;
@@ -498,11 +495,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
498 ram_mask(fuc, 0x10f604, mask, data); 495 ram_mask(fuc, 0x10f604, mask, data);
499 496
500 data = mask = 0x00000000; 497 data = mask = 0x00000000;
501 if (NOTE00(timing_30_07 != 0)) { 498 if (ram->diff.timing_20_30_07) {
502 data |= next->bios.timing_20_30_07 << 28; 499 data |= next->bios.timing_20_30_07 << 28;
503 mask |= 0x70000000; 500 mask |= 0x70000000;
504 } 501 }
505 if (NOTE00(ramcfg_01_01)) { 502 if (ram->diff.ramcfg_11_01_01) {
506 if (next->bios.ramcfg_11_01_01) 503 if (next->bios.ramcfg_11_01_01)
507 data |= 0x00000100; 504 data |= 0x00000100;
508 mask |= 0x00000100; 505 mask |= 0x00000100;
@@ -510,11 +507,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
510 ram_mask(fuc, 0x10f614, mask, data); 507 ram_mask(fuc, 0x10f614, mask, data);
511 508
512 data = mask = 0x00000000; 509 data = mask = 0x00000000;
513 if (NOTE00(timing_30_07 != 0)) { 510 if (ram->diff.timing_20_30_07) {
514 data |= next->bios.timing_20_30_07 << 28; 511 data |= next->bios.timing_20_30_07 << 28;
515 mask |= 0x70000000; 512 mask |= 0x70000000;
516 } 513 }
517 if (NOTE00(ramcfg_01_02)) { 514 if (ram->diff.ramcfg_11_01_02) {
518 if (next->bios.ramcfg_11_01_02) 515 if (next->bios.ramcfg_11_01_02)
519 data |= 0x00000100; 516 data |= 0x00000100;
520 mask |= 0x00000100; 517 mask |= 0x00000100;
@@ -548,11 +545,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
548 ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f); 545 ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
549 546
550 data = mask = 0x00000000; 547 data = mask = 0x00000000;
551 if (NOTE00(ramcfg_02_03 != 0)) { 548 if (ram->diff.ramcfg_11_02_03) {
552 data |= next->bios.ramcfg_11_02_03; 549 data |= next->bios.ramcfg_11_02_03;
553 mask |= 0x00000003; 550 mask |= 0x00000003;
554 } 551 }
555 if (NOTE00(ramcfg_01_10)) { 552 if (ram->diff.ramcfg_11_01_10) {
556 if (next->bios.ramcfg_11_01_10) 553 if (next->bios.ramcfg_11_01_10)
557 data |= 0x00000004; 554 data |= 0x00000004;
558 mask |= 0x00000004; 555 mask |= 0x00000004;
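
The removed NOTE00() macro evaluated to 1, i.e. "always program the field"; it is replaced by ram->diff.*, which (judging from the constructor changes at the end of this file's diff) marks the ramcfg/timing fields that actually vary between the configurations reachable on the board. Only varying fields enter the register mask, and their data always comes from the target config in next->bios. The pattern in isolation:

#include <stdint.h>
#include <stdio.h>

struct cfg { uint8_t ramcfg_01_10, ramcfg_02_03; };

int main(void)
{
        struct cfg diff = { .ramcfg_01_10 = 1, .ramcfg_02_03 = 0 };
        struct cfg next = { .ramcfg_01_10 = 1, .ramcfg_02_03 = 2 };
        uint32_t mask = 0, data = 0;

        if (diff.ramcfg_02_03) {        /* never varies: leave it alone */
                data |= (uint32_t)next.ramcfg_02_03 << 8;
                mask |= 0x00000300;
        }
        if (diff.ramcfg_01_10) {        /* varies: program it */
                if (next.ramcfg_01_10)
                        data |= 0x70000000;
                mask |= 0x70000000;
        }
        /* expected: mask 0x70000000 data 0x70000000 */
        printf("mask 0x%08x data 0x%08x\n", mask, data);
        return 0;
}
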
@@ -666,6 +663,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
666 if (next->bios.ramcfg_11_07_02) 663 if (next->bios.ramcfg_11_07_02)
667 nve0_ram_train(fuc, 0x80020000, 0x01000000); 664 nve0_ram_train(fuc, 0x80020000, 0x01000000);
668 665
666 ram_unblock(fuc);
669 ram_wr32(fuc, 0x62c000, 0x0f0f0f00); 667 ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
670 668
671 if (next->bios.rammap_11_08_01) 669 if (next->bios.rammap_11_08_01)
@@ -695,6 +693,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
695 u32 mask, data; 693 u32 mask, data;
696 694
697 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); 695 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
696 ram_block(fuc);
698 ram_wr32(fuc, 0x62c000, 0x0f0f0000); 697 ram_wr32(fuc, 0x62c000, 0x0f0f0000);
699 698
700 if (vc == 1 && ram_have(fuc, gpio2E)) { 699 if (vc == 1 && ram_have(fuc, gpio2E)) {
@@ -917,6 +916,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
917 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000); 916 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
918 ram_nsec(fuc, 1000); 917 ram_nsec(fuc, 1000);
919 918
919 ram_unblock(fuc);
920 ram_wr32(fuc, 0x62c000, 0x0f0f0f00); 920 ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
921 921
922 if (next->bios.rammap_11_08_01) 922 if (next->bios.rammap_11_08_01)
@@ -932,58 +932,24 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
932 ******************************************************************************/ 932 ******************************************************************************/
933 933
934static int 934static int
935nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq, 935nve0_ram_calc_data(struct nouveau_fb *pfb, u32 khz,
936 struct nouveau_ram_data *data) 936 struct nouveau_ram_data *data)
937{ 937{
938 struct nouveau_bios *bios = nouveau_bios(pfb);
939 struct nve0_ram *ram = (void *)pfb->ram; 938 struct nve0_ram *ram = (void *)pfb->ram;
940 u8 strap, cnt, len; 939 struct nouveau_ram_data *cfg;
941 940 u32 mhz = khz / 1000;
942 /* lookup memory config data relevant to the target frequency */ 941
943 ram->base.rammap.data = nvbios_rammapEp(bios, freq / 1000, 942 list_for_each_entry(cfg, &ram->cfg, head) {
944 &ram->base.rammap.version, 943 if (mhz >= cfg->bios.rammap_min &&
945 &ram->base.rammap.size, 944 mhz <= cfg->bios.rammap_max) {
946 &cnt, &len, &data->bios); 945 *data = *cfg;
947 if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 || 946 data->freq = khz;
948 ram->base.rammap.size < 0x09) { 947 return 0;
949 nv_error(pfb, "invalid/missing rammap entry\n");
950 return -EINVAL;
951 }
952
953 /* locate specific data set for the attached memory */
954 strap = nvbios_ramcfg_index(nv_subdev(pfb));
955 ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
956 ram->base.rammap.version,
957 ram->base.rammap.size,
958 cnt, len, strap,
959 &ram->base.ramcfg.version,
960 &ram->base.ramcfg.size,
961 &data->bios);
962 if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
963 ram->base.ramcfg.size < 0x08) {
964 nv_error(pfb, "invalid/missing ramcfg entry\n");
965 return -EINVAL;
966 }
967
968 /* lookup memory timings, if bios says they're present */
969 strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
970 if (strap != 0xff) {
971 ram->base.timing.data =
972 nvbios_timingEp(bios, strap, &ram->base.timing.version,
973 &ram->base.timing.size, &cnt, &len,
974 &data->bios);
975 if (!ram->base.timing.data ||
976 ram->base.timing.version != 0x20 ||
977 ram->base.timing.size < 0x33) {
978 nv_error(pfb, "invalid/missing timing entry\n");
979 return -EINVAL;
980 } 948 }
981 } else {
982 ram->base.timing.data = 0;
983 } 949 }
984 950
985 data->freq = freq; 951 nv_error(ram, "ramcfg data for %dMHz not found\n", mhz);
986 return 0; 952 return -EINVAL;
987} 953}
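
nve0_ram_calc_data() no longer re-parses the rammap/ramcfg/timing tables on every reclock: the entries are parsed once into the ram->cfg list (see nve0_ram_ctor_data() below) and the calc path just copies the first cached entry whose MHz window contains the target, stamping in the exact kHz value. A standalone sketch with a plain singly linked list standing in for the kernel's list_head:

#include <stdio.h>

struct ram_data {
        unsigned rammap_min, rammap_max;        /* MHz window */
        unsigned freq;                          /* kHz, filled on use */
        struct ram_data *next;
};

static struct ram_data high = { 901, 1250, 0, NULL };
static struct ram_data low  = { 100, 900, 0, &high };
static struct ram_data *cfgs = &low;

static int calc_data(unsigned khz, struct ram_data *out)
{
        unsigned mhz = khz / 1000;

        for (struct ram_data *cfg = cfgs; cfg; cfg = cfg->next) {
                if (mhz >= cfg->rammap_min && mhz <= cfg->rammap_max) {
                        *out = *cfg;            /* copy the cached entry */
                        out->freq = khz;        /* ... for this target */
                        return 0;
                }
        }
        fprintf(stderr, "ramcfg data for %uMHz not found\n", mhz);
        return -1;
}

int main(void)
{
        struct ram_data d;

        if (!calc_data(1000000, &d))
                printf("window %u-%uMHz, freq %ukHz\n",
                       d.rammap_min, d.rammap_max, d.freq);
        return 0;
}
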
988 954
989static int 955static int
@@ -1106,13 +1072,99 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
1106 return nve0_ram_calc_xits(pfb, ram->base.next); 1072 return nve0_ram_calc_xits(pfb, ram->base.next);
1107} 1073}
1108 1074
1075static void
1076nve0_ram_prog_0(struct nouveau_fb *pfb, u32 freq)
1077{
1078 struct nve0_ram *ram = (void *)pfb->ram;
1079 struct nouveau_ram_data *cfg;
1080 u32 mhz = freq / 1000;
1081 u32 mask, data;
1082
1083 list_for_each_entry(cfg, &ram->cfg, head) {
1084 if (mhz >= cfg->bios.rammap_min &&
1085 mhz <= cfg->bios.rammap_max)
1086 break;
1087 }
1088
1089 if (&cfg->head == &ram->cfg)
1090 return;
1091
1092 if (mask = 0, data = 0, ram->diff.rammap_11_0a_03fe) {
1093 data |= cfg->bios.rammap_11_0a_03fe << 12;
1094 mask |= 0x001ff000;
1095 }
1096 if (ram->diff.rammap_11_09_01ff) {
1097 data |= cfg->bios.rammap_11_09_01ff;
1098 mask |= 0x000001ff;
1099 }
1100 nv_mask(pfb, 0x10f468, mask, data);
1101
1102 if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
1103 data |= cfg->bios.rammap_11_0a_0400;
1104 mask |= 0x00000001;
1105 }
1106 nv_mask(pfb, 0x10f420, mask, data);
1107
1108 if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
1109 data |= cfg->bios.rammap_11_0a_0800;
1110 mask |= 0x00000001;
1111 }
1112 nv_mask(pfb, 0x10f430, mask, data);
1113
1114 if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
1115 data |= cfg->bios.rammap_11_0b_01f0;
1116 mask |= 0x0000001f;
1117 }
1118 nv_mask(pfb, 0x10f400, mask, data);
1119
1120 if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
1121 data |= cfg->bios.rammap_11_0b_0200 << 9;
1122 mask |= 0x00000200;
1123 }
1124 nv_mask(pfb, 0x10f410, mask, data);
1125
1126 if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
1127 data |= cfg->bios.rammap_11_0d << 16;
1128 mask |= 0x00ff0000;
1129 }
1130 if (ram->diff.rammap_11_0f) {
1131 data |= cfg->bios.rammap_11_0f << 8;
1132 mask |= 0x0000ff00;
1133 }
1134 nv_mask(pfb, 0x10f440, mask, data);
1135
1136 if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
1137 data |= cfg->bios.rammap_11_0e << 8;
1138 mask |= 0x0000ff00;
1139 }
1140 if (ram->diff.rammap_11_0b_0800) {
1141 data |= cfg->bios.rammap_11_0b_0800 << 7;
1142 mask |= 0x00000080;
1143 }
1144 if (ram->diff.rammap_11_0b_0400) {
1145 data |= cfg->bios.rammap_11_0b_0400 << 5;
1146 mask |= 0x00000020;
1147 }
1148 nv_mask(pfb, 0x10f444, mask, data);
1149}
1150
1109static int 1151static int
1110nve0_ram_prog(struct nouveau_fb *pfb) 1152nve0_ram_prog(struct nouveau_fb *pfb)
1111{ 1153{
1112 struct nouveau_device *device = nv_device(pfb); 1154 struct nouveau_device *device = nv_device(pfb);
1113 struct nve0_ram *ram = (void *)pfb->ram; 1155 struct nve0_ram *ram = (void *)pfb->ram;
1114 struct nve0_ramfuc *fuc = &ram->fuc; 1156 struct nve0_ramfuc *fuc = &ram->fuc;
1115 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true)); 1157 struct nouveau_ram_data *next = ram->base.next;
1158
1159 if (!nouveau_boolopt(device->cfgopt, "NvMemExec", true)) {
1160 ram_exec(fuc, false);
1161 return (ram->base.next == &ram->base.xition);
1162 }
1163
1164 nve0_ram_prog_0(pfb, 1000);
1165 ram_exec(fuc, true);
1166 nve0_ram_prog_0(pfb, next->freq);
1167
1116 return (ram->base.next == &ram->base.xition); 1168 return (ram->base.next == &ram->base.xition);
1117} 1169}
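
nve0_ram_prog_0() leans on the comma operator: in "if (mask = 0, data = 0, ram->diff....)" the two assignments execute first and the last operand controls the branch, so every register gets a freshly zeroed mask/data pair without extra lines. Note also the call sequence in nve0_ram_prog(): prog_0 runs once at 1000 kHz before the script executes and once at the target frequency after it, apparently so these registers are set up around, rather than inside, the scripted sequence. A minimal demonstration of the comma idiom:

#include <stdio.h>

int main(void)
{
        unsigned mask = 0xffffffff, data = 0xffffffff;
        int field_varies = 1;

        /* both assignments run, then field_varies decides the branch */
        if (mask = 0, data = 0, field_varies)
                printf("branch taken with mask=0x%x data=0x%x\n", mask, data);
        return 0;
}
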
1118 1170
@@ -1125,24 +1177,147 @@ nve0_ram_tidy(struct nouveau_fb *pfb)
1125 ram_exec(fuc, false); 1177 ram_exec(fuc, false);
1126} 1178}
1127 1179
1180struct nve0_ram_train {
1181 u16 mask;
1182 struct nvbios_M0209S remap;
1183 struct nvbios_M0209S type00;
1184 struct nvbios_M0209S type01;
1185 struct nvbios_M0209S type04;
1186 struct nvbios_M0209S type06;
1187 struct nvbios_M0209S type07;
1188 struct nvbios_M0209S type08;
1189 struct nvbios_M0209S type09;
1190};
1191
1192static int
1193nve0_ram_train_type(struct nouveau_fb *pfb, int i, u8 ramcfg,
1194 struct nve0_ram_train *train)
1195{
1196 struct nouveau_bios *bios = nouveau_bios(pfb);
1197 struct nvbios_M0205E M0205E;
1198 struct nvbios_M0205S M0205S;
1199 struct nvbios_M0209E M0209E;
1200 struct nvbios_M0209S *remap = &train->remap;
1201 struct nvbios_M0209S *value;
1202 u8 ver, hdr, cnt, len;
1203 u32 data;
1204
1205 /* determine type of data for this index */
1206 if (!(data = nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E)))
1207 return -ENOENT;
1208
1209 switch (M0205E.type) {
1210 case 0x00: value = &train->type00; break;
1211 case 0x01: value = &train->type01; break;
1212 case 0x04: value = &train->type04; break;
1213 case 0x06: value = &train->type06; break;
1214 case 0x07: value = &train->type07; break;
1215 case 0x08: value = &train->type08; break;
1216 case 0x09: value = &train->type09; break;
1217 default:
1218 return 0;
1219 }
1220
1221 /* training data index determined by ramcfg strap */
1222 if (!(data = nvbios_M0205Sp(bios, i, ramcfg, &ver, &hdr, &M0205S)))
1223 return -EINVAL;
1224 i = M0205S.data;
1225
1226 /* training data format information */
1227 if (!(data = nvbios_M0209Ep(bios, i, &ver, &hdr, &cnt, &len, &M0209E)))
1228 return -EINVAL;
1229
1230 /* ... and the raw data */
1231 if (!(data = nvbios_M0209Sp(bios, i, 0, &ver, &hdr, value)))
1232 return -EINVAL;
1233
1234 if (M0209E.v02_07 == 2) {
1235 /* of course! why wouldn't we have a pointer to another entry
1236 * in the same table, and use the first one as an array of
1237 * remap indices...
1238 */
1239 if (!(data = nvbios_M0209Sp(bios, M0209E.v03, 0, &ver, &hdr,
1240 remap)))
1241 return -EINVAL;
1242
1243 for (i = 0; i < ARRAY_SIZE(value->data); i++)
1244 value->data[i] = remap->data[value->data[i]];
1245 } else
1246 if (M0209E.v02_07 != 1)
1247 return -EINVAL;
1248
1249 train->mask |= 1 << M0205E.type;
1250 return 0;
1251}
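
nve0_ram_train_type() handles two M0209 layouts: v02_07 == 1 entries carry the training values directly, while v02_07 == 2 entries carry indices into another entry of the same table (M0209E.v03), which the loop resolves in place via value->data[i] = remap->data[value->data[i]]. The remap step on its own:

#include <stdio.h>

int main(void)
{
        unsigned remap[] = { 0x00, 0x11, 0x22, 0x33 };
        unsigned value[] = { 3, 0, 2, 2, 1 };   /* indices before remap */

        /* rewrite each index through the remap table, as above */
        for (unsigned i = 0; i < sizeof(value) / sizeof(value[0]); i++)
                value[i] = remap[value[i]];

        for (unsigned i = 0; i < sizeof(value) / sizeof(value[0]); i++)
                printf("0x%02x ", value[i]);
        printf("\n");   /* 0x33 0x00 0x22 0x22 0x11 */
        return 0;
}
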
1252
1253static int
1254nve0_ram_train_init_0(struct nouveau_fb *pfb, struct nve0_ram_train *train)
1255{
1256 int i, j;
1257
1258 if ((train->mask & 0x03d3) != 0x03d3) {
1259 nv_warn(pfb, "missing link training data\n");
1260 return -EINVAL;
1261 }
1262
1263 for (i = 0; i < 0x30; i++) {
1264 for (j = 0; j < 8; j += 4) {
1265 nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8));
1266 nv_wr32(pfb, 0x10f920 + j, 0x00000000 |
1267 train->type08.data[i] << 4 |
1268 train->type06.data[i]);
1269 nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]);
1270 nv_wr32(pfb, 0x10f920 + j, 0x00000100 |
1271 train->type09.data[i] << 4 |
1272 train->type07.data[i]);
1273 nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]);
1274 }
1275 }
1276
1277 for (j = 0; j < 8; j += 4) {
1278 for (i = 0; i < 0x100; i++) {
1279 nv_wr32(pfb, 0x10f968 + j, i);
1280 nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]);
1281 }
1282 }
1283
1284 return 0;
1285}
1286
1287static int
1288nve0_ram_train_init(struct nouveau_fb *pfb)
1289{
1290 u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
1291 struct nve0_ram_train *train;
1292 int ret = -ENOMEM, i;
1293
1294 if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
1295 for (i = 0; i < 0x100; i++) {
1296 ret = nve0_ram_train_type(pfb, i, ramcfg, train);
1297 if (ret && ret != -ENOENT)
1298 break;
1299 }
1300 }
1301
1302 switch (pfb->ram->type) {
1303 case NV_MEM_TYPE_GDDR5:
1304 ret = nve0_ram_train_init_0(pfb, train);
1305 break;
1306 default:
1307 ret = 0;
1308 break;
1309 }
1310
1311 kfree(train);
1312 return ret;
1313}
1314
1128int 1315int
1129nve0_ram_init(struct nouveau_object *object) 1316nve0_ram_init(struct nouveau_object *object)
1130{ 1317{
1131 struct nouveau_fb *pfb = (void *)object->parent; 1318 struct nouveau_fb *pfb = (void *)object->parent;
1132 struct nve0_ram *ram = (void *)object; 1319 struct nve0_ram *ram = (void *)object;
1133 struct nouveau_bios *bios = nouveau_bios(pfb); 1320 struct nouveau_bios *bios = nouveau_bios(pfb);
1134 static const u8 train0[] = {
1135 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
1136 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
1137 };
1138 static const u32 train1[] = {
1139 0x00000000, 0xffffffff,
1140 0x55555555, 0xaaaaaaaa,
1141 0x33333333, 0xcccccccc,
1142 0xf0f0f0f0, 0x0f0f0f0f,
1143 0x00ff00ff, 0xff00ff00,
1144 0x0000ffff, 0xffff0000,
1145 };
1146 u8 ver, hdr, cnt, len, snr, ssz; 1321 u8 ver, hdr, cnt, len, snr, ssz;
1147 u32 data, save; 1322 u32 data, save;
1148 int ret, i; 1323 int ret, i;
@@ -1168,51 +1343,107 @@ nve0_ram_init(struct nouveau_object *object)
1168 1343
1169 cnt = nv_ro08(bios, data + 0x14); /* guess at count */ 1344 cnt = nv_ro08(bios, data + 0x14); /* guess at count */
1170 data = nv_ro32(bios, data + 0x10); /* guess u32... */ 1345 data = nv_ro32(bios, data + 0x10); /* guess u32... */
1171 save = nv_rd32(pfb, 0x10f65c); 1346 save = nv_rd32(pfb, 0x10f65c) & 0x000000f0;
1172 for (i = 0; i < cnt; i++) { 1347 for (i = 0; i < cnt; i++, data += 4) {
1173 nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4); 1348 if (i != save >> 4) {
1174 nvbios_exec(&(struct nvbios_init) { 1349 nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
1175 .subdev = nv_subdev(pfb), 1350 nvbios_exec(&(struct nvbios_init) {
1176 .bios = bios, 1351 .subdev = nv_subdev(pfb),
1177 .offset = nv_ro32(bios, data), /* guess u32 */ 1352 .bios = bios,
1178 .execute = 1, 1353 .offset = nv_ro32(bios, data),
1179 }); 1354 .execute = 1,
1180 data += 4; 1355 });
1181 } 1356 }
1182 nv_wr32(pfb, 0x10f65c, save); 1357 }
1358 nv_mask(pfb, 0x10f65c, 0x000000f0, save);
1183 nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000); 1359 nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
1360 nv_wr32(pfb, 0x10ecc0, 0xffffffff);
1361 nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
1184 1362
1185 switch (ram->base.type) { 1363 return nve0_ram_train_init(pfb);
1186 case NV_MEM_TYPE_GDDR5: 1364}
1187 for (i = 0; i < 0x30; i++) {
1188 nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
1189 nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
1190 nv_wr32(pfb, 0x10f918, train1[i % 12]);
1191 nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
1192 nv_wr32(pfb, 0x10f918, train1[i % 12]);
1193
1194 nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
1195 nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
1196 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
1197 nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
1198 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
1199 }
1200 1365
1201 for (i = 0; i < 0x100; i++) { 1366static int
1202 nv_wr32(pfb, 0x10f968, i); 1367nve0_ram_ctor_data(struct nve0_ram *ram, u8 ramcfg, int i)
1203 nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]); 1368{
1204 } 1369 struct nouveau_fb *pfb = (void *)nv_object(ram)->parent;
1370 struct nouveau_bios *bios = nouveau_bios(pfb);
1371 struct nouveau_ram_data *cfg;
1372 struct nvbios_ramcfg *d = &ram->diff;
1373 struct nvbios_ramcfg *p, *n;
1374 u8 ver, hdr, cnt, len;
1375 u32 data;
1376 int ret;
1205 1377
1206 for (i = 0; i < 0x100; i++) { 1378 if (!(cfg = kmalloc(sizeof(*cfg), GFP_KERNEL)))
1207 nv_wr32(pfb, 0x10f96c, i); 1379 return -ENOMEM;
1208 nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]); 1380 p = &list_last_entry(&ram->cfg, typeof(*cfg), head)->bios;
1209 } 1381 n = &cfg->bios;
1210 break; 1382
1211 default: 1383 /* memory config data for a range of target frequencies */
1212 break; 1384 data = nvbios_rammapEp(bios, i, &ver, &hdr, &cnt, &len, &cfg->bios);
1385 if (ret = -ENOENT, !data)
1386 goto done;
1387 if (ret = -ENOSYS, ver != 0x11 || hdr < 0x12)
1388 goto done;
1389
1390 /* ... and a portion specific to the attached memory */
1391 data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, ramcfg,
1392 &ver, &hdr, &cfg->bios);
1393 if (ret = -EINVAL, !data)
1394 goto done;
1395 if (ret = -ENOSYS, ver != 0x11 || hdr < 0x0a)
1396 goto done;
1397
1398 /* lookup memory timings, if bios says they're present */
1399 if (cfg->bios.ramcfg_timing != 0xff) {
1400 data = nvbios_timingEp(bios, cfg->bios.ramcfg_timing,
1401 &ver, &hdr, &cnt, &len,
1402 &cfg->bios);
1403 if (ret = -EINVAL, !data)
1404 goto done;
1405 if (ret = -ENOSYS, ver != 0x20 || hdr < 0x33)
1406 goto done;
1213 } 1407 }
1214 1408
1215 return 0; 1409 list_add_tail(&cfg->head, &ram->cfg);
1410 if (ret = 0, i == 0)
1411 goto done;
1412
1413 d->rammap_11_0a_03fe |= p->rammap_11_0a_03fe != n->rammap_11_0a_03fe;
1414 d->rammap_11_09_01ff |= p->rammap_11_09_01ff != n->rammap_11_09_01ff;
1415 d->rammap_11_0a_0400 |= p->rammap_11_0a_0400 != n->rammap_11_0a_0400;
1416 d->rammap_11_0a_0800 |= p->rammap_11_0a_0800 != n->rammap_11_0a_0800;
1417 d->rammap_11_0b_01f0 |= p->rammap_11_0b_01f0 != n->rammap_11_0b_01f0;
1418 d->rammap_11_0b_0200 |= p->rammap_11_0b_0200 != n->rammap_11_0b_0200;
1419 d->rammap_11_0d |= p->rammap_11_0d != n->rammap_11_0d;
1420 d->rammap_11_0f |= p->rammap_11_0f != n->rammap_11_0f;
1421 d->rammap_11_0e |= p->rammap_11_0e != n->rammap_11_0e;
1422 d->rammap_11_0b_0800 |= p->rammap_11_0b_0800 != n->rammap_11_0b_0800;
1423 d->rammap_11_0b_0400 |= p->rammap_11_0b_0400 != n->rammap_11_0b_0400;
1424 d->ramcfg_11_01_01 |= p->ramcfg_11_01_01 != n->ramcfg_11_01_01;
1425 d->ramcfg_11_01_02 |= p->ramcfg_11_01_02 != n->ramcfg_11_01_02;
1426 d->ramcfg_11_01_10 |= p->ramcfg_11_01_10 != n->ramcfg_11_01_10;
1427 d->ramcfg_11_02_03 |= p->ramcfg_11_02_03 != n->ramcfg_11_02_03;
1428 d->ramcfg_11_08_20 |= p->ramcfg_11_08_20 != n->ramcfg_11_08_20;
1429 d->timing_20_30_07 |= p->timing_20_30_07 != n->timing_20_30_07;
1430done:
1431 if (ret)
1432 kfree(cfg);
1433 return ret;
1434}
1435
1436static void
1437nve0_ram_dtor(struct nouveau_object *object)
1438{
1439 struct nve0_ram *ram = (void *)object;
1440 struct nouveau_ram_data *cfg, *tmp;
1441
1442 list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
1443 kfree(cfg);
1444 }
1445
1446 nouveau_ram_destroy(&ram->base);
1216} 1447}
1217 1448
1218static int 1449static int
@@ -1226,6 +1457,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1226 struct dcb_gpio_func func; 1457 struct dcb_gpio_func func;
1227 struct nve0_ram *ram; 1458 struct nve0_ram *ram;
1228 int ret, i; 1459 int ret, i;
1460 u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
1229 u32 tmp; 1461 u32 tmp;
1230 1462
1231 ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram); 1463 ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
@@ -1233,6 +1465,8 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1233 if (ret) 1465 if (ret)
1234 return ret; 1466 return ret;
1235 1467
1468 INIT_LIST_HEAD(&ram->cfg);
1469
1236 switch (ram->base.type) { 1470 switch (ram->base.type) {
1237 case NV_MEM_TYPE_DDR3: 1471 case NV_MEM_TYPE_DDR3:
1238 case NV_MEM_TYPE_GDDR5: 1472 case NV_MEM_TYPE_GDDR5:
@@ -1264,7 +1498,26 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1264 } 1498 }
1265 } 1499 }
1266 1500
1267 // parse bios data for both pll's 1501 /* parse bios data for all rammap table entries up-front, and
1502 * build information on whether certain fields differ between
1503 * any of the entries.
1504 *
1505 * the binary driver appears to completely ignore some fields
1506 * when all entries contain the same value. at first, it was
 1507	 * hoped that these were mere optimisations and that the bios init
 1508	 * tables had already configured things as per the values here, but there is
1509 * evidence now to suggest that this isn't the case and we do
1510 * need to treat this condition as a "don't touch" indicator.
1511 */
1512 for (i = 0; !ret; i++) {
1513 ret = nve0_ram_ctor_data(ram, ramcfg, i);
1514 if (ret && ret != -ENOENT) {
1515 nv_error(pfb, "failed to parse ramcfg data\n");
1516 return ret;
1517 }
1518 }
1519
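The per-field |= comparisons in nve0_ram_ctor_data() implement a simple pass: walk consecutive table entries and OR together a "differs from the previous entry" flag per field, so any flag still zero afterwards marks a field that every entry agrees on -- the "don't touch" case described in the comment above. A sketch of the same pattern, using a hypothetical two-field config struct rather than the real rammap layout:

    #include <stdio.h>

    /* hypothetical stand-in for the per-entry rammap/timing data */
    struct cfg { unsigned freq_div; unsigned drive; };

    int main(void)
    {
        struct cfg table[] = { { 2, 1 }, { 4, 1 }, { 8, 1 } };
        struct cfg diff = { 0, 0 }; /* per-field "differs" flags */

        for (unsigned i = 1; i < sizeof(table) / sizeof(table[0]); i++) {
            diff.freq_div |= table[i - 1].freq_div != table[i].freq_div;
            diff.drive    |= table[i - 1].drive    != table[i].drive;
        }
        /* drive never changes -> treated as "don't touch" */
        printf("freq_div differs: %u, drive differs: %u\n",
               diff.freq_div, diff.drive); /* prints 1, 0 */
        return 0;
    }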
1520 /* parse bios data for both pll's */
1268 ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll); 1521 ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
1269 if (ret) { 1522 if (ret) {
1270 nv_error(pfb, "mclk refpll data not found\n"); 1523 nv_error(pfb, "mclk refpll data not found\n");
@@ -1277,6 +1530,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1277 return ret; 1530 return ret;
1278 } 1531 }
1279 1532
1533 /* lookup memory voltage gpios */
1280 ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func); 1534 ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
1281 if (ret == 0) { 1535 if (ret == 0) {
1282 ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04)); 1536 ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
@@ -1385,7 +1639,7 @@ nve0_ram_oclass = {
1385 .handle = 0, 1639 .handle = 0,
1386 .ofuncs = &(struct nouveau_ofuncs) { 1640 .ofuncs = &(struct nouveau_ofuncs) {
1387 .ctor = nve0_ram_ctor, 1641 .ctor = nve0_ram_ctor,
1388 .dtor = _nouveau_ram_dtor, 1642 .dtor = nve0_ram_dtor,
1389 .init = nve0_ram_init, 1643 .init = nve0_ram_init,
1390 .fini = _nouveau_ram_fini, 1644 .fini = _nouveau_ram_fini,
1391 } 1645 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
new file mode 100644
index 000000000000..bb1eb8f3e639
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2014 Roy Spliet
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Roy Spliet <rspliet@eclipso.eu>
23 * Ben Skeggs
24 */
25
26#include "priv.h"
27
28struct ramxlat {
29 int id;
30 u8 enc;
31};
32
33static inline int
34ramxlat(const struct ramxlat *xlat, int id)
35{
36 while (xlat->id >= 0) {
37 if (xlat->id == id)
38 return xlat->enc;
39 xlat++;
40 }
41 return -EINVAL;
42}
43
44static const struct ramxlat
45ramddr2_cl[] = {
46 { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 },
47 /* The following are available in some, but not all DDR2 docs */
48 { 7, 7 },
49 { -1 }
50};
51
52static const struct ramxlat
53ramddr2_wr[] = {
54 { 2, 1 }, { 3, 2 }, { 4, 3 }, { 5, 4 }, { 6, 5 },
55 /* The following are available in some, but not all DDR2 docs */
56 { 7, 6 },
57 { -1 }
58};
59
60int
61nouveau_sddr2_calc(struct nouveau_ram *ram)
62{
63 int CL, WR, DLL = 0, ODT = 0;
64
65 switch (ram->next->bios.timing_ver) {
66 case 0x10:
67 CL = ram->next->bios.timing_10_CL;
68 WR = ram->next->bios.timing_10_WR;
69 DLL = !ram->next->bios.ramcfg_10_02_40;
70 ODT = ram->next->bios.timing_10_ODT & 3;
71 break;
72 case 0x20:
73 CL = (ram->next->bios.timing[1] & 0x0000001f);
74 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
75 break;
76 default:
77 return -ENOSYS;
78 }
79
80 CL = ramxlat(ramddr2_cl, CL);
81 WR = ramxlat(ramddr2_wr, WR);
82 if (CL < 0 || WR < 0)
83 return -EINVAL;
84
85 ram->mr[0] &= ~0xf70;
86 ram->mr[0] |= (WR & 0x07) << 9;
87 ram->mr[0] |= (CL & 0x07) << 4;
88
89 ram->mr[1] &= ~0x045;
90 ram->mr[1] |= (ODT & 0x1) << 2;
91 ram->mr[1] |= (ODT & 0x2) << 5;
92 ram->mr[1] |= !DLL;
93 return 0;
94}
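The ramxlat() helper above scans a { id, encoding } table terminated by a negative id, and the result is then packed into a mode-register field. A self-contained sketch of the same lookup-and-pack pattern, reusing the DDR2 CL values from this file (illustrative only, not the driver's helper):

    #include <stdio.h>

    struct ramxlat { int id; unsigned char enc; };

    static int ramxlat(const struct ramxlat *xlat, int id)
    {
        for (; xlat->id >= 0; xlat++)
            if (xlat->id == id)
                return xlat->enc;
        return -1; /* stands in for -EINVAL */
    }

    static const struct ramxlat ddr2_cl[] = {
        { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { -1 }
    };

    int main(void)
    {
        int cl = ramxlat(ddr2_cl, 5);
        unsigned mr0 = 0;

        if (cl >= 0)
            mr0 |= (cl & 0x07) << 4; /* CL field of DDR2 MR0, as above */
        printf("mr0 = 0x%03x\n", mr0); /* prints 0x050 */
        return 0;
    }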
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
index ebd4cd9c35d9..83949b11833a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -20,9 +20,9 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 * Roy Spliet <rspliet@eclipso.eu>
23 */ 24 */
24 25
25#include <subdev/bios.h>
26#include "priv.h" 26#include "priv.h"
27 27
28struct ramxlat { 28struct ramxlat {
@@ -69,31 +69,52 @@ ramddr3_cwl[] = {
69int 69int
70nouveau_sddr3_calc(struct nouveau_ram *ram) 70nouveau_sddr3_calc(struct nouveau_ram *ram)
71{ 71{
72 struct nouveau_bios *bios = nouveau_bios(ram); 72 int CWL, CL, WR, DLL = 0, ODT = 0;
73 int WL, CL, WR;
74 73
75 switch (!!ram->timing.data * ram->timing.version) { 74 switch (ram->next->bios.timing_ver) {
75 case 0x10:
76 if (ram->next->bios.timing_hdr < 0x17) {
77 /* XXX: NV50: Get CWL from the timing register */
78 return -ENOSYS;
79 }
80 CWL = ram->next->bios.timing_10_CWL;
81 CL = ram->next->bios.timing_10_CL;
82 WR = ram->next->bios.timing_10_WR;
83 DLL = !ram->next->bios.ramcfg_10_02_40;
84 ODT = ram->next->bios.timing_10_ODT;
85 break;
76 case 0x20: 86 case 0x20:
77 WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7; 87 CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
78 CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f; 88 CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
79 WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f; 89 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
90 /* XXX: Get these values from the VBIOS instead */
91 DLL = !(ram->mr[1] & 0x1);
92 ODT = (ram->mr[1] & 0x004) >> 2 |
93 (ram->mr[1] & 0x040) >> 5 |
94 (ram->mr[1] & 0x200) >> 7;
80 break; 95 break;
81 default: 96 default:
82 return -ENOSYS; 97 return -ENOSYS;
83 } 98 }
84 99
85 WL = ramxlat(ramddr3_cwl, WL); 100 CWL = ramxlat(ramddr3_cwl, CWL);
86 CL = ramxlat(ramddr3_cl, CL); 101 CL = ramxlat(ramddr3_cl, CL);
87 WR = ramxlat(ramddr3_wr, WR); 102 WR = ramxlat(ramddr3_wr, WR);
88 if (WL < 0 || CL < 0 || WR < 0) 103 if (CL < 0 || CWL < 0 || WR < 0)
89 return -EINVAL; 104 return -EINVAL;
90 105
91 ram->mr[0] &= ~0xe74; 106 ram->mr[0] &= ~0xf74;
92 ram->mr[0] |= (WR & 0x07) << 9; 107 ram->mr[0] |= (WR & 0x07) << 9;
93 ram->mr[0] |= (CL & 0x0e) << 3; 108 ram->mr[0] |= (CL & 0x0e) << 3;
94 ram->mr[0] |= (CL & 0x01) << 2; 109 ram->mr[0] |= (CL & 0x01) << 2;
95 110
111 ram->mr[1] &= ~0x245;
112 ram->mr[1] |= (ODT & 0x1) << 2;
113 ram->mr[1] |= (ODT & 0x2) << 5;
114 ram->mr[1] |= (ODT & 0x4) << 7;
115 ram->mr[1] |= !DLL;
116
96 ram->mr[2] &= ~0x038; 117 ram->mr[2] &= ~0x038;
97 ram->mr[2] |= (WL & 0x07) << 3; 118 ram->mr[2] |= (CWL & 0x07) << 3;
98 return 0; 119 return 0;
99} 120}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/core/subdev/fuse/base.c
new file mode 100644
index 000000000000..9e8e92127715
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fuse/base.c
@@ -0,0 +1,54 @@
1/*
2 * Copyright 2014 Martin Peres
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/fuse.h>
26
27int
28_nouveau_fuse_init(struct nouveau_object *object)
29{
30 struct nouveau_fuse *fuse = (void *)object;
31 return nouveau_subdev_init(&fuse->base);
32}
33
34void
35_nouveau_fuse_dtor(struct nouveau_object *object)
36{
37 struct nouveau_fuse *fuse = (void *)object;
38 nouveau_subdev_destroy(&fuse->base);
39}
40
41int
42nouveau_fuse_create_(struct nouveau_object *parent,
43 struct nouveau_object *engine,
44 struct nouveau_oclass *oclass, int length, void **pobject)
45{
46 struct nouveau_fuse *fuse;
47 int ret;
48
49 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "FUSE",
50 "fuse", length, pobject);
51 fuse = *pobject;
52
53 return ret;
54}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c b/drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c
new file mode 100644
index 000000000000..a374ade485be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fuse/g80.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2014 Martin Peres
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27struct g80_fuse_priv {
28 struct nouveau_fuse base;
29
30 spinlock_t fuse_enable_lock;
31};
32
33static u32
34g80_fuse_rd32(struct nouveau_object *object, u64 addr)
35{
36 struct g80_fuse_priv *priv = (void *)object;
37 unsigned long flags;
38 u32 fuse_enable, val;
39
40 spin_lock_irqsave(&priv->fuse_enable_lock, flags);
41
 42	 /* racy if another part of nouveau starts writing to this reg */
43 fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800);
44 val = nv_rd32(priv, 0x21000 + addr);
45 nv_wr32(priv, 0x1084, fuse_enable);
46
47 spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
48
49 return val;
50}
51
52
53static int
54g80_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct g80_fuse_priv *priv;
59 int ret;
60
61 ret = nouveau_fuse_create(parent, engine, oclass, &priv);
62 *pobject = nv_object(priv);
63 if (ret)
64 return ret;
65
66 spin_lock_init(&priv->fuse_enable_lock);
67
68 return 0;
69}
70
71struct nouveau_oclass
72g80_fuse_oclass = {
73 .handle = NV_SUBDEV(FUSE, 0x50),
74 .ofuncs = &(struct nouveau_ofuncs) {
75 .ctor = g80_fuse_ctor,
76 .dtor = _nouveau_fuse_dtor,
77 .init = _nouveau_fuse_init,
78 .fini = _nouveau_fuse_fini,
79 .rd32 = g80_fuse_rd32,
80 },
81};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c
new file mode 100644
index 000000000000..5ed03f54b3d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fuse/gf100.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2014 Martin Peres
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27struct gf100_fuse_priv {
28 struct nouveau_fuse base;
29
30 spinlock_t fuse_enable_lock;
31};
32
33static u32
34gf100_fuse_rd32(struct nouveau_object *object, u64 addr)
35{
36 struct gf100_fuse_priv *priv = (void *)object;
37 unsigned long flags;
38 u32 fuse_enable, unk, val;
39
40 spin_lock_irqsave(&priv->fuse_enable_lock, flags);
41
 42	 /* racy if another part of nouveau starts writing to these regs */
43 fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800);
44 unk = nv_mask(priv, 0x21000, 0x1, 0x1);
45 val = nv_rd32(priv, 0x21100 + addr);
46 nv_wr32(priv, 0x21000, unk);
47 nv_wr32(priv, 0x22400, fuse_enable);
48
49 spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
50
51 return val;
52}
53
54
55static int
56gf100_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct gf100_fuse_priv *priv;
61 int ret;
62
63 ret = nouveau_fuse_create(parent, engine, oclass, &priv);
64 *pobject = nv_object(priv);
65 if (ret)
66 return ret;
67
68 spin_lock_init(&priv->fuse_enable_lock);
69
70 return 0;
71}
72
73struct nouveau_oclass
74gf100_fuse_oclass = {
75 .handle = NV_SUBDEV(FUSE, 0xC0),
76 .ofuncs = &(struct nouveau_ofuncs) {
77 .ctor = gf100_fuse_ctor,
78 .dtor = _nouveau_fuse_dtor,
79 .init = _nouveau_fuse_init,
80 .fini = _nouveau_fuse_fini,
81 .rd32 = gf100_fuse_rd32,
82 },
83};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c
new file mode 100644
index 000000000000..4f1a636c6538
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fuse/gm107.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2014 Martin Peres
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27struct gm107_fuse_priv {
28 struct nouveau_fuse base;
29};
30
31static u32
32gm107_fuse_rd32(struct nouveau_object *object, u64 addr)
33{
 34	 struct gm107_fuse_priv *priv = (void *)object;
35
36 return nv_rd32(priv, 0x21100 + addr);
37}
38
39
40static int
41gm107_fuse_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
42 struct nouveau_oclass *oclass, void *data, u32 size,
43 struct nouveau_object **pobject)
44{
45 struct gm107_fuse_priv *priv;
46 int ret;
47
48 ret = nouveau_fuse_create(parent, engine, oclass, &priv);
49 *pobject = nv_object(priv);
50 if (ret)
51 return ret;
52
53 return 0;
54}
55
56struct nouveau_oclass
57gm107_fuse_oclass = {
58 .handle = NV_SUBDEV(FUSE, 0x117),
59 .ofuncs = &(struct nouveau_ofuncs) {
60 .ctor = gm107_fuse_ctor,
61 .dtor = _nouveau_fuse_dtor,
62 .init = _nouveau_fuse_init,
63 .fini = _nouveau_fuse_fini,
64 .rd32 = gm107_fuse_rd32,
65 },
66};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h
new file mode 100644
index 000000000000..d2085411a5cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fuse/priv.h
@@ -0,0 +1,9 @@
1#ifndef __NVKM_FUSE_PRIV_H__
2#define __NVKM_FUSE_PRIV_H__
3
4#include <subdev/fuse.h>
5
6int _nouveau_fuse_init(struct nouveau_object *object);
7void _nouveau_fuse_dtor(struct nouveau_object *object);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index b1e3ed7c8beb..7ad99b763f4c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -122,7 +122,8 @@ nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index)
122} 122}
123 123
124static int 124static int
125nouveau_gpio_intr_ctor(void *data, u32 size, struct nvkm_notify *notify) 125nouveau_gpio_intr_ctor(struct nouveau_object *object, void *data, u32 size,
126 struct nvkm_notify *notify)
126{ 127{
127 struct nvkm_gpio_ntfy_req *req = data; 128 struct nvkm_gpio_ntfy_req *req = data;
128 if (!WARN_ON(size != sizeof(*req))) { 129 if (!WARN_ON(size != sizeof(*req))) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
index 252083d376f5..cae404ccadac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
@@ -25,7 +25,7 @@
25#include "priv.h" 25#include "priv.h"
26 26
27void 27void
28nv92_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo) 28nv94_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
29{ 29{
30 u32 intr0 = nv_rd32(gpio, 0x00e054); 30 u32 intr0 = nv_rd32(gpio, 0x00e054);
31 u32 intr1 = nv_rd32(gpio, 0x00e074); 31 u32 intr1 = nv_rd32(gpio, 0x00e074);
@@ -38,7 +38,7 @@ nv92_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
38} 38}
39 39
40void 40void
41nv92_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data) 41nv94_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
42{ 42{
43 u32 inte0 = nv_rd32(gpio, 0x00e050); 43 u32 inte0 = nv_rd32(gpio, 0x00e050);
44 u32 inte1 = nv_rd32(gpio, 0x00e070); 44 u32 inte1 = nv_rd32(gpio, 0x00e070);
@@ -57,8 +57,8 @@ nv92_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
57} 57}
58 58
59struct nouveau_oclass * 59struct nouveau_oclass *
60nv92_gpio_oclass = &(struct nouveau_gpio_impl) { 60nv94_gpio_oclass = &(struct nouveau_gpio_impl) {
61 .base.handle = NV_SUBDEV(GPIO, 0x92), 61 .base.handle = NV_SUBDEV(GPIO, 0x94),
62 .base.ofuncs = &(struct nouveau_ofuncs) { 62 .base.ofuncs = &(struct nouveau_ofuncs) {
63 .ctor = _nouveau_gpio_ctor, 63 .ctor = _nouveau_gpio_ctor,
64 .dtor = _nouveau_gpio_dtor, 64 .dtor = _nouveau_gpio_dtor,
@@ -66,8 +66,8 @@ nv92_gpio_oclass = &(struct nouveau_gpio_impl) {
66 .fini = _nouveau_gpio_fini, 66 .fini = _nouveau_gpio_fini,
67 }, 67 },
68 .lines = 32, 68 .lines = 32,
69 .intr_stat = nv92_gpio_intr_stat, 69 .intr_stat = nv94_gpio_intr_stat,
70 .intr_mask = nv92_gpio_intr_mask, 70 .intr_mask = nv94_gpio_intr_mask,
71 .drive = nv50_gpio_drive, 71 .drive = nv50_gpio_drive,
72 .sense = nv50_gpio_sense, 72 .sense = nv50_gpio_sense,
73 .reset = nv50_gpio_reset, 73 .reset = nv50_gpio_reset,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index a4682b0956ad..480d6d2af770 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -77,8 +77,8 @@ nvd0_gpio_oclass = &(struct nouveau_gpio_impl) {
77 .fini = _nouveau_gpio_fini, 77 .fini = _nouveau_gpio_fini,
78 }, 78 },
79 .lines = 32, 79 .lines = 32,
80 .intr_stat = nv92_gpio_intr_stat, 80 .intr_stat = nv94_gpio_intr_stat,
81 .intr_mask = nv92_gpio_intr_mask, 81 .intr_mask = nv94_gpio_intr_mask,
82 .drive = nvd0_gpio_drive, 82 .drive = nvd0_gpio_drive,
83 .sense = nvd0_gpio_sense, 83 .sense = nvd0_gpio_sense,
84 .reset = nvd0_gpio_reset, 84 .reset = nvd0_gpio_reset,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
index e1724dfc86ae..bff98b86e2b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -56,8 +56,8 @@ void nv50_gpio_reset(struct nouveau_gpio *, u8);
56int nv50_gpio_drive(struct nouveau_gpio *, int, int, int); 56int nv50_gpio_drive(struct nouveau_gpio *, int, int, int);
57int nv50_gpio_sense(struct nouveau_gpio *, int); 57int nv50_gpio_sense(struct nouveau_gpio *, int);
58 58
59void nv92_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *); 59void nv94_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
60void nv92_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32); 60void nv94_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
61 61
62void nvd0_gpio_reset(struct nouveau_gpio *, u8); 62void nvd0_gpio_reset(struct nouveau_gpio *, u8);
63int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int); 63int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index a652cafde3d6..2b1bf545e488 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <core/option.h> 25#include <core/option.h>
26#include <core/object.h>
26#include <core/event.h> 27#include <core/event.h>
27 28
28#include <subdev/bios.h> 29#include <subdev/bios.h>
@@ -346,7 +347,8 @@ nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index)
346} 347}
347 348
348static int 349static int
349nouveau_i2c_intr_ctor(void *data, u32 size, struct nvkm_notify *notify) 350nouveau_i2c_intr_ctor(struct nouveau_object *object, void *data, u32 size,
351 struct nvkm_notify *notify)
350{ 352{
351 struct nvkm_i2c_ntfy_req *req = data; 353 struct nvkm_i2c_ntfy_req *req = data;
352 if (!WARN_ON(size != sizeof(*req))) { 354 if (!WARN_ON(size != sizeof(*req))) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index 7b64befee48f..e8b1401c59c0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -69,7 +69,7 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
69 if (ret) 69 if (ret)
70 return ret; 70 return ret;
71 71
72 ret = nouveau_mm_head(&priv->heap, 1, args->size, args->size, 72 ret = nouveau_mm_head(&priv->heap, 0, 1, args->size, args->size,
73 args->align, &node->mem); 73 args->align, &node->mem);
74 if (ret) 74 if (ret)
75 return ret; 75 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
index 32ed442c5913..7fa331516f84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
@@ -31,7 +31,7 @@ nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n,
31 struct nvkm_ltc_priv *priv = (void *)ltc; 31 struct nvkm_ltc_priv *priv = (void *)ltc;
32 int ret; 32 int ret;
33 33
34 ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); 34 ret = nouveau_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
35 if (ret) 35 if (ret)
36 *pnode = NULL; 36 *pnode = NULL;
37 37
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index d5d65285efe5..2db0977284f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -62,16 +62,38 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
62 nv_wr32(priv, 0x17ea58, depth); 62 nv_wr32(priv, 0x17ea58, depth);
63} 63}
64 64
65static const struct nouveau_bitfield
66gf100_ltc_lts_intr_name[] = {
67 { 0x00000001, "IDLE_ERROR_IQ" },
68 { 0x00000002, "IDLE_ERROR_CBC" },
69 { 0x00000004, "IDLE_ERROR_TSTG" },
70 { 0x00000008, "IDLE_ERROR_DSTG" },
71 { 0x00000010, "EVICTED_CB" },
72 { 0x00000020, "ILLEGAL_COMPSTAT" },
73 { 0x00000040, "BLOCKLINEAR_CB" },
74 { 0x00000100, "ECC_SEC_ERROR" },
75 { 0x00000200, "ECC_DED_ERROR" },
76 { 0x00000400, "DEBUG" },
77 { 0x00000800, "ATOMIC_TO_Z" },
78 { 0x00001000, "ILLEGAL_ATOMIC" },
79 { 0x00002000, "BLKACTIVITY_ERR" },
80 {}
81};
82
65static void 83static void
66gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts) 84gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
67{ 85{
68 u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400); 86 u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
69 u32 stat = nv_rd32(priv, base + 0x020); 87 u32 intr = nv_rd32(priv, base + 0x020);
88 u32 stat = intr & 0x0000ffff;
70 89
71 if (stat) { 90 if (stat) {
72 nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat); 91 nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
73 nv_wr32(priv, base + 0x020, stat); 92 nouveau_bitfield_print(gf100_ltc_lts_intr_name, stat);
93 pr_cont("\n");
74 } 94 }
95
96 nv_wr32(priv, base + 0x020, intr);
75} 97}
76 98
77void 99void
@@ -84,14 +106,9 @@ gf100_ltc_intr(struct nouveau_subdev *subdev)
84 while (mask) { 106 while (mask) {
85 u32 lts, ltc = __ffs(mask); 107 u32 lts, ltc = __ffs(mask);
86 for (lts = 0; lts < priv->lts_nr; lts++) 108 for (lts = 0; lts < priv->lts_nr; lts++)
87 gf100_ltc_lts_isr(priv, ltc, lts); 109 gf100_ltc_lts_intr(priv, ltc, lts);
88 mask &= ~(1 << ltc); 110 mask &= ~(1 << ltc);
89 } 111 }
90
91 /* we do something horribly wrong and upset PMFB a lot, so mask off
92 * interrupts from it after the first one until it's fixed
93 */
94 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
95} 112}
96 113
97static int 114static int
@@ -153,7 +170,7 @@ gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
153 tag_size += tag_align; 170 tag_size += tag_align;
154 tag_size = (tag_size + 0xfff) >> 12; /* round up */ 171 tag_size = (tag_size + 0xfff) >> 12; /* round up */
155 172
156 ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, 173 ret = nouveau_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
157 &priv->tag_ram); 174 &priv->tag_ram);
158 if (ret) { 175 if (ret) {
159 priv->num_tags = 0; 176 priv->num_tags = 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index a4de64289762..89fc4238f50c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -87,11 +87,6 @@ gm107_ltc_intr(struct nouveau_subdev *subdev)
87 gm107_ltc_lts_isr(priv, ltc, lts); 87 gm107_ltc_lts_isr(priv, ltc, lts);
88 mask &= ~(1 << ltc); 88 mask &= ~(1 << ltc);
89 } 89 }
90
91 /* we do something horribly wrong and upset PMFB a lot, so mask off
92 * interrupts from it after the first one until it's fixed
93 */
94 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
95} 90}
96 91
97static int 92static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
index 594924f39126..41f179d93da6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
@@ -4,6 +4,8 @@
4#include <subdev/ltc.h> 4#include <subdev/ltc.h>
5#include <subdev/fb.h> 5#include <subdev/fb.h>
6 6
7#include <core/enum.h>
8
7struct nvkm_ltc_priv { 9struct nvkm_ltc_priv {
8 struct nouveau_ltc base; 10 struct nouveau_ltc base;
9 u32 ltc_nr; 11 u32 ltc_nr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
index 69f1f34f6931..0ab55f27ec45 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -203,6 +203,8 @@ _nouveau_pwr_init(struct nouveau_object *object)
203 nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000); 203 nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
204 nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000); 204 nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
205 nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000); 205 nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);
206 nv_rd32(ppwr, 0x000200);
207 nv_wait(ppwr, 0x10a10c, 0x00000006, 0x00000000);
206 208
207 /* upload data segment */ 209 /* upload data segment */
208 nv_wr32(ppwr, 0x10a1c0, 0x01000000); 210 nv_wr32(ppwr, 0x10a1c0, 0x01000000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc
new file mode 100644
index 000000000000..214a6d9e088d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/arith.fuc
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2014 Martin Peres <martin.peres@free.fr>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
 9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25/******************************************************************************
26 * arith data segment
27 *****************************************************************************/
28#ifdef INCLUDE_PROC
29#endif
30
31#ifdef INCLUDE_DATA
32#endif
33
34/******************************************************************************
35 * arith code segment
36 *****************************************************************************/
37#ifdef INCLUDE_CODE
38
39// does a 32x32 -> 64 multiplication
40//
41// A * B = A_lo * B_lo
42// + ( A_hi * B_lo ) << 16
43// + ( A_lo * B_hi ) << 16
44// + ( A_hi * B_hi ) << 32
45//
46// $r15 - current
47// $r14 - A
48// $r13 - B
49// $r12 - mul_lo (return)
50// $r11 - mul_hi (return)
51// $r0 - zero
52mulu32_32_64:
53 push $r1 // A_hi
54 push $r2 // B_hi
55 push $r3 // tmp0
56 push $r4 // tmp1
57
58 shr b32 $r1 $r14 16
59 shr b32 $r2 $r13 16
60
61 clear b32 $r12
62 clear b32 $r11
63
64 // A_lo * B_lo
65 mulu $r12 $r14 $r13
66
67 // ( A_hi * B_lo ) << 16
68 mulu $r3 $r1 $r13 // tmp0 = A_hi * B_lo
69 mov b32 $r4 $r3
70 and $r3 0xffff // tmp0 = tmp0_lo
71 shl b32 $r3 16
72 shr b32 $r4 16 // tmp1 = tmp0_hi
73 add b32 $r12 $r3
74 adc b32 $r11 $r4
75
76 // ( A_lo * B_hi ) << 16
77 mulu $r3 $r14 $r2 // tmp0 = A_lo * B_hi
78 mov b32 $r4 $r3
79 and $r3 0xffff // tmp0 = tmp0_lo
80 shl b32 $r3 16
81 shr b32 $r4 16 // tmp1 = tmp0_hi
82 add b32 $r12 $r3
83 adc b32 $r11 $r4
84
85 // ( A_hi * B_hi ) << 32
86 mulu $r3 $r1 $r2 // tmp0 = A_hi * B_hi
87 add b32 $r11 $r3
88
89 pop $r4
90 pop $r3
91 pop $r2
92 pop $r1
93 ret
94#endif
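mulu32_32_64 treats the falcon's mulu as a 16x16 -> 32 multiply and builds the 64-bit product from 16-bit halves exactly as the comment block spells out. The same decomposition in C, checked against a native 64-bit multiply (a sketch, not falcon code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mulu32_32_64(uint32_t a, uint32_t b)
    {
        uint32_t a_lo = a & 0xffff, a_hi = a >> 16;
        uint32_t b_lo = b & 0xffff, b_hi = b >> 16;
        uint64_t r = (uint64_t)a_lo * b_lo;     /* A_lo * B_lo          */

        r += (uint64_t)(a_hi * b_lo) << 16;     /* ( A_hi * B_lo ) << 16 */
        r += (uint64_t)(a_lo * b_hi) << 16;     /* ( A_lo * B_hi ) << 16 */
        r += (uint64_t)(a_hi * b_hi) << 32;     /* ( A_hi * B_hi ) << 32 */
        return r;
    }

    int main(void)
    {
        uint32_t a = 0xdeadbeef, b = 0x12345678;
        printf("%d\n", mulu32_32_64(a, b) == (uint64_t)a * b); /* prints 1 */
        return 0;
    }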
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
index 8f29badd785f..5cf5be63cbef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -98,12 +98,16 @@ wr32:
98// $r14 - ns 98// $r14 - ns
99// $r0 - zero 99// $r0 - zero
100nsec: 100nsec:
101 push $r9
102 push $r8
101 nv_iord($r8, NV_PPWR_TIMER_LOW) 103 nv_iord($r8, NV_PPWR_TIMER_LOW)
102 nsec_loop: 104 nsec_loop:
103 nv_iord($r9, NV_PPWR_TIMER_LOW) 105 nv_iord($r9, NV_PPWR_TIMER_LOW)
104 sub b32 $r9 $r8 106 sub b32 $r9 $r8
105 cmp b32 $r9 $r14 107 cmp b32 $r9 $r14
106 bra l #nsec_loop 108 bra l #nsec_loop
109 pop $r8
110 pop $r9
107 ret 111 ret
108 112
109// busy-wait for a period of time 113// busy-wait for a period of time
@@ -115,6 +119,8 @@ nsec:
115// $r11 - timeout (ns) 119// $r11 - timeout (ns)
116// $r0 - zero 120// $r0 - zero
117wait: 121wait:
122 push $r9
123 push $r8
118 nv_iord($r8, NV_PPWR_TIMER_LOW) 124 nv_iord($r8, NV_PPWR_TIMER_LOW)
119 wait_loop: 125 wait_loop:
120 nv_rd32($r10, $r14) 126 nv_rd32($r10, $r14)
@@ -126,6 +132,8 @@ wait:
126 cmp b32 $r9 $r11 132 cmp b32 $r9 $r11
127 bra l #wait_loop 133 bra l #wait_loop
128 wait_done: 134 wait_done:
135 pop $r8
136 pop $r9
129 ret 137 ret
130 138
131// $r15 - current (kern) 139// $r15 - current (kern)
@@ -242,12 +250,89 @@ intr:
242 bclr $flags $p0 250 bclr $flags $p0
243 iret 251 iret
244 252
 245// request the current process be sent a message after a timeout expires	 253// calculate the number of ticks in the specified nanosecond delay
254//
255// $r15 - current
256// $r14 - ns
257// $r14 - ticks (return)
258// $r0 - zero
259ticks_from_ns:
260 push $r12
261 push $r11
262
 263	 /* try not to lose precision (multiply then divide) */
264 imm32($r13, HW_TICKS_PER_US)
265 call #mulu32_32_64
266
 267	 /* use an immediate; it's ok because HW_TICKS_PER_US fits in 16 bits */
268 div $r12 $r12 1000
269
 270	 /* check whether the multiply overflowed 32 bits */
271 cmpu b32 $r11 0
272 bra e #ticks_from_ns_quit
273
274 /* let's divide then multiply, too bad for the precision! */
275 div $r14 $r14 1000
276 imm32($r13, HW_TICKS_PER_US)
277 call #mulu32_32_64
278
279 /* this cannot overflow as long as HW_TICKS_PER_US < 1000 */
280
281ticks_from_ns_quit:
282 mov b32 $r14 $r12
283 pop $r11
284 pop $r12
285 ret
286
287// calculate the number of ticks in the specified microsecond delay
288//
289// $r15 - current
290// $r14 - us
291// $r14 - ticks (return)
292// $r0 - zero
293ticks_from_us:
294 push $r12
295 push $r11
296
297 /* simply multiply $us by HW_TICKS_PER_US */
298 imm32($r13, HW_TICKS_PER_US)
299 call #mulu32_32_64
300 mov b32 $r14 $r12
301
 302	 /* check whether the multiply overflowed 32 bits */
303 cmpu b32 $r11 0
304 bra e #ticks_from_us_quit
305
306 /* Overflow! */
307 clear b32 $r14
308
309ticks_from_us_quit:
310 pop $r11
311 pop $r12
312 ret
313
 314// calculate the number of microseconds in the specified ticks
246// 315//
247// $r15 - current 316// $r15 - current
248// $r14 - ticks 317// $r14 - ticks
318// $r14 - us (return)
319// $r0 - zero
320ticks_to_us:
321 /* simply divide $ticks by HW_TICKS_PER_US */
322 imm32($r13, HW_TICKS_PER_US)
323 div $r14 $r14 $r13
324
325 ret
326
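ticks_from_ns prefers multiply-then-divide for precision and only falls back to divide-then-multiply when the 64-bit product no longer fits in 32 bits (the high word in $r11 is non-zero). The same control flow in C, using the GK208 value HW_TICKS_PER_US = 324 defined in nv108.fuc below (a sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define HW_TICKS_PER_US 324

    static uint32_t ticks_from_ns(uint32_t ns)
    {
        uint64_t t = (uint64_t)ns * HW_TICKS_PER_US;

        if (!(t >> 32))                    /* high word clear: no overflow */
            return (uint32_t)(t / 1000);   /* precise path */
        /* overflowed: divide first, losing sub-microsecond precision */
        return (ns / 1000) * HW_TICKS_PER_US;
    }

    int main(void)
    {
        printf("%u\n", ticks_from_ns(1000));        /* 324 ticks for 1us */
        printf("%u\n", ticks_from_ns(4000000000u)); /* takes the fallback path */
        return 0;
    }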
327// request the current process be sent a message after a timeout expires
328//
329// $r15 - current
330// $r14 - ticks (make sure it is < 2^31 to avoid any possible overflow)
249// $r0 - zero 331// $r0 - zero
250timer: 332timer:
333 push $r9
334 push $r8
335
251 // interrupts off to prevent racing with timer isr 336 // interrupts off to prevent racing with timer isr
252 bclr $flags ie0 337 bclr $flags ie0
253 338
@@ -255,13 +340,22 @@ timer:
255 ld b32 $r8 D[$r15 + #proc_time] 340 ld b32 $r8 D[$r15 + #proc_time]
256 cmp b32 $r8 0 341 cmp b32 $r8 0
257 bra g #timer_done 342 bra g #timer_done
258 st b32 D[$r15 + #proc_time] $r14
259 343
260 // halt watchdog timer temporarily and check for a pending 344 // halt watchdog timer temporarily
261 // interrupt. if there's one already pending, we can just 345 clear b32 $r8
262 // bail since the timer isr will queue the next soonest
263 // right after it's done
264 nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8) 346 nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
347
348 // find out how much time elapsed since the last update
349 // of the watchdog and add this time to the wanted ticks
350 nv_iord($r8, NV_PPWR_WATCHDOG_TIME)
351 ld b32 $r9 D[$r0 + #time_prev]
352 sub b32 $r9 $r8
353 add b32 $r14 $r9
354 st b32 D[$r15 + #proc_time] $r14
355
356 // check for a pending interrupt. if there's one already
357 // pending, we can just bail since the timer isr will
358 // queue the next soonest right after it's done
265 nv_iord($r8, NV_PPWR_INTR) 359 nv_iord($r8, NV_PPWR_INTR)
266 and $r8 NV_PPWR_INTR_WATCHDOG 360 and $r8 NV_PPWR_INTR_WATCHDOG
267 bra nz #timer_enable 361 bra nz #timer_enable
@@ -272,10 +366,10 @@ timer:
272 cmp b32 $r14 $r0 366 cmp b32 $r14 $r0
273 bra e #timer_reset 367 bra e #timer_reset
274 cmp b32 $r14 $r8 368 cmp b32 $r14 $r8
275 bra l #timer_done 369 bra g #timer_enable
276 timer_reset: 370 timer_reset:
277 nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14) 371 nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14)
278 st b32 D[$r0 + #time_prev] $r14 372 st b32 D[$r0 + #time_prev] $r14
279 373
280 // re-enable the watchdog timer 374 // re-enable the watchdog timer
281 timer_enable: 375 timer_enable:
@@ -285,6 +379,9 @@ timer:
285 // interrupts back on 379 // interrupts back on
286 timer_done: 380 timer_done:
287 bset $flags ie0 381 bset $flags ie0
382
383 pop $r8
384 pop $r9
288 ret 385 ret
289 386
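The reworked timer halts the watchdog, then compensates for however long it has already been running: the counter counts down from the last value written, so time_prev minus the current reading is the elapsed time, which is added to the requested delay before it is stored in proc_time. Worked through in C with plain variables standing in for the register reads (a sketch; the values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t time_prev = 5000; /* last value written to WATCHDOG_TIME */
        uint32_t now = 3200;       /* WATCHDOG_TIME read while halted     */
        uint32_t want = 1000;      /* requested delay in ticks            */

        uint32_t elapsed = time_prev - now;
        uint32_t deadline = want + elapsed; /* stored in proc_time for the isr */
        printf("elapsed %u, deadline %u\n", elapsed, deadline); /* 1800, 2800 */
        return 0;
    }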
290// send message to another process 387// send message to another process
@@ -371,6 +468,9 @@ send:
371// $r14 - process 468// $r14 - process
372// $r0 - zero 469// $r0 - zero
373recv: 470recv:
471 push $r9
472 push $r8
473
374 ld b32 $r8 D[$r14 + #proc_qget] 474 ld b32 $r8 D[$r14 + #proc_qget]
375 ld b32 $r9 D[$r14 + #proc_qput] 475 ld b32 $r9 D[$r14 + #proc_qput]
376 bclr $flags $p1 476 bclr $flags $p1
@@ -403,6 +503,8 @@ recv:
403 bset $flags $p1 503 bset $flags $p1
404 pop $r15 504 pop $r15
405 recv_done: 505 recv_done:
506 pop $r8
507 pop $r9
406 ret 508 ret
407 509
408init: 510init:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index 5668e045bac1..96fc984dafdc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -250,3 +250,23 @@
250*/ st b32 D[$r0] reg /* 250*/ st b32 D[$r0] reg /*
251*/ clear b32 $r0 251*/ clear b32 $r0
252#endif 252#endif
253
254#define st(size, addr, reg) /*
255*/ movw $r0 addr /*
256*/ st size D[$r0] reg /*
257*/ clear b32 $r0
258
259#define ld(size, reg, addr) /*
260*/ movw $r0 addr /*
261*/ ld size reg D[$r0] /*
262*/ clear b32 $r0
263
264// does a 64+64 -> 64 unsigned addition (C = A + B)
265#define addu64(reg_a_c_hi, reg_a_c_lo, b_hi, b_lo) /*
266*/ add b32 reg_a_c_lo b_lo /*
267*/ adc b32 reg_a_c_hi b_hi
268
 269// does a 64-64 -> 64 subtraction (C = A - B)
270#define subu64(reg_a_c_hi, reg_a_c_lo, b_hi, b_lo) /*
271*/ sub b32 reg_a_c_lo b_lo /*
272*/ sbb b32 reg_a_c_hi b_hi
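addu64/subu64 synthesise 64-bit arithmetic from 32-bit halves: an add/sub on the low words followed by an add-with-carry/subtract-with-borrow on the high words. The same carry propagation written out in C (a sketch):

    #include <stdint.h>
    #include <stdio.h>

    static void addu64(uint32_t *hi, uint32_t *lo, uint32_t b_hi, uint32_t b_lo)
    {
        uint32_t old_lo = *lo;

        *lo += b_lo;
        *hi += b_hi + (*lo < old_lo); /* carry out of the low word */
    }

    int main(void)
    {
        uint32_t hi = 0x00000001, lo = 0xffffffff;

        addu64(&hi, &lo, 0x00000000, 0x00000001); /* +1 carries into hi */
        printf("0x%08x%08x\n", hi, lo); /* prints 0x0000000200000000 */
        return 0;
    }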
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
index d43741eccb11..e89789a53b80 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -43,17 +43,23 @@ process(PROC_MEMX, #memx_init, #memx_recv)
43*/ .b32 func 43*/ .b32 func
44 44
45memx_func_head: 45memx_func_head:
46handler(ENTER , 0x0001, 0x0000, #memx_func_enter) 46handler(ENTER , 0x0000, 0x0000, #memx_func_enter)
47memx_func_next: 47memx_func_next:
48handler(LEAVE , 0x0000, 0x0000, #memx_func_leave) 48handler(LEAVE , 0x0000, 0x0000, #memx_func_leave)
49handler(WR32 , 0x0000, 0x0002, #memx_func_wr32) 49handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
50handler(WAIT , 0x0004, 0x0000, #memx_func_wait) 50handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
51handler(DELAY , 0x0001, 0x0000, #memx_func_delay) 51handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
52handler(VBLANK, 0x0001, 0x0000, #memx_func_wait_vblank)
52memx_func_tail: 53memx_func_tail:
53 54
54.equ #memx_func_size #memx_func_next - #memx_func_head 55.equ #memx_func_size #memx_func_next - #memx_func_head
55.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size 56.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size
56 57
58memx_ts_start:
59.b32 0
60memx_ts_end:
61.b32 0
62
57memx_data_head: 63memx_data_head:
58.skip 0x0800 64.skip 0x0800
59memx_data_tail: 65memx_data_tail:
@@ -67,19 +73,44 @@ memx_data_tail:
67// 73//
68// $r15 - current (memx) 74// $r15 - current (memx)
69// $r4 - packet length 75// $r4 - packet length
70// +00: bitmask of heads to wait for vblank on
 71// $r3 - opcode description	 76// $r3 - opcode description
72// $r0 - zero 77// $r0 - zero
73memx_func_enter: 78memx_func_enter:
79#if NVKM_PPWR_CHIPSET == GT215
80 movw $r8 0x1610
81 nv_rd32($r7, $r8)
82 imm32($r6, 0xfffffffc)
83 and $r7 $r6
84 movw $r6 0x2
85 or $r7 $r6
86 nv_wr32($r8, $r7)
87#else
88 movw $r6 0x001620
89 imm32($r7, ~0x00000aa2);
90 nv_rd32($r8, $r6)
91 and $r8 $r7
92 nv_wr32($r6, $r8)
93
94 imm32($r7, ~0x00000001)
95 nv_rd32($r8, $r6)
96 and $r8 $r7
97 nv_wr32($r6, $r8)
98
99 movw $r6 0x0026f0
100 nv_rd32($r8, $r6)
101 and $r8 $r7
102 nv_wr32($r6, $r8)
103#endif
104
74 mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE 105 mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE
75 nv_iowr(NV_PPWR_OUTPUT_SET, $r6) 106 nv_iowr(NV_PPWR_OUTPUT_SET, $r6)
76 memx_func_enter_wait: 107 memx_func_enter_wait:
77 nv_iord($r6, NV_PPWR_OUTPUT) 108 nv_iord($r6, NV_PPWR_OUTPUT)
78 and $r6 NV_PPWR_OUTPUT_FB_PAUSE 109 and $r6 NV_PPWR_OUTPUT_FB_PAUSE
79 bra z #memx_func_enter_wait 110 bra z #memx_func_enter_wait
80 //XXX: TODO 111
81 ld b32 $r6 D[$r1 + 0x00] 112 nv_iord($r6, NV_PPWR_TIMER_LOW)
82 add b32 $r1 0x04 113 st b32 D[$r0 + #memx_ts_start] $r6
83 ret 114 ret
84 115
85// description 116// description
@@ -89,14 +120,93 @@ memx_func_enter:
 89// $r3 - opcode description	 120// $r3 - opcode description
90// $r0 - zero 121// $r0 - zero
91memx_func_leave: 122memx_func_leave:
123 nv_iord($r6, NV_PPWR_TIMER_LOW)
124 st b32 D[$r0 + #memx_ts_end] $r6
125
92 mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE 126 mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE
93 nv_iowr(NV_PPWR_OUTPUT_CLR, $r6) 127 nv_iowr(NV_PPWR_OUTPUT_CLR, $r6)
94 memx_func_leave_wait: 128 memx_func_leave_wait:
95 nv_iord($r6, NV_PPWR_OUTPUT) 129 nv_iord($r6, NV_PPWR_OUTPUT)
96 and $r6 NV_PPWR_OUTPUT_FB_PAUSE 130 and $r6 NV_PPWR_OUTPUT_FB_PAUSE
97 bra nz #memx_func_leave_wait 131 bra nz #memx_func_leave_wait
132
133#if NVKM_PPWR_CHIPSET == GT215
134 movw $r8 0x1610
135 nv_rd32($r7, $r8)
136 imm32($r6, 0xffffffcc)
137 and $r7 $r6
138 nv_wr32($r8, $r7)
139#else
140 movw $r6 0x0026f0
141 imm32($r7, 0x00000001)
142 nv_rd32($r8, $r6)
143 or $r8 $r7
144 nv_wr32($r6, $r8)
145
146 movw $r6 0x001620
147 nv_rd32($r8, $r6)
148 or $r8 $r7
149 nv_wr32($r6, $r8)
150
151 imm32($r7, 0x00000aa2);
152 nv_rd32($r8, $r6)
153 or $r8 $r7
154 nv_wr32($r6, $r8)
155#endif
156 ret
157
158#if NVKM_PPWR_CHIPSET < GF119
159// description
160//
161// $r15 - current (memx)
162// $r4 - packet length
163// +00: head to wait for vblank on
 164// $r3 - opcode description
165// $r0 - zero
166memx_func_wait_vblank:
167 ld b32 $r6 D[$r1 + 0x00]
168 cmp b32 $r6 0x0
169 bra z #memx_func_wait_vblank_head0
170 cmp b32 $r6 0x1
171 bra z #memx_func_wait_vblank_head1
172 bra #memx_func_wait_vblank_fini
173
174 memx_func_wait_vblank_head1:
175 movw $r7 0x20
176 bra #memx_func_wait_vblank_0
177
178 memx_func_wait_vblank_head0:
179 movw $r7 0x8
180
181 memx_func_wait_vblank_0:
182 nv_iord($r6, NV_PPWR_INPUT)
183 and $r6 $r7
184 bra nz #memx_func_wait_vblank_0
185
186 memx_func_wait_vblank_1:
187 nv_iord($r6, NV_PPWR_INPUT)
188 and $r6 $r7
189 bra z #memx_func_wait_vblank_1
190
191 memx_func_wait_vblank_fini:
192 add b32 $r1 0x4
193 ret
194
195#else
196
197// XXX: currently no-op
198//
199// $r15 - current (memx)
200// $r4 - packet length
201// +00: head to wait for vblank on
 202// $r3 - opcode description
203// $r0 - zero
204memx_func_wait_vblank:
205 add b32 $r1 0x4
98 ret 206 ret
99 207
208#endif
209
100// description 210// description
101// 211//
102// $r15 - current (memx) 212// $r15 - current (memx)
@@ -160,14 +270,17 @@ memx_exec:
160 push $r13 270 push $r13
161 mov b32 $r1 $r12 271 mov b32 $r1 $r12
162 mov b32 $r2 $r11 272 mov b32 $r2 $r11
273
163 memx_exec_next: 274 memx_exec_next:
164 // fetch the packet header, and locate opcode info 275 // fetch the packet header
165 ld b32 $r3 D[$r1] 276 ld b32 $r3 D[$r1]
166 add b32 $r1 4 277 add b32 $r1 4
167 shr b32 $r4 $r3 16 278 extr $r4 $r3 16:31
168 mulu $r3 #memx_func_size 279 extr $r3 $r3 0:15
169 280
170 // execute the opcode handler 281 // execute the opcode handler
282 sub b32 $r3 1
283 mulu $r3 #memx_func_size
171 ld b32 $r5 D[$r3 + #memx_func_head + #memx_func] 284 ld b32 $r5 D[$r3 + #memx_func_head + #memx_func]
172 call $r5 285 call $r5
173 286
@@ -176,6 +289,10 @@ memx_exec:
176 bra l #memx_exec_next 289 bra l #memx_exec_next
177 290
178 // send completion reply 291 // send completion reply
292 ld b32 $r11 D[$r0 + #memx_ts_start]
293 ld b32 $r12 D[$r0 + #memx_ts_end]
294 sub b32 $r12 $r11
295 nv_iord($r11, NV_PPWR_INPUT)
179 pop $r13 296 pop $r13
180 pop $r14 297 pop $r14
181 call(send) 298 call(send)
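After this rework a memx packet header carries the argument count in bits 16..31 and a 1-based opcode in bits 0..15, which is why memx_exec subtracts one before indexing the handler table. A sketch of the decode with hypothetical placeholder handlers (not the firmware's table):

    #include <stdint.h>
    #include <stdio.h>

    static void func_enter(void) { puts("ENTER"); }
    static void func_leave(void) { puts("LEAVE"); }

    static void (*const handlers[])(void) = { func_enter, func_leave };

    static void memx_exec_one(uint32_t header)
    {
        uint32_t argc   = header >> 16;    /* extr $r4 $r3 16:31 */
        uint32_t opcode = header & 0xffff; /* extr $r3 $r3 0:15  */

        printf("opcode %u, %u args\n", opcode, argc);
        handlers[opcode - 1]();            /* sub b32 $r3 1      */
    }

    int main(void)
    {
        memx_exec_one(0x00000001); /* opcode 1, no arguments -> ENTER */
        memx_exec_one(0x00000002); /* opcode 2, no arguments -> LEAVE */
        return 0;
    }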
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
index 17a8a383d91a..b439519ec866 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#define NVKM_PPWR_CHIPSET GK208 25#define NVKM_PPWR_CHIPSET GK208
26#define HW_TICKS_PER_US 324
26 27
27#define NVKM_FALCON_PC24 28#define NVKM_FALCON_PC24
28#define NVKM_FALCON_UNSHIFTED_IO 29#define NVKM_FALCON_UNSHIFTED_IO
@@ -34,6 +35,7 @@
34.section #nv108_pwr_data 35.section #nv108_pwr_data
35#define INCLUDE_PROC 36#define INCLUDE_PROC
36#include "kernel.fuc" 37#include "kernel.fuc"
38#include "arith.fuc"
37#include "host.fuc" 39#include "host.fuc"
38#include "memx.fuc" 40#include "memx.fuc"
39#include "perf.fuc" 41#include "perf.fuc"
@@ -44,6 +46,7 @@
44 46
45#define INCLUDE_DATA 47#define INCLUDE_DATA
46#include "kernel.fuc" 48#include "kernel.fuc"
49#include "arith.fuc"
47#include "host.fuc" 50#include "host.fuc"
48#include "memx.fuc" 51#include "memx.fuc"
49#include "perf.fuc" 52#include "perf.fuc"
@@ -56,6 +59,7 @@
56.section #nv108_pwr_code 59.section #nv108_pwr_code
57#define INCLUDE_CODE 60#define INCLUDE_CODE
58#include "kernel.fuc" 61#include "kernel.fuc"
62#include "arith.fuc"
59#include "host.fuc" 63#include "host.fuc"
60#include "memx.fuc" 64#include "memx.fuc"
61#include "perf.fuc" 65#include "perf.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 986495d533dd..4d278a96b2bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -24,8 +24,8 @@ uint32_t nv108_pwr_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x00000379, 27 0x00000453,
28 0x0000032a, 28 0x00000404,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x00000464, 49 0x0000061c,
50 0x00000456, 50 0x0000060e,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000468, 71 0x00000620,
72 0x00000466, 72 0x0000061e,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x0000086c, 93 0x00000a24,
94 0x00000713, 94 0x000008cb,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x0000088d, 115 0x00000a45,
116 0x0000086e, 116 0x00000a26,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000898, 137 0x00000a50,
138 0x00000896, 138 0x00000a4e,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -227,25 +227,31 @@ uint32_t nv108_pwr_data[] = {
227 0x00000000, 227 0x00000000,
228 0x00000000, 228 0x00000000,
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00010000,
231 0x00000000,
232 0x000003a9,
233/* 0x037c: memx_func_next */
234 0x00000001, 230 0x00000001,
235 0x00000000, 231 0x00000000,
236 0x000003c7, 232 0x00000483,
233/* 0x037c: memx_func_next */
237 0x00000002, 234 0x00000002,
235 0x00000000,
236 0x00000500,
237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x000003df, 239 0x00000580,
240 0x00040003, 240 0x00040004,
241 0x00000000,
242 0x0000059d,
243 0x00010005,
244 0x00000000,
245 0x000005b7,
246 0x00010006,
241 0x00000000, 247 0x00000000,
242 0x000003fc, 248 0x0000057b,
243 0x00010004, 249/* 0x03b8: memx_func_tail */
250/* 0x03b8: memx_ts_start */
244 0x00000000, 251 0x00000000,
245 0x00000416, 252/* 0x03bc: memx_ts_end */
246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */
248 0x00000000, 253 0x00000000,
254/* 0x03c0: memx_data_head */
249 0x00000000, 255 0x00000000,
250 0x00000000, 256 0x00000000,
251 0x00000000, 257 0x00000000,
@@ -757,8 +763,9 @@ uint32_t nv108_pwr_data[] = {
757 0x00000000, 763 0x00000000,
758 0x00000000, 764 0x00000000,
759 0x00000000, 765 0x00000000,
760/* 0x0bac: memx_data_tail */ 766 0x00000000,
761/* 0x0bac: i2c_scl_map */ 767/* 0x0bc0: memx_data_tail */
768/* 0x0bc0: i2c_scl_map */
762 0x00000400, 769 0x00000400,
763 0x00000800, 770 0x00000800,
764 0x00001000, 771 0x00001000,
@@ -769,7 +776,7 @@ uint32_t nv108_pwr_data[] = {
769 0x00020000, 776 0x00020000,
770 0x00040000, 777 0x00040000,
771 0x00080000, 778 0x00080000,
772/* 0x0bd4: i2c_sda_map */ 779/* 0x0be8: i2c_sda_map */
773 0x00100000, 780 0x00100000,
774 0x00200000, 781 0x00200000,
775 0x00400000, 782 0x00400000,
@@ -781,10 +788,69 @@ uint32_t nv108_pwr_data[] = {
781 0x10000000, 788 0x10000000,
782 0x20000000, 789 0x20000000,
783 0x00000000, 790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836 0x00000000,
837 0x00000000,
838 0x00000000,
839 0x00000000,
840 0x00000000,
841 0x00000000,
842 0x00000000,
843 0x00000000,
844 0x00000000,
845 0x00000000,
846 0x00000000,
847 0x00000000,
848 0x00000000,
849 0x00000000,
784}; 850};
785 851
786uint32_t nv108_pwr_code[] = { 852uint32_t nv108_pwr_code[] = {
787 0x02910ef5, 853 0x031c0ef5,
788/* 0x0004: rd32 */ 854/* 0x0004: rd32 */
789 0xf607a040, 855 0xf607a040,
790 0x04bd000e, 856 0x04bd000e,
@@ -812,15 +878,18 @@ uint32_t nv108_pwr_code[] = {
812 0x7000d4f1, 878 0x7000d4f1,
813 0xf8f61bf4, 879 0xf8f61bf4,
814/* 0x005d: nsec */ 880/* 0x005d: nsec */
815 0xcf2c0800, 881 0xf990f900,
816/* 0x0062: nsec_loop */ 882 0xcf2c0880,
883/* 0x0066: nsec_loop */
817 0x2c090088, 884 0x2c090088,
818 0xbb0099cf, 885 0xbb0099cf,
819 0x9ea60298, 886 0x9ea60298,
820 0xf8f61ef4, 887 0xfcf61ef4,
821/* 0x0071: wait */ 888 0xf890fc80,
822 0xcf2c0800, 889/* 0x0079: wait */
823/* 0x0076: wait_loop */ 890 0xf990f900,
891 0xcf2c0880,
892/* 0x0082: wait_loop */
824 0xeeb20088, 893 0xeeb20088,
825 0x0000047e, 894 0x0000047e,
826 0xadfddab2, 895 0xadfddab2,
@@ -828,28 +897,29 @@ uint32_t nv108_pwr_code[] = {
828 0x2c09100b, 897 0x2c09100b,
829 0xbb0099cf, 898 0xbb0099cf,
830 0x9ba60298, 899 0x9ba60298,
831/* 0x0093: wait_done */ 900/* 0x009f: wait_done */
832 0xf8e61ef4, 901 0xfce61ef4,
833/* 0x0095: intr_watchdog */ 902 0xf890fc80,
903/* 0x00a5: intr_watchdog */
834 0x03e99800, 904 0x03e99800,
835 0xf40096b0, 905 0xf40096b0,
836 0x0a98280b, 906 0x0a98280b,
837 0x029abb9a, 907 0x029abb9a,
838 0x0d0e1cf4, 908 0x0d0e1cf4,
839 0x01de7e01, 909 0x02617e01,
840 0xf494bd00, 910 0xf494bd00,
841/* 0x00b2: intr_watchdog_next_time */ 911/* 0x00c2: intr_watchdog_next_time */
842 0x0a98140e, 912 0x0a98140e,
843 0x00a6b09b, 913 0x00a6b09b,
844 0xa6080bf4, 914 0xa6080bf4,
845 0x061cf49a, 915 0x061cf49a,
846/* 0x00c0: intr_watchdog_next_time_set */ 916/* 0x00d0: intr_watchdog_next_time_set */
847/* 0x00c3: intr_watchdog_next_proc */ 917/* 0x00d3: intr_watchdog_next_proc */
848 0xb59b09b5, 918 0xb59b09b5,
849 0xe0b603e9, 919 0xe0b603e9,
850 0x68e6b158, 920 0x68e6b158,
851 0xc81bf402, 921 0xc81bf402,
852/* 0x00d2: intr */ 922/* 0x00e2: intr */
853 0x00f900f8, 923 0x00f900f8,
854 0x80f904bd, 924 0x80f904bd,
855 0xa0f990f9, 925 0xa0f990f9,
@@ -865,13 +935,13 @@ uint32_t nv108_pwr_code[] = {
865 0xc40088cf, 935 0xc40088cf,
866 0x0bf40289, 936 0x0bf40289,
867 0x9b00b51f, 937 0x9b00b51f,
868 0x957e580e, 938 0xa57e580e,
869 0x09980000, 939 0x09980000,
870 0x0096b09b, 940 0x0096b09b,
871 0x000d0bf4, 941 0x000d0bf4,
872 0x0009f634, 942 0x0009f634,
873 0x09b504bd, 943 0x09b504bd,
874/* 0x0125: intr_skip_watchdog */ 944/* 0x0135: intr_skip_watchdog */
875 0x0089e49a, 945 0x0089e49a,
876 0x360bf408, 946 0x360bf408,
877 0xcf068849, 947 0xcf068849,
@@ -881,20 +951,20 @@ uint32_t nv108_pwr_code[] = {
881 0xc0f900cc, 951 0xc0f900cc,
882 0xf14f484e, 952 0xf14f484e,
883 0x0d5453e3, 953 0x0d5453e3,
884 0x023f7e00, 954 0x02c27e00,
885 0x40c0fc00, 955 0x40c0fc00,
886 0x0cf604c0, 956 0x0cf604c0,
887/* 0x0157: intr_subintr_skip_fifo */ 957/* 0x0167: intr_subintr_skip_fifo */
888 0x4004bd00, 958 0x4004bd00,
889 0x09f60688, 959 0x09f60688,
890/* 0x015f: intr_skip_subintr */ 960/* 0x016f: intr_skip_subintr */
891 0xc404bd00, 961 0xc404bd00,
892 0x0bf42089, 962 0x0bf42089,
893 0xbfa4f107, 963 0xbfa4f107,
894/* 0x0169: intr_skip_pause */ 964/* 0x0179: intr_skip_pause */
895 0x4089c4ff, 965 0x4089c4ff,
896 0xf1070bf4, 966 0xf1070bf4,
897/* 0x0173: intr_skip_user0 */ 967/* 0x0183: intr_skip_user0 */
898 0x00ffbfa4, 968 0x00ffbfa4,
899 0x0008f604, 969 0x0008f604,
900 0x80fc04bd, 970 0x80fc04bd,
@@ -904,304 +974,417 @@ uint32_t nv108_pwr_code[] = {
904 0xfca0fcb0, 974 0xfca0fcb0,
905 0xfc80fc90, 975 0xfc80fc90,
906 0x0032f400, 976 0x0032f400,
907/* 0x0196: timer */ 977/* 0x01a6: ticks_from_ns */
908 0x32f401f8, 978 0xc0f901f8,
909 0x03f89810, 979 0xd7f1b0f9,
910 0xf40086b0, 980 0xd3f00144,
911 0xfeb53a1c, 981 0x7721f500,
912 0xf6380003, 982 0xe8ccec03,
983 0x00b4b003,
984 0xec120bf4,
985 0xf103e8ee,
986 0xf00144d7,
987 0x21f500d3,
988/* 0x01ce: ticks_from_ns_quit */
989 0xceb20377,
990 0xc0fcb0fc,
991/* 0x01d6: ticks_from_us */
992 0xc0f900f8,
993 0xd7f1b0f9,
994 0xd3f00144,
995 0x7721f500,
996 0xb0ceb203,
997 0x0bf400b4,
998/* 0x01ef: ticks_from_us_quit */
999 0xfce4bd05,
1000 0xf8c0fcb0,
1001/* 0x01f5: ticks_to_us */
1002 0x44d7f100,
1003 0x00d3f001,
1004 0xf8ecedff,
1005/* 0x0201: timer */
1006 0xf990f900,
1007 0x1032f480,
1008 0xb003f898,
1009 0x1cf40086,
1010 0x0084bd4a,
1011 0x0008f638,
1012 0x340804bd,
1013 0x980088cf,
1014 0x98bb9a09,
1015 0x00e9bb02,
1016 0x0803feb5,
1017 0x0088cf08,
1018 0xf40284f0,
1019 0x34081c1b,
1020 0xa60088cf,
1021 0x080bf4e0,
1022 0x1cf4e8a6,
1023/* 0x0245: timer_reset */
1024 0xf634000d,
1025 0x04bd000e,
1026/* 0x024f: timer_enable */
1027 0x089a0eb5,
1028 0xf6380001,
913 0x04bd0008, 1029 0x04bd0008,
914 0x88cf0808, 1030/* 0x0258: timer_done */
915 0x0284f000, 1031 0xfc1031f4,
916 0x081c1bf4, 1032 0xf890fc80,
917 0x0088cf34, 1033/* 0x0261: send_proc */
918 0x0bf4e0a6, 1034 0xf980f900,
919 0xf4e8a608, 1035 0x05e89890,
920/* 0x01c6: timer_reset */ 1036 0xf004e998,
921 0x3400161e, 1037 0x89a60486,
922 0xbd000ef6, 1038 0xc42a0bf4,
923 0x9a0eb504, 1039 0x88940398,
924/* 0x01d0: timer_enable */ 1040 0x1880b604,
925 0x38000108, 1041 0x98008ebb,
926 0xbd0008f6, 1042 0x8ab500fa,
927/* 0x01d9: timer_done */ 1043 0x018db500,
928 0x1031f404, 1044 0xb5028cb5,
929/* 0x01de: send_proc */ 1045 0x90b6038b,
930 0x80f900f8, 1046 0x0794f001,
931 0xe89890f9, 1047 0xf404e9b5,
932 0x04e99805, 1048/* 0x029a: send_done */
933 0xa60486f0, 1049 0x90fc0231,
934 0x2a0bf489, 1050 0x00f880fc,
935 0x940398c4, 1051/* 0x02a0: find */
936 0x80b60488, 1052 0x580880f9,
937 0x008ebb18, 1053/* 0x02a7: find_loop */
938 0xb500fa98, 1054 0x980131f4,
939 0x8db5008a, 1055 0xaea6008a,
940 0x028cb501, 1056 0xb6100bf4,
941 0xb6038bb5, 1057 0x86b15880,
942 0x94f00190, 1058 0x1bf40268,
943 0x04e9b507, 1059 0x0132f4f1,
944/* 0x0217: send_done */ 1060/* 0x02bc: find_done */
945 0xfc0231f4, 1061 0x80fc8eb2,
946 0xf880fc90, 1062/* 0x02c2: send */
947/* 0x021d: find */ 1063 0xa07e00f8,
948 0x0880f900, 1064 0x01f40002,
949 0x0131f458, 1065/* 0x02cb: recv */
950/* 0x0224: find_loop */ 1066 0xf900f89b,
951 0xa6008a98, 1067 0x9880f990,
952 0x100bf4ae, 1068 0xe99805e8,
953 0xb15880b6, 1069 0x0132f404,
954 0xf4026886, 1070 0x0bf489a6,
955 0x32f4f11b, 1071 0x0389c43c,
956/* 0x0239: find_done */ 1072 0xf00180b6,
957 0xfc8eb201, 1073 0xe8b50784,
958/* 0x023f: send */ 1074 0x02ea9805,
959 0x7e00f880, 1075 0x8ffef0f9,
960 0xf400021d, 1076 0xb2f0f901,
961 0x00f89b01, 1077 0x049994ef,
962/* 0x0248: recv */ 1078 0xb600e9bb,
963 0x9805e898, 1079 0xeb9818e0,
964 0x32f404e9, 1080 0x02ec9803,
965 0xf489a601, 1081 0x9801ed98,
966 0x89c43c0b, 1082 0xa5f900ee,
967 0x0180b603, 1083 0xf8fef0fc,
968 0xb50784f0, 1084 0x0131f400,
969 0xea9805e8, 1085/* 0x0316: recv_done */
970 0xfef0f902, 1086 0x80fcf0fc,
971 0xf0f9018f, 1087 0x00f890fc,
972 0x9994efb2, 1088/* 0x031c: init */
973 0x00e9bb04, 1089 0xcf010841,
974 0x9818e0b6, 1090 0x11e70011,
975 0xec9803eb, 1091 0x14b60109,
976 0x01ed9802, 1092 0x0014fe08,
977 0xf900ee98, 1093 0xf000e041,
978 0xfef0fca5, 1094 0x1c000013,
979 0x31f400f8,
980/* 0x028f: recv_done */
981 0xf8f0fc01,
982/* 0x0291: init */
983 0x01084100,
984 0xe70011cf,
985 0xb6010911,
986 0x14fe0814,
987 0x00e04100,
988 0x000013f0,
989 0x0001f61c,
990 0xff0104bd,
991 0x01f61400,
992 0x0104bd00,
993 0x0015f102,
994 0xf6100008,
995 0x04bd0001,
996 0xf000d241,
997 0x10fe0013,
998 0x1031f400,
999 0x38000101,
1000 0xbd0001f6, 1095 0xbd0001f6,
1001/* 0x02db: init_proc */ 1096 0x00ff0104,
1002 0x98580f04, 1097 0x0001f614,
1003 0x16b001f1, 1098 0x020104bd,
1004 0xfa0bf400, 1099 0x080015f1,
1005 0xf0b615f9, 1100 0x01f61000,
1006 0xf20ef458,
1007/* 0x02ec: host_send */
1008 0xcf04b041,
1009 0xa0420011,
1010 0x0022cf04,
1011 0x0bf412a6,
1012 0x071ec42e,
1013 0xb704ee94,
1014 0x980270e0,
1015 0xec9803eb,
1016 0x01ed9802,
1017 0x7e00ee98,
1018 0xb600023f,
1019 0x1ec40110,
1020 0x04b0400f,
1021 0xbd000ef6,
1022 0xc70ef404,
1023/* 0x0328: host_send_done */
1024/* 0x032a: host_recv */
1025 0x494100f8,
1026 0x5413f14e,
1027 0xf4e1a652,
1028/* 0x0336: host_recv_wait */
1029 0xcc41b90b,
1030 0x0011cf04,
1031 0xcf04c842,
1032 0x16f00022,
1033 0xf412a608,
1034 0x23c4ef0b,
1035 0x0434b607,
1036 0x02f030b7,
1037 0xb5033bb5,
1038 0x3db5023c,
1039 0x003eb501,
1040 0xf00120b6,
1041 0xc8400f24,
1042 0x0002f604,
1043 0x400204bd,
1044 0x02f60000,
1045 0xf804bd00,
1046/* 0x0379: host_init */
1047 0x00804100,
1048 0xf11014b6,
1049 0x40027015,
1050 0x01f604d0,
1051 0x4104bd00, 1101 0x4104bd00,
1102 0x13f000e2,
1103 0x0010fe00,
1104 0x011031f4,
1105 0xf6380001,
1106 0x04bd0001,
1107/* 0x0366: init_proc */
1108 0xf198580f,
1109 0x0016b001,
1110 0xf9fa0bf4,
1111 0x58f0b615,
1112/* 0x0377: mulu32_32_64 */
1113 0xf9f20ef4,
1114 0xf920f910,
1115 0x9540f930,
1116 0xd29510e1,
1117 0xbdc4bd10,
1118 0xc0edffb4,
1119 0xb2301dff,
1120 0xff34f134,
1121 0x1034b6ff,
1122 0xbb1045b6,
1123 0xb4bb00c3,
1124 0x30e2ff01,
1125 0x34f134b2,
1126 0x34b6ffff,
1127 0x1045b610,
1128 0xbb00c3bb,
1129 0x12ff01b4,
1130 0x00b3bb30,
1131 0x30fc40fc,
1132 0x10fc20fc,
1133/* 0x03c6: host_send */
1134 0xb04100f8,
1135 0x0011cf04,
1136 0xcf04a042,
1137 0x12a60022,
1138 0xc42e0bf4,
1139 0xee94071e,
1140 0x70e0b704,
1141 0x03eb9802,
1142 0x9802ec98,
1143 0xee9801ed,
1144 0x02c27e00,
1145 0x0110b600,
1146 0x400f1ec4,
1147 0x0ef604b0,
1148 0xf404bd00,
1149/* 0x0402: host_send_done */
1150 0x00f8c70e,
1151/* 0x0404: host_recv */
1152 0xf14e4941,
1153 0xa6525413,
1154 0xb90bf4e1,
1155/* 0x0410: host_recv_wait */
1156 0xcf04cc41,
1157 0xc8420011,
1158 0x0022cf04,
1159 0xa60816f0,
1160 0xef0bf412,
1161 0xb60723c4,
1162 0x30b70434,
1163 0x3bb502f0,
1164 0x023cb503,
1165 0xb5013db5,
1166 0x20b6003e,
1167 0x0f24f001,
1168 0xf604c840,
1169 0x04bd0002,
1170 0x00004002,
1171 0xbd0002f6,
1172/* 0x0453: host_init */
1173 0x4100f804,
1052 0x14b60080, 1174 0x14b60080,
1053 0xf015f110, 1175 0x7015f110,
1054 0x04dc4002, 1176 0x04d04002,
1177 0xbd0001f6,
1178 0x00804104,
1179 0xf11014b6,
1180 0x4002f015,
1181 0x01f604dc,
1182 0x0104bd00,
1183 0x04c44001,
1055 0xbd0001f6, 1184 0xbd0001f6,
1056 0x40010104, 1185/* 0x0483: memx_func_enter */
1057 0x01f604c4, 1186 0xf100f804,
1058 0xf804bd00, 1187 0xf1162067,
1059/* 0x03a9: memx_func_enter */ 1188 0xf1f55d77,
1060 0x40040600, 1189 0xb2ffff73,
1061 0x06f607e0, 1190 0x00047e6e,
1062/* 0x03b3: memx_func_enter_wait */ 1191 0xfdd8b200,
1063 0x4604bd00, 1192 0x60f90487,
1064 0x66cf07c0, 1193 0xd0fc80f9,
1065 0x0464f000, 1194 0x2e7ee0fc,
1066 0x98f70bf4, 1195 0x77f10000,
1067 0x10b60016, 1196 0x73f1fffe,
1068/* 0x03c7: memx_func_leave */ 1197 0x6eb2ffff,
1069 0x0600f804, 1198 0x0000047e,
1070 0x07e44004, 1199 0x87fdd8b2,
1071 0xbd0006f6, 1200 0xf960f904,
1072/* 0x03d1: memx_func_leave_wait */ 1201 0xfcd0fc80,
1073 0x07c04604, 1202 0x002e7ee0,
1074 0xf00066cf, 1203 0xf067f100,
1075 0x1bf40464, 1204 0x7e6eb226,
1076/* 0x03df: memx_func_wr32 */ 1205 0xb2000004,
1077 0x9800f8f7, 1206 0x0487fdd8,
1078 0x15980016, 1207 0x80f960f9,
1079 0x0810b601,
1080 0x50f960f9,
1081 0xe0fcd0fc, 1208 0xe0fcd0fc,
1082 0x00002e7e, 1209 0x00002e7e,
1083 0xf40242b6, 1210 0xe0400406,
1084 0x00f8e81b, 1211 0x0006f607,
1085/* 0x03fc: memx_func_wait */ 1212/* 0x04ea: memx_func_enter_wait */
1086 0x88cf2c08, 1213 0xc04604bd,
1087 0x001e9800, 1214 0x0066cf07,
1088 0x98011d98, 1215 0xf40464f0,
1089 0x1b98021c, 1216 0x2c06f70b,
1090 0x1010b603, 1217 0xb50066cf,
1091 0x0000717e, 1218 0x00f8ee06,
1092/* 0x0416: memx_func_delay */ 1219/* 0x0500: memx_func_leave */
1093 0x1e9800f8, 1220 0x66cf2c06,
1094 0x0410b600, 1221 0xef06b500,
1095 0x00005d7e, 1222 0xe4400406,
1096/* 0x0422: memx_exec */ 1223 0x0006f607,
1097 0xe0f900f8, 1224/* 0x0512: memx_func_leave_wait */
1098 0xc1b2d0f9, 1225 0xc04604bd,
1099/* 0x042a: memx_exec_next */ 1226 0x0066cf07,
1100 0x1398b2b2, 1227 0xf40464f0,
1101 0x0410b600, 1228 0x67f1f71b,
1102 0xf0103495, 1229 0x77f126f0,
1103 0x35980c30, 1230 0x73f00001,
1104 0xa655f9de, 1231 0x7e6eb200,
1105 0xed1ef412, 1232 0xb2000004,
1233 0x0587fdd8,
1234 0x80f960f9,
1106 0xe0fcd0fc, 1235 0xe0fcd0fc,
1107 0x00023f7e, 1236 0x00002e7e,
1108/* 0x044a: memx_info */ 1237 0x162067f1,
1109 0xac4c00f8, 1238 0x047e6eb2,
1239 0xd8b20000,
1240 0xf90587fd,
1241 0xfc80f960,
1242 0x7ee0fcd0,
1243 0xf100002e,
1244 0xf00aa277,
1245 0x6eb20073,
1246 0x0000047e,
1247 0x87fdd8b2,
1248 0xf960f905,
1249 0xfcd0fc80,
1250 0x002e7ee0,
1251/* 0x057b: memx_func_wait_vblank */
1252 0xb600f800,
1253 0x00f80410,
1254/* 0x0580: memx_func_wr32 */
1255 0x98001698,
1256 0x10b60115,
1257 0xf960f908,
1258 0xfcd0fc50,
1259 0x002e7ee0,
1260 0x0242b600,
1261 0xf8e81bf4,
1262/* 0x059d: memx_func_wait */
1263 0xcf2c0800,
1264 0x1e980088,
1265 0x011d9800,
1266 0x98021c98,
1267 0x10b6031b,
1268 0x00797e10,
1269/* 0x05b7: memx_func_delay */
1270 0x9800f800,
1271 0x10b6001e,
1272 0x005d7e04,
1273/* 0x05c3: memx_exec */
1274 0xf900f800,
1275 0xb2d0f9e0,
1276/* 0x05cb: memx_exec_next */
1277 0x98b2b2c1,
1278 0x10b60013,
1279 0xf034e704,
1280 0xe033e701,
1281 0x0132b601,
1282 0x980c30f0,
1283 0x55f9de35,
1284 0x1ef412a6,
1285 0xee0b98e5,
1286 0xbbef0c98,
1287 0xc44b02cb,
1288 0x00bbcf07,
1289 0xe0fcd0fc,
1290 0x0002c27e,
1291/* 0x0602: memx_info */
1292 0xc04c00f8,
1110 0x08004b03, 1293 0x08004b03,
1111 0x00023f7e, 1294 0x0002c27e,
1112/* 0x0456: memx_recv */ 1295/* 0x060e: memx_recv */
1113 0xd6b000f8, 1296 0xd6b000f8,
1114 0xc90bf401, 1297 0xb20bf401,
1115 0xf400d6b0, 1298 0xf400d6b0,
1116 0x00f8eb0b, 1299 0x00f8eb0b,
1117/* 0x0464: memx_init */ 1300/* 0x061c: memx_init */
1118/* 0x0466: perf_recv */ 1301/* 0x061e: perf_recv */
1119 0x00f800f8, 1302 0x00f800f8,
1120/* 0x0468: perf_init */ 1303/* 0x0620: perf_init */
1121/* 0x046a: i2c_drive_scl */ 1304/* 0x0622: i2c_drive_scl */
1122 0x36b000f8, 1305 0x36b000f8,
1123 0x0d0bf400, 1306 0x0d0bf400,
1124 0xf607e040, 1307 0xf607e040,
1125 0x04bd0001, 1308 0x04bd0001,
1126/* 0x047a: i2c_drive_scl_lo */ 1309/* 0x0632: i2c_drive_scl_lo */
1127 0xe44000f8, 1310 0xe44000f8,
1128 0x0001f607, 1311 0x0001f607,
1129 0x00f804bd, 1312 0x00f804bd,
1130/* 0x0484: i2c_drive_sda */ 1313/* 0x063c: i2c_drive_sda */
1131 0xf40036b0, 1314 0xf40036b0,
1132 0xe0400d0b, 1315 0xe0400d0b,
1133 0x0002f607, 1316 0x0002f607,
1134 0x00f804bd, 1317 0x00f804bd,
1135/* 0x0494: i2c_drive_sda_lo */ 1318/* 0x064c: i2c_drive_sda_lo */
1136 0xf607e440, 1319 0xf607e440,
1137 0x04bd0002, 1320 0x04bd0002,
1138/* 0x049e: i2c_sense_scl */ 1321/* 0x0656: i2c_sense_scl */
1139 0x32f400f8, 1322 0x32f400f8,
1140 0x07c44301, 1323 0x07c44301,
1141 0xfd0033cf, 1324 0xfd0033cf,
1142 0x0bf40431, 1325 0x0bf40431,
1143 0x0131f406, 1326 0x0131f406,
1144/* 0x04b0: i2c_sense_scl_done */ 1327/* 0x0668: i2c_sense_scl_done */
1145/* 0x04b2: i2c_sense_sda */ 1328/* 0x066a: i2c_sense_sda */
1146 0x32f400f8, 1329 0x32f400f8,
1147 0x07c44301, 1330 0x07c44301,
1148 0xfd0033cf, 1331 0xfd0033cf,
1149 0x0bf40432, 1332 0x0bf40432,
1150 0x0131f406, 1333 0x0131f406,
1151/* 0x04c4: i2c_sense_sda_done */ 1334/* 0x067c: i2c_sense_sda_done */
1152/* 0x04c6: i2c_raise_scl */ 1335/* 0x067e: i2c_raise_scl */
1153 0x40f900f8, 1336 0x40f900f8,
1154 0x03089844, 1337 0x03089844,
1155 0x046a7e01, 1338 0x06227e01,
1156/* 0x04d1: i2c_raise_scl_wait */ 1339/* 0x0689: i2c_raise_scl_wait */
1157 0x03e84e00, 1340 0x03e84e00,
1158 0x00005d7e, 1341 0x00005d7e,
1159 0x00049e7e, 1342 0x0006567e,
1160 0xb60901f4, 1343 0xb60901f4,
1161 0x1bf40142, 1344 0x1bf40142,
1162/* 0x04e5: i2c_raise_scl_done */ 1345/* 0x069d: i2c_raise_scl_done */
1163 0xf840fcef, 1346 0xf840fcef,
1164/* 0x04e9: i2c_start */ 1347/* 0x06a1: i2c_start */
1165 0x049e7e00, 1348 0x06567e00,
1166 0x0d11f400, 1349 0x0d11f400,
1167 0x0004b27e, 1350 0x00066a7e,
1168 0xf40611f4, 1351 0xf40611f4,
1169/* 0x04fa: i2c_start_rep */ 1352/* 0x06b2: i2c_start_rep */
1170 0x00032e0e, 1353 0x00032e0e,
1171 0x00046a7e, 1354 0x0006227e,
1172 0x847e0103, 1355 0x3c7e0103,
1173 0x76bb0004, 1356 0x76bb0006,
1174 0x0465b600, 1357 0x0465b600,
1175 0x659450f9, 1358 0x659450f9,
1176 0x0256bb04, 1359 0x0256bb04,
1177 0x75fd50bd, 1360 0x75fd50bd,
1178 0x7e50fc04, 1361 0x7e50fc04,
1179 0xb60004c6, 1362 0xb600067e,
1180 0x11f40464, 1363 0x11f40464,
1181/* 0x0525: i2c_start_send */ 1364/* 0x06dd: i2c_start_send */
1182 0x7e00031d, 1365 0x7e00031d,
1183 0x4e000484, 1366 0x4e00063c,
1184 0x5d7e1388, 1367 0x5d7e1388,
1185 0x00030000, 1368 0x00030000,
1186 0x00046a7e, 1369 0x0006227e,
1187 0x7e13884e, 1370 0x7e13884e,
1188/* 0x053f: i2c_start_out */ 1371/* 0x06f7: i2c_start_out */
1189 0xf800005d, 1372 0xf800005d,
1190/* 0x0541: i2c_stop */ 1373/* 0x06f9: i2c_stop */
1191 0x7e000300, 1374 0x7e000300,
1192 0x0300046a, 1375 0x03000622,
1193 0x04847e00, 1376 0x063c7e00,
1194 0x03e84e00, 1377 0x03e84e00,
1195 0x00005d7e, 1378 0x00005d7e,
1196 0x6a7e0103, 1379 0x227e0103,
1197 0x884e0004, 1380 0x884e0006,
1198 0x005d7e13, 1381 0x005d7e13,
1199 0x7e010300, 1382 0x7e010300,
1200 0x4e000484, 1383 0x4e00063c,
1201 0x5d7e1388, 1384 0x5d7e1388,
1202 0x00f80000, 1385 0x00f80000,
1203/* 0x0570: i2c_bitw */ 1386/* 0x0728: i2c_bitw */
1204 0x0004847e, 1387 0x00063c7e,
1205 0x7e03e84e, 1388 0x7e03e84e,
1206 0xbb00005d, 1389 0xbb00005d,
1207 0x65b60076, 1390 0x65b60076,
@@ -1209,44 +1392,44 @@ uint32_t nv108_pwr_code[] = {
1209 0x56bb0465, 1392 0x56bb0465,
1210 0xfd50bd02, 1393 0xfd50bd02,
1211 0x50fc0475, 1394 0x50fc0475,
1212 0x0004c67e, 1395 0x00067e7e,
1213 0xf40464b6, 1396 0xf40464b6,
1214 0x884e1711, 1397 0x884e1711,
1215 0x005d7e13, 1398 0x005d7e13,
1216 0x7e000300, 1399 0x7e000300,
1217 0x4e00046a, 1400 0x4e000622,
1218 0x5d7e1388, 1401 0x5d7e1388,
1219/* 0x05ae: i2c_bitw_out */ 1402/* 0x0766: i2c_bitw_out */
1220 0x00f80000, 1403 0x00f80000,
1221/* 0x05b0: i2c_bitr */ 1404/* 0x0768: i2c_bitr */
1222 0x847e0103, 1405 0x3c7e0103,
1223 0xe84e0004, 1406 0xe84e0006,
1224 0x005d7e03, 1407 0x005d7e03,
1225 0x0076bb00, 1408 0x0076bb00,
1226 0xf90465b6, 1409 0xf90465b6,
1227 0x04659450, 1410 0x04659450,
1228 0xbd0256bb, 1411 0xbd0256bb,
1229 0x0475fd50, 1412 0x0475fd50,
1230 0xc67e50fc, 1413 0x7e7e50fc,
1231 0x64b60004, 1414 0x64b60006,
1232 0x1a11f404, 1415 0x1a11f404,
1233 0x0004b27e, 1416 0x00066a7e,
1234 0x6a7e0003, 1417 0x227e0003,
1235 0x884e0004, 1418 0x884e0006,
1236 0x005d7e13, 1419 0x005d7e13,
1237 0x013cf000, 1420 0x013cf000,
1238/* 0x05f3: i2c_bitr_done */ 1421/* 0x07ab: i2c_bitr_done */
1239 0xf80131f4, 1422 0xf80131f4,
1240/* 0x05f5: i2c_get_byte */ 1423/* 0x07ad: i2c_get_byte */
1241 0x04000500, 1424 0x04000500,
1242/* 0x05f9: i2c_get_byte_next */ 1425/* 0x07b1: i2c_get_byte_next */
1243 0x0154b608, 1426 0x0154b608,
1244 0xb60076bb, 1427 0xb60076bb,
1245 0x50f90465, 1428 0x50f90465,
1246 0xbb046594, 1429 0xbb046594,
1247 0x50bd0256, 1430 0x50bd0256,
1248 0xfc0475fd, 1431 0xfc0475fd,
1249 0x05b07e50, 1432 0x07687e50,
1250 0x0464b600, 1433 0x0464b600,
1251 0xfd2a11f4, 1434 0xfd2a11f4,
1252 0x42b60553, 1435 0x42b60553,
@@ -1257,11 +1440,11 @@ uint32_t nv108_pwr_code[] = {
1257 0x0256bb04, 1440 0x0256bb04,
1258 0x75fd50bd, 1441 0x75fd50bd,
1259 0x7e50fc04, 1442 0x7e50fc04,
1260 0xb6000570, 1443 0xb6000728,
1261/* 0x0642: i2c_get_byte_done */ 1444/* 0x07fa: i2c_get_byte_done */
1262 0x00f80464, 1445 0x00f80464,
1263/* 0x0644: i2c_put_byte */ 1446/* 0x07fc: i2c_put_byte */
1264/* 0x0646: i2c_put_byte_next */ 1447/* 0x07fe: i2c_put_byte_next */
1265 0x42b60804, 1448 0x42b60804,
1266 0x3854ff01, 1449 0x3854ff01,
1267 0xb60076bb, 1450 0xb60076bb,
@@ -1269,7 +1452,7 @@ uint32_t nv108_pwr_code[] = {
1269 0xbb046594, 1452 0xbb046594,
1270 0x50bd0256, 1453 0x50bd0256,
1271 0xfc0475fd, 1454 0xfc0475fd,
1272 0x05707e50, 1455 0x07287e50,
1273 0x0464b600, 1456 0x0464b600,
1274 0xb03411f4, 1457 0xb03411f4,
1275 0x1bf40046, 1458 0x1bf40046,
@@ -1278,21 +1461,21 @@ uint32_t nv108_pwr_code[] = {
1278 0x04659450, 1461 0x04659450,
1279 0xbd0256bb, 1462 0xbd0256bb,
1280 0x0475fd50, 1463 0x0475fd50,
1281 0xb07e50fc, 1464 0x687e50fc,
1282 0x64b60005, 1465 0x64b60007,
1283 0x0f11f404, 1466 0x0f11f404,
1284 0xb00076bb, 1467 0xb00076bb,
1285 0x1bf40136, 1468 0x1bf40136,
1286 0x0132f406, 1469 0x0132f406,
1287/* 0x069c: i2c_put_byte_done */ 1470/* 0x0854: i2c_put_byte_done */
1288/* 0x069e: i2c_addr */ 1471/* 0x0856: i2c_addr */
1289 0x76bb00f8, 1472 0x76bb00f8,
1290 0x0465b600, 1473 0x0465b600,
1291 0x659450f9, 1474 0x659450f9,
1292 0x0256bb04, 1475 0x0256bb04,
1293 0x75fd50bd, 1476 0x75fd50bd,
1294 0x7e50fc04, 1477 0x7e50fc04,
1295 0xb60004e9, 1478 0xb60006a1,
1296 0x11f40464, 1479 0x11f40464,
1297 0x2ec3e729, 1480 0x2ec3e729,
1298 0x0134b601, 1481 0x0134b601,
@@ -1302,32 +1485,32 @@ uint32_t nv108_pwr_code[] = {
1302 0x56bb0465, 1485 0x56bb0465,
1303 0xfd50bd02, 1486 0xfd50bd02,
1304 0x50fc0475, 1487 0x50fc0475,
1305 0x0006447e, 1488 0x0007fc7e,
1306/* 0x06e3: i2c_addr_done */ 1489/* 0x089b: i2c_addr_done */
1307 0xf80464b6, 1490 0xf80464b6,
1308/* 0x06e5: i2c_acquire_addr */ 1491/* 0x089d: i2c_acquire_addr */
1309 0xf8cec700, 1492 0xf8cec700,
1310 0xb705e4b6, 1493 0xb705e4b6,
1311 0xf8d014e0, 1494 0xf8d014e0,
1312/* 0x06f1: i2c_acquire */ 1495/* 0x08a9: i2c_acquire */
1313 0x06e57e00, 1496 0x089d7e00,
1314 0x00047e00, 1497 0x00047e00,
1315 0x03d9f000, 1498 0x03d9f000,
1316 0x00002e7e, 1499 0x00002e7e,
1317/* 0x0702: i2c_release */ 1500/* 0x08ba: i2c_release */
1318 0xe57e00f8, 1501 0x9d7e00f8,
1319 0x047e0006, 1502 0x047e0008,
1320 0xdaf00000, 1503 0xdaf00000,
1321 0x002e7e03, 1504 0x002e7e03,
1322/* 0x0713: i2c_recv */ 1505/* 0x08cb: i2c_recv */
1323 0xf400f800, 1506 0xf400f800,
1324 0xc1c70132, 1507 0xc1c70132,
1325 0x0214b6f8, 1508 0x0214b6f8,
1326 0xf52816b0, 1509 0xf52816b0,
1327 0xb801371f, 1510 0xb801371f,
1328 0x000bd413, 1511 0x000be813,
1329 0xb8003298, 1512 0xb8003298,
1330 0x000bac13, 1513 0x000bc013,
1331 0xf4003198, 1514 0xf4003198,
1332 0xd0f90231, 1515 0xd0f90231,
1333 0xd0f9e0f9, 1516 0xd0f9e0f9,
@@ -1339,7 +1522,7 @@ uint32_t nv108_pwr_code[] = {
1339 0x56bb0465, 1522 0x56bb0465,
1340 0xfd50bd02, 1523 0xfd50bd02,
1341 0x50fc0475, 1524 0x50fc0475,
1342 0x0006f17e, 1525 0x0008a97e,
1343 0xfc0464b6, 1526 0xfc0464b6,
1344 0x00d6b0d0, 1527 0x00d6b0d0,
1345 0x00b01bf5, 1528 0x00b01bf5,
@@ -1349,7 +1532,7 @@ uint32_t nv108_pwr_code[] = {
1349 0x0256bb04, 1532 0x0256bb04,
1350 0x75fd50bd, 1533 0x75fd50bd,
1351 0x7e50fc04, 1534 0x7e50fc04,
1352 0xb600069e, 1535 0xb6000856,
1353 0x11f50464, 1536 0x11f50464,
1354 0xc5c700cc, 1537 0xc5c700cc,
1355 0x0076bbe0, 1538 0x0076bbe0,
@@ -1357,8 +1540,8 @@ uint32_t nv108_pwr_code[] = {
1357 0x04659450, 1540 0x04659450,
1358 0xbd0256bb, 1541 0xbd0256bb,
1359 0x0475fd50, 1542 0x0475fd50,
1360 0x447e50fc, 1543 0xfc7e50fc,
1361 0x64b60006, 1544 0x64b60007,
1362 0xa911f504, 1545 0xa911f504,
1363 0xbb010500, 1546 0xbb010500,
1364 0x65b60076, 1547 0x65b60076,
@@ -1366,7 +1549,7 @@ uint32_t nv108_pwr_code[] = {
1366 0x56bb0465, 1549 0x56bb0465,
1367 0xfd50bd02, 1550 0xfd50bd02,
1368 0x50fc0475, 1551 0x50fc0475,
1369 0x00069e7e, 1552 0x0008567e,
1370 0xf50464b6, 1553 0xf50464b6,
1371 0xbb008711, 1554 0xbb008711,
1372 0x65b60076, 1555 0x65b60076,
@@ -1374,7 +1557,7 @@ uint32_t nv108_pwr_code[] = {
1374 0x56bb0465, 1557 0x56bb0465,
1375 0xfd50bd02, 1558 0xfd50bd02,
1376 0x50fc0475, 1559 0x50fc0475,
1377 0x0005f57e, 1560 0x0007ad7e,
1378 0xf40464b6, 1561 0xf40464b6,
1379 0x5bcb6711, 1562 0x5bcb6711,
1380 0x0076bbe0, 1563 0x0076bbe0,
@@ -1382,37 +1565,37 @@ uint32_t nv108_pwr_code[] = {
1382 0x04659450, 1565 0x04659450,
1383 0xbd0256bb, 1566 0xbd0256bb,
1384 0x0475fd50, 1567 0x0475fd50,
1385 0x417e50fc, 1568 0xf97e50fc,
1386 0x64b60005, 1569 0x64b60006,
1387 0xbd5bb204, 1570 0xbd5bb204,
1388 0x410ef474, 1571 0x410ef474,
1389/* 0x0818: i2c_recv_not_rd08 */ 1572/* 0x09d0: i2c_recv_not_rd08 */
1390 0xf401d6b0, 1573 0xf401d6b0,
1391 0x00053b1b, 1574 0x00053b1b,
1392 0x00069e7e, 1575 0x0008567e,
1393 0xc73211f4, 1576 0xc73211f4,
1394 0x447ee0c5, 1577 0xfc7ee0c5,
1395 0x11f40006, 1578 0x11f40007,
1396 0x7e000528, 1579 0x7e000528,
1397 0xf400069e, 1580 0xf4000856,
1398 0xb5c71f11, 1581 0xb5c71f11,
1399 0x06447ee0, 1582 0x07fc7ee0,
1400 0x1511f400, 1583 0x1511f400,
1401 0x0005417e, 1584 0x0006f97e,
1402 0xc5c774bd, 1585 0xc5c774bd,
1403 0x091bf408, 1586 0x091bf408,
1404 0xf40232f4, 1587 0xf40232f4,
1405/* 0x0856: i2c_recv_not_wr08 */ 1588/* 0x0a0e: i2c_recv_not_wr08 */
1406/* 0x0856: i2c_recv_done */ 1589/* 0x0a0e: i2c_recv_done */
1407 0xcec7030e, 1590 0xcec7030e,
1408 0x07027ef8, 1591 0x08ba7ef8,
1409 0xfce0fc00, 1592 0xfce0fc00,
1410 0x0912f4d0, 1593 0x0912f4d0,
1411 0x3f7e7cb2, 1594 0xc27e7cb2,
1412/* 0x086a: i2c_recv_exit */ 1595/* 0x0a22: i2c_recv_exit */
1413 0x00f80002, 1596 0x00f80002,
1414/* 0x086c: i2c_init */ 1597/* 0x0a24: i2c_init */
1415/* 0x086e: test_recv */ 1598/* 0x0a26: test_recv */
1416 0x584100f8, 1599 0x584100f8,
1417 0x0011cf04, 1600 0x0011cf04,
1418 0x400110b6, 1601 0x400110b6,
@@ -1420,28 +1603,28 @@ uint32_t nv108_pwr_code[] = {
1420 0xf104bd00, 1603 0xf104bd00,
1421 0xf1d900e7, 1604 0xf1d900e7,
1422 0x7e134fe3, 1605 0x7e134fe3,
1423 0xf8000196, 1606 0xf8000201,
1424/* 0x088d: test_init */ 1607/* 0x0a45: test_init */
1425 0x08004e00, 1608 0x08004e00,
1426 0x0001967e, 1609 0x0002017e,
1427/* 0x0896: idle_recv */ 1610/* 0x0a4e: idle_recv */
1428 0x00f800f8, 1611 0x00f800f8,
1429/* 0x0898: idle */ 1612/* 0x0a50: idle */
1430 0x410031f4, 1613 0x410031f4,
1431 0x11cf0454, 1614 0x11cf0454,
1432 0x0110b600, 1615 0x0110b600,
1433 0xf6045440, 1616 0xf6045440,
1434 0x04bd0001, 1617 0x04bd0001,
1435/* 0x08ac: idle_loop */ 1618/* 0x0a64: idle_loop */
1436 0x32f45801, 1619 0x32f45801,
1437/* 0x08b1: idle_proc */ 1620/* 0x0a69: idle_proc */
1438/* 0x08b1: idle_proc_exec */ 1621/* 0x0a69: idle_proc_exec */
1439 0xb210f902, 1622 0xb210f902,
1440 0x02487e1e, 1623 0x02cb7e1e,
1441 0xf410fc00, 1624 0xf410fc00,
1442 0x31f40911, 1625 0x31f40911,
1443 0xf00ef402, 1626 0xf00ef402,
1444/* 0x08c4: idle_proc_next */ 1627/* 0x0a7c: idle_proc_next */
1445 0xa65810b6, 1628 0xa65810b6,
1446 0xe81bf41f, 1629 0xe81bf41f,
1447 0xf4e002f4, 1630 0xf4e002f4,
@@ -1457,4 +1640,22 @@ uint32_t nv108_pwr_code[] = {
1457 0x00000000, 1640 0x00000000,
1458 0x00000000, 1641 0x00000000,
1459 0x00000000, 1642 0x00000000,
1643 0x00000000,
1644 0x00000000,
1645 0x00000000,
1646 0x00000000,
1647 0x00000000,
1648 0x00000000,
1649 0x00000000,
1650 0x00000000,
1651 0x00000000,
1652 0x00000000,
1653 0x00000000,
1654 0x00000000,
1655 0x00000000,
1656 0x00000000,
1657 0x00000000,
1658 0x00000000,
1659 0x00000000,
1660 0x00000000,
1460}; 1661};
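Every symbol address in the regenerated blob above shifts (memx_data_head moves from 0x03ac to 0x03c0, for instance) because the timestamp words and the arith.fuc routines were inserted ahead of them. One of those routines, mulu32_32_64, presumably composes a full 32x32->64 unsigned product from 16-bit partial products, since a narrow hardware multiplier cannot produce 64 bits directly; a generic C illustration of that standard decomposition, not the falcon code:

    #include <stdint.h>

    /* Assemble a 32x32->64 product from 16-bit halves: a*b equals
     * al*bl + (al*bh + ah*bl) << 16 + ah*bh << 32, and every partial
     * product fits comfortably within 64 bits. */
    static uint64_t mulu32_32_64(uint32_t a, uint32_t b)
    {
            uint32_t al = a & 0xffff, ah = a >> 16;
            uint32_t bl = b & 0xffff, bh = b >> 16;
            uint64_t lo  = (uint64_t)al * bl;
            uint64_t mid = (uint64_t)al * bh + (uint64_t)ah * bl;
            uint64_t hi  = (uint64_t)ah * bh;

            return lo + (mid << 16) + (hi << 32);
    }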
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
index 6744fcc06151..daa06c1c655e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#define NVKM_PPWR_CHIPSET GT215 25#define NVKM_PPWR_CHIPSET GT215
26#define HW_TICKS_PER_US 203 // should be 202.5
26 27
27//#define NVKM_FALCON_PC24 28//#define NVKM_FALCON_PC24
28//#define NVKM_FALCON_UNSHIFTED_IO 29//#define NVKM_FALCON_UNSHIFTED_IO
@@ -34,6 +35,7 @@
34.section #nva3_pwr_data 35.section #nva3_pwr_data
35#define INCLUDE_PROC 36#define INCLUDE_PROC
36#include "kernel.fuc" 37#include "kernel.fuc"
38#include "arith.fuc"
37#include "host.fuc" 39#include "host.fuc"
38#include "memx.fuc" 40#include "memx.fuc"
39#include "perf.fuc" 41#include "perf.fuc"
@@ -44,6 +46,7 @@
44 46
45#define INCLUDE_DATA 47#define INCLUDE_DATA
46#include "kernel.fuc" 48#include "kernel.fuc"
49#include "arith.fuc"
47#include "host.fuc" 50#include "host.fuc"
48#include "memx.fuc" 51#include "memx.fuc"
49#include "perf.fuc" 52#include "perf.fuc"
@@ -56,6 +59,7 @@
56.section #nva3_pwr_code 59.section #nva3_pwr_code
57#define INCLUDE_CODE 60#define INCLUDE_CODE
58#include "kernel.fuc" 61#include "kernel.fuc"
62#include "arith.fuc"
59#include "host.fuc" 63#include "host.fuc"
60#include "memx.fuc" 64#include "memx.fuc"
61#include "perf.fuc" 65#include "perf.fuc"
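For GT215 the define rounds the true rate up, as its comment notes (203 rather than 202.5 ticks per microsecond), so microsecond-to-tick conversions come out roughly 0.25% long. A quick back-of-envelope check, in C only for convenience:

    #include <stdio.h>

    int main(void)
    {
            const double true_rate = 202.5; /* ticks/us, per the comment above */
            const int rounded = 203;        /* HW_TICKS_PER_US for GT215 */
            const double want_us = 1000.0;

            double ticks  = want_us * rounded;  /* what the firmware counts */
            double got_us = ticks / true_rate;  /* what actually elapses */

            /* prints: 1000 us requested -> 1002.47 us elapsed (+0.247%) */
            printf("%.0f us requested -> %.2f us elapsed (+%.3f%%)\n",
                   want_us, got_us, (got_us / want_us - 1.0) * 100.0);
            return 0;
    }

Delays erring slightly long is the safe direction for the wait/delay paths that consume these conversions.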
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index e087ce3041be..64e97baabc3c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -24,8 +24,8 @@ uint32_t nva3_pwr_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x00000430, 27 0x00000512,
28 0x000003cd, 28 0x000004af,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x00000542, 49 0x000006e0,
50 0x00000534, 50 0x000006d2,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000546, 71 0x000006e4,
72 0x00000544, 72 0x000006e2,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000976, 93 0x00000b14,
94 0x00000819, 94 0x000009b7,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x0000099f, 115 0x00000b3d,
116 0x00000978, 116 0x00000b16,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x000009ab, 137 0x00000b49,
138 0x000009a9, 138 0x00000b47,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -227,25 +227,31 @@ uint32_t nva3_pwr_data[] = {
227 0x00000000, 227 0x00000000,
228 0x00000000, 228 0x00000000,
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00010000,
231 0x00000000,
232 0x0000046f,
233/* 0x037c: memx_func_next */
234 0x00000001, 230 0x00000001,
235 0x00000000, 231 0x00000000,
236 0x00000496, 232 0x00000551,
233/* 0x037c: memx_func_next */
237 0x00000002, 234 0x00000002,
235 0x00000000,
236 0x000005a8,
237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x000004b7, 239 0x0000063a,
240 0x00040003, 240 0x00040004,
241 0x00000000,
242 0x00000656,
243 0x00010005,
244 0x00000000,
245 0x00000673,
246 0x00010006,
241 0x00000000, 247 0x00000000,
242 0x000004d3, 248 0x000005f8,
243 0x00010004, 249/* 0x03b8: memx_func_tail */
250/* 0x03b8: memx_ts_start */
244 0x00000000, 251 0x00000000,
245 0x000004f0, 252/* 0x03bc: memx_ts_end */
246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */
248 0x00000000, 253 0x00000000,
254/* 0x03c0: memx_data_head */
249 0x00000000, 255 0x00000000,
250 0x00000000, 256 0x00000000,
251 0x00000000, 257 0x00000000,
@@ -757,8 +763,9 @@ uint32_t nva3_pwr_data[] = {
757 0x00000000, 763 0x00000000,
758 0x00000000, 764 0x00000000,
759 0x00000000, 765 0x00000000,
760/* 0x0bac: memx_data_tail */ 766 0x00000000,
761/* 0x0bac: i2c_scl_map */ 767/* 0x0bc0: memx_data_tail */
768/* 0x0bc0: i2c_scl_map */
762 0x00001000, 769 0x00001000,
763 0x00004000, 770 0x00004000,
764 0x00010000, 771 0x00010000,
@@ -769,7 +776,7 @@ uint32_t nva3_pwr_data[] = {
769 0x01000000, 776 0x01000000,
770 0x04000000, 777 0x04000000,
771 0x10000000, 778 0x10000000,
772/* 0x0bd4: i2c_sda_map */ 779/* 0x0be8: i2c_sda_map */
773 0x00002000, 780 0x00002000,
774 0x00008000, 781 0x00008000,
775 0x00020000, 782 0x00020000,
@@ -780,7 +787,7 @@ uint32_t nva3_pwr_data[] = {
780 0x02000000, 787 0x02000000,
781 0x08000000, 788 0x08000000,
782 0x20000000, 789 0x20000000,
783/* 0x0bfc: i2c_ctrl */ 790/* 0x0c10: i2c_ctrl */
784 0x0000e138, 791 0x0000e138,
785 0x0000e150, 792 0x0000e150,
786 0x0000e168, 793 0x0000e168,
@@ -841,15 +848,10 @@ uint32_t nva3_pwr_data[] = {
841 0x00000000, 848 0x00000000,
842 0x00000000, 849 0x00000000,
843 0x00000000, 850 0x00000000,
844 0x00000000,
845 0x00000000,
846 0x00000000,
847 0x00000000,
848 0x00000000,
849}; 851};
850 852
851uint32_t nva3_pwr_code[] = { 853uint32_t nva3_pwr_code[] = {
852 0x030d0ef5, 854 0x039e0ef5,
853/* 0x0004: rd32 */ 855/* 0x0004: rd32 */
854 0x07a007f1, 856 0x07a007f1,
855 0xd00604b6, 857 0xd00604b6,
@@ -885,19 +887,22 @@ uint32_t nva3_pwr_code[] = {
885 0xd4f100dd, 887 0xd4f100dd,
886 0x1bf47000, 888 0x1bf47000,
887/* 0x007f: nsec */ 889/* 0x007f: nsec */
888 0xf000f8f2, 890 0xf900f8f2,
891 0xf080f990,
889 0x84b62c87, 892 0x84b62c87,
890 0x0088cf06, 893 0x0088cf06,
891/* 0x0088: nsec_loop */ 894/* 0x008c: nsec_loop */
892 0xb62c97f0, 895 0xb62c97f0,
893 0x99cf0694, 896 0x99cf0694,
894 0x0298bb00, 897 0x0298bb00,
895 0xf4069eb8, 898 0xf4069eb8,
896 0x00f8f11e, 899 0x80fcf11e,
897/* 0x009c: wait */ 900 0x00f890fc,
901/* 0x00a4: wait */
902 0x80f990f9,
898 0xb62c87f0, 903 0xb62c87f0,
899 0x88cf0684, 904 0x88cf0684,
900/* 0x00a5: wait_loop */ 905/* 0x00b1: wait_loop */
901 0x02eeb900, 906 0x02eeb900,
902 0xb90421f4, 907 0xb90421f4,
903 0xadfd02da, 908 0xadfd02da,
@@ -907,28 +912,29 @@ uint32_t nva3_pwr_code[] = {
907 0x0099cf06, 912 0x0099cf06,
908 0xb80298bb, 913 0xb80298bb,
909 0x1ef4069b, 914 0x1ef4069b,
910/* 0x00c9: wait_done */ 915/* 0x00d5: wait_done */
911/* 0x00cb: intr_watchdog */ 916 0xfc80fcdf,
912 0x9800f8df, 917/* 0x00db: intr_watchdog */
918 0x9800f890,
913 0x96b003e9, 919 0x96b003e9,
914 0x2a0bf400, 920 0x2a0bf400,
915 0xbb9a0a98, 921 0xbb9a0a98,
916 0x1cf4029a, 922 0x1cf4029a,
917 0x01d7f00f, 923 0x01d7f00f,
918 0x025421f5, 924 0x02dd21f5,
919 0x0ef494bd, 925 0x0ef494bd,
920/* 0x00e9: intr_watchdog_next_time */ 926/* 0x00f9: intr_watchdog_next_time */
921 0x9b0a9815, 927 0x9b0a9815,
922 0xf400a6b0, 928 0xf400a6b0,
923 0x9ab8090b, 929 0x9ab8090b,
924 0x061cf406, 930 0x061cf406,
925/* 0x00f8: intr_watchdog_next_time_set */ 931/* 0x0108: intr_watchdog_next_time_set */
926/* 0x00fb: intr_watchdog_next_proc */ 932/* 0x010b: intr_watchdog_next_proc */
927 0x809b0980, 933 0x809b0980,
928 0xe0b603e9, 934 0xe0b603e9,
929 0x68e6b158, 935 0x68e6b158,
930 0xc61bf402, 936 0xc61bf402,
931/* 0x010a: intr */ 937/* 0x011a: intr */
932 0x00f900f8, 938 0x00f900f8,
933 0x80f904bd, 939 0x80f904bd,
934 0xa0f990f9, 940 0xa0f990f9,
@@ -948,13 +954,13 @@ uint32_t nva3_pwr_code[] = {
948 0xf40289c4, 954 0xf40289c4,
949 0x0080230b, 955 0x0080230b,
950 0x58e7f09b, 956 0x58e7f09b,
951 0x98cb21f4, 957 0x98db21f4,
952 0x96b09b09, 958 0x96b09b09,
953 0x110bf400, 959 0x110bf400,
954 0xb63407f0, 960 0xb63407f0,
955 0x09d00604, 961 0x09d00604,
956 0x8004bd00, 962 0x8004bd00,
957/* 0x016e: intr_skip_watchdog */ 963/* 0x017e: intr_skip_watchdog */
958 0x89e49a09, 964 0x89e49a09,
959 0x0bf40800, 965 0x0bf40800,
960 0x8897f148, 966 0x8897f148,
@@ -967,22 +973,22 @@ uint32_t nva3_pwr_code[] = {
967 0x48e7f1c0, 973 0x48e7f1c0,
968 0x53e3f14f, 974 0x53e3f14f,
969 0x00d7f054, 975 0x00d7f054,
970 0x02b921f5, 976 0x034221f5,
971 0x07f1c0fc, 977 0x07f1c0fc,
972 0x04b604c0, 978 0x04b604c0,
973 0x000cd006, 979 0x000cd006,
974/* 0x01ae: intr_subintr_skip_fifo */ 980/* 0x01be: intr_subintr_skip_fifo */
975 0x07f104bd, 981 0x07f104bd,
976 0x04b60688, 982 0x04b60688,
977 0x0009d006, 983 0x0009d006,
978/* 0x01ba: intr_skip_subintr */ 984/* 0x01ca: intr_skip_subintr */
979 0x89c404bd, 985 0x89c404bd,
980 0x070bf420, 986 0x070bf420,
981 0xffbfa4f1, 987 0xffbfa4f1,
982/* 0x01c4: intr_skip_pause */ 988/* 0x01d4: intr_skip_pause */
983 0xf44089c4, 989 0xf44089c4,
984 0xa4f1070b, 990 0xa4f1070b,
985/* 0x01ce: intr_skip_user0 */ 991/* 0x01de: intr_skip_user0 */
986 0x07f0ffbf, 992 0x07f0ffbf,
987 0x0604b604, 993 0x0604b604,
988 0xbd0008d0, 994 0xbd0008d0,
@@ -993,596 +999,732 @@ uint32_t nva3_pwr_code[] = {
993 0x90fca0fc, 999 0x90fca0fc,
994 0x00fc80fc, 1000 0x00fc80fc,
995 0xf80032f4, 1001 0xf80032f4,
996/* 0x01f5: timer */ 1002/* 0x0205: ticks_from_ns */
997 0x1032f401, 1003 0xf9c0f901,
998 0xb003f898, 1004 0xcbd7f1b0,
999 0x1cf40086, 1005 0x00d3f000,
1000 0x03fe8051, 1006 0x041321f5,
1007 0x03e8ccec,
1008 0xf400b4b0,
1009 0xeeec120b,
1010 0xd7f103e8,
1011 0xd3f000cb,
1012 0x1321f500,
1013/* 0x022d: ticks_from_ns_quit */
1014 0x02ceb904,
1015 0xc0fcb0fc,
1016/* 0x0236: ticks_from_us */
1017 0xc0f900f8,
1018 0xd7f1b0f9,
1019 0xd3f000cb,
1020 0x1321f500,
1021 0x02ceb904,
1022 0xf400b4b0,
1023 0xe4bd050b,
1024/* 0x0250: ticks_from_us_quit */
1025 0xc0fcb0fc,
1026/* 0x0256: ticks_to_us */
1027 0xd7f100f8,
1028 0xd3f000cb,
1029 0xecedff00,
1030/* 0x0262: timer */
1031 0x90f900f8,
1032 0x32f480f9,
1033 0x03f89810,
1034 0xf40086b0,
1035 0x84bd651c,
1001 0xb63807f0, 1036 0xb63807f0,
1002 0x08d00604, 1037 0x08d00604,
1003 0xf004bd00, 1038 0xf004bd00,
1004 0x84b60887, 1039 0x84b63487,
1005 0x0088cf06, 1040 0x0088cf06,
1006 0xf40284f0, 1041 0xbb9a0998,
1007 0x87f0261b, 1042 0xe9bb0298,
1008 0x0684b634, 1043 0x03fe8000,
1009 0xb80088cf, 1044 0xb60887f0,
1010 0x0bf406e0, 1045 0x88cf0684,
1011 0x06e8b809, 1046 0x0284f000,
1012/* 0x0233: timer_reset */ 1047 0xf0261bf4,
1013 0xf01f1ef4, 1048 0x84b63487,
1014 0x04b63407, 1049 0x0088cf06,
1015 0x000ed006, 1050 0xf406e0b8,
1016 0x0e8004bd, 1051 0xe8b8090b,
1017/* 0x0241: timer_enable */ 1052 0x111cf406,
1018 0x0187f09a, 1053/* 0x02b8: timer_reset */
1054 0xb63407f0,
1055 0x0ed00604,
1056 0x8004bd00,
1057/* 0x02c6: timer_enable */
1058 0x87f09a0e,
1059 0x3807f001,
1060 0xd00604b6,
1061 0x04bd0008,
1062/* 0x02d4: timer_done */
1063 0xfc1031f4,
1064 0xf890fc80,
1065/* 0x02dd: send_proc */
1066 0xf980f900,
1067 0x05e89890,
1068 0xf004e998,
1069 0x89b80486,
1070 0x2a0bf406,
1071 0x940398c4,
1072 0x80b60488,
1073 0x008ebb18,
1074 0x8000fa98,
1075 0x8d80008a,
1076 0x028c8001,
1077 0xb6038b80,
1078 0x94f00190,
1079 0x04e98007,
1080/* 0x0317: send_done */
1081 0xfc0231f4,
1082 0xf880fc90,
1083/* 0x031d: find */
1084 0xf080f900,
1085 0x31f45887,
1086/* 0x0325: find_loop */
1087 0x008a9801,
1088 0xf406aeb8,
1089 0x80b6100b,
1090 0x6886b158,
1091 0xf01bf402,
1092/* 0x033b: find_done */
1093 0xb90132f4,
1094 0x80fc028e,
1095/* 0x0342: send */
1096 0x21f500f8,
1097 0x01f4031d,
1098/* 0x034b: recv */
1099 0xf900f897,
1100 0x9880f990,
1101 0xe99805e8,
1102 0x0132f404,
1103 0xf40689b8,
1104 0x89c43d0b,
1105 0x0180b603,
1106 0x800784f0,
1107 0xea9805e8,
1108 0xfef0f902,
1109 0xf0f9018f,
1110 0x9402efb9,
1111 0xe9bb0499,
1112 0x18e0b600,
1113 0x9803eb98,
1114 0xed9802ec,
1115 0x00ee9801,
1116 0xf0fca5f9,
1117 0xf400f8fe,
1118 0xf0fc0131,
1119/* 0x0398: recv_done */
1120 0x90fc80fc,
1121/* 0x039e: init */
1122 0x17f100f8,
1123 0x14b60108,
1124 0x0011cf06,
1125 0x010911e7,
1126 0xfe0814b6,
1127 0x17f10014,
1128 0x13f000e0,
1129 0x1c07f000,
1130 0xd00604b6,
1131 0x04bd0001,
1132 0xf0ff17f0,
1133 0x04b61407,
1134 0x0001d006,
1135 0x17f004bd,
1136 0x0015f102,
1137 0x1007f008,
1138 0xd00604b6,
1139 0x04bd0001,
1140 0x011a17f1,
1141 0xfe0013f0,
1142 0x31f40010,
1143 0x0117f010,
1019 0xb63807f0, 1144 0xb63807f0,
1020 0x08d00604,
1021/* 0x024f: timer_done */
1022 0xf404bd00,
1023 0x00f81031,
1024/* 0x0254: send_proc */
1025 0x90f980f9,
1026 0x9805e898,
1027 0x86f004e9,
1028 0x0689b804,
1029 0xc42a0bf4,
1030 0x88940398,
1031 0x1880b604,
1032 0x98008ebb,
1033 0x8a8000fa,
1034 0x018d8000,
1035 0x80028c80,
1036 0x90b6038b,
1037 0x0794f001,
1038 0xf404e980,
1039/* 0x028e: send_done */
1040 0x90fc0231,
1041 0x00f880fc,
1042/* 0x0294: find */
1043 0x87f080f9,
1044 0x0131f458,
1045/* 0x029c: find_loop */
1046 0xb8008a98,
1047 0x0bf406ae,
1048 0x5880b610,
1049 0x026886b1,
1050 0xf4f01bf4,
1051/* 0x02b2: find_done */
1052 0x8eb90132,
1053 0xf880fc02,
1054/* 0x02b9: send */
1055 0x9421f500,
1056 0x9701f402,
1057/* 0x02c2: recv */
1058 0xe89800f8,
1059 0x04e99805,
1060 0xb80132f4,
1061 0x0bf40689,
1062 0x0389c43d,
1063 0xf00180b6,
1064 0xe8800784,
1065 0x02ea9805,
1066 0x8ffef0f9,
1067 0xb9f0f901,
1068 0x999402ef,
1069 0x00e9bb04,
1070 0x9818e0b6,
1071 0xec9803eb,
1072 0x01ed9802,
1073 0xf900ee98,
1074 0xfef0fca5,
1075 0x31f400f8,
1076/* 0x030b: recv_done */
1077 0xf8f0fc01,
1078/* 0x030d: init */
1079 0x0817f100,
1080 0x0614b601,
1081 0xe70011cf,
1082 0xb6010911,
1083 0x14fe0814,
1084 0xe017f100,
1085 0x0013f000,
1086 0xb61c07f0,
1087 0x01d00604, 1145 0x01d00604,
1088 0xf004bd00, 1146 0xf004bd00,
1089 0x07f0ff17, 1147/* 0x0402: init_proc */
1090 0x0604b614, 1148 0xf19858f7,
1091 0xbd0001d0, 1149 0x0016b001,
1092 0x0217f004, 1150 0xf9fa0bf4,
1093 0x080015f1, 1151 0x58f0b615,
1094 0xb61007f0, 1152/* 0x0413: mulu32_32_64 */
1095 0x01d00604, 1153 0xf9f20ef4,
1096 0xf104bd00, 1154 0xf920f910,
1097 0xf0010a17, 1155 0x9540f930,
1098 0x10fe0013, 1156 0xd29510e1,
1099 0x1031f400, 1157 0xbdc4bd10,
1100 0xf00117f0, 1158 0xc0edffb4,
1101 0x04b63807, 1159 0xb9301dff,
1102 0x0001d006, 1160 0x34f10234,
1103 0xf7f004bd, 1161 0x34b6ffff,
1104/* 0x0371: init_proc */ 1162 0x1045b610,
1105 0x01f19858, 1163 0xbb00c3bb,
1106 0xf40016b0, 1164 0xe2ff01b4,
1107 0x15f9fa0b, 1165 0x0234b930,
1108 0xf458f0b6, 1166 0xffff34f1,
1109/* 0x0382: host_send */ 1167 0xb61034b6,
1110 0x17f1f20e, 1168 0xc3bb1045,
1111 0x14b604b0, 1169 0x01b4bb00,
1112 0x0011cf06, 1170 0xbb3012ff,
1113 0x04a027f1, 1171 0x40fc00b3,
1114 0xcf0624b6, 1172 0x20fc30fc,
1115 0x12b80022, 1173 0x00f810fc,
1116 0x320bf406, 1174/* 0x0464: host_send */
1117 0x94071ec4, 1175 0x04b017f1,
1118 0xe0b704ee, 1176 0xcf0614b6,
1119 0xeb980270, 1177 0x27f10011,
1120 0x02ec9803, 1178 0x24b604a0,
1121 0x9801ed98, 1179 0x0022cf06,
1122 0x21f500ee, 1180 0xf40612b8,
1123 0x10b602b9, 1181 0x1ec4320b,
1124 0x0f1ec401, 1182 0x04ee9407,
1125 0x04b007f1, 1183 0x0270e0b7,
1126 0xd00604b6, 1184 0x9803eb98,
1127 0x04bd000e, 1185 0xed9802ec,
1128/* 0x03cb: host_send_done */ 1186 0x00ee9801,
1129 0xf8ba0ef4, 1187 0x034221f5,
1130/* 0x03cd: host_recv */ 1188 0xc40110b6,
1131 0x4917f100, 1189 0x07f10f1e,
1132 0x5413f14e, 1190 0x04b604b0,
1133 0x06e1b852, 1191 0x000ed006,
1134/* 0x03db: host_recv_wait */ 1192 0x0ef404bd,
1135 0xf1aa0bf4, 1193/* 0x04ad: host_send_done */
1136 0xb604cc17, 1194/* 0x04af: host_recv */
1137 0x11cf0614, 1195 0xf100f8ba,
1138 0xc827f100, 1196 0xf14e4917,
1139 0x0624b604, 1197 0xb8525413,
1140 0xf00022cf, 1198 0x0bf406e1,
1141 0x12b80816, 1199/* 0x04bd: host_recv_wait */
1142 0xe60bf406, 1200 0xcc17f1aa,
1143 0xb60723c4, 1201 0x0614b604,
1144 0x30b70434, 1202 0xf10011cf,
1145 0x3b8002f0, 1203 0xb604c827,
1146 0x023c8003, 1204 0x22cf0624,
1147 0x80013d80, 1205 0x0816f000,
1148 0x20b6003e, 1206 0xf40612b8,
1149 0x0f24f001, 1207 0x23c4e60b,
1150 0x04c807f1, 1208 0x0434b607,
1209 0x02f030b7,
1210 0x80033b80,
1211 0x3d80023c,
1212 0x003e8001,
1213 0xf00120b6,
1214 0x07f10f24,
1215 0x04b604c8,
1216 0x0002d006,
1217 0x27f004bd,
1218 0x0007f040,
1151 0xd00604b6, 1219 0xd00604b6,
1152 0x04bd0002, 1220 0x04bd0002,
1153 0xf04027f0, 1221/* 0x0512: host_init */
1154 0x04b60007, 1222 0x17f100f8,
1155 0x0002d006, 1223 0x14b60080,
1156 0x00f804bd, 1224 0x7015f110,
1157/* 0x0430: host_init */ 1225 0xd007f102,
1158 0x008017f1,
1159 0xf11014b6,
1160 0xf1027015,
1161 0xb604d007,
1162 0x01d00604,
1163 0xf104bd00,
1164 0xb6008017,
1165 0x15f11014,
1166 0x07f102f0,
1167 0x04b604dc,
1168 0x0001d006,
1169 0x17f004bd,
1170 0xc407f101,
1171 0x0604b604, 1226 0x0604b604,
1172 0xbd0001d0, 1227 0xbd0001d0,
1173/* 0x046f: memx_func_enter */ 1228 0x8017f104,
1174 0xf000f804, 1229 0x1014b600,
1230 0x02f015f1,
1231 0x04dc07f1,
1232 0xd00604b6,
1233 0x04bd0001,
1234 0xf10117f0,
1235 0xb604c407,
1236 0x01d00604,
1237 0xf804bd00,
1238/* 0x0551: memx_func_enter */
1239 0x1087f100,
1240 0x028eb916,
1241 0xb90421f4,
1242 0x67f102d7,
1243 0x63f1fffc,
1244 0x76fdffff,
1245 0x0267f104,
1246 0x0576fd00,
1247 0x70f980f9,
1248 0xe0fcd0fc,
1249 0xf03f21f4,
1175 0x07f10467, 1250 0x07f10467,
1176 0x04b607e0, 1251 0x04b607e0,
1177 0x0006d006, 1252 0x0006d006,
1178/* 0x047e: memx_func_enter_wait */ 1253/* 0x058a: memx_func_enter_wait */
1179 0x67f104bd, 1254 0x67f104bd,
1180 0x64b607c0, 1255 0x64b607c0,
1181 0x0066cf06, 1256 0x0066cf06,
1182 0xf40464f0, 1257 0xf40464f0,
1183 0x1698f30b, 1258 0x67f0f30b,
1184 0x0410b600, 1259 0x0664b62c,
1185/* 0x0496: memx_func_leave */ 1260 0x800066cf,
1186 0x67f000f8, 1261 0x00f8ee06,
1187 0xe407f104, 1262/* 0x05a8: memx_func_leave */
1188 0x0604b607, 1263 0xb62c67f0,
1189 0xbd0006d0, 1264 0x66cf0664,
1190/* 0x04a5: memx_func_leave_wait */ 1265 0xef068000,
1191 0xc067f104, 1266 0xf10467f0,
1267 0xb607e407,
1268 0x06d00604,
1269/* 0x05c3: memx_func_leave_wait */
1270 0xf104bd00,
1271 0xb607c067,
1272 0x66cf0664,
1273 0x0464f000,
1274 0xf1f31bf4,
1275 0xb9161087,
1276 0x21f4028e,
1277 0x02d7b904,
1278 0xffcc67f1,
1279 0xffff63f1,
1280 0xf90476fd,
1281 0xfc70f980,
1282 0xf4e0fcd0,
1283 0x00f83f21,
1284/* 0x05f8: memx_func_wait_vblank */
1285 0xb0001698,
1286 0x0bf40066,
1287 0x0166b013,
1288 0xf4060bf4,
1289/* 0x060a: memx_func_wait_vblank_head1 */
1290 0x77f12e0e,
1291 0x0ef40020,
1292/* 0x0611: memx_func_wait_vblank_head0 */
1293 0x0877f107,
1294/* 0x0615: memx_func_wait_vblank_0 */
1295 0xc467f100,
1192 0x0664b607, 1296 0x0664b607,
1193 0xf00066cf, 1297 0xfd0066cf,
1194 0x1bf40464, 1298 0x1bf40467,
1195/* 0x04b7: memx_func_wr32 */ 1299/* 0x0625: memx_func_wait_vblank_1 */
1196 0x9800f8f3, 1300 0xc467f1f3,
1197 0x15980016, 1301 0x0664b607,
1198 0x0810b601, 1302 0xfd0066cf,
1199 0x50f960f9, 1303 0x0bf40467,
1200 0xe0fcd0fc, 1304/* 0x0635: memx_func_wait_vblank_fini */
1201 0xb63f21f4, 1305 0x0410b6f3,
1202 0x1bf40242, 1306/* 0x063a: memx_func_wr32 */
1203/* 0x04d3: memx_func_wait */ 1307 0x169800f8,
1204 0xf000f8e9, 1308 0x01159800,
1205 0x84b62c87, 1309 0xf90810b6,
1206 0x0088cf06, 1310 0xfc50f960,
1207 0x98001e98, 1311 0xf4e0fcd0,
1208 0x1c98011d, 1312 0x42b63f21,
1209 0x031b9802, 1313 0xe91bf402,
1210 0xf41010b6, 1314/* 0x0656: memx_func_wait */
1211 0x00f89c21, 1315 0x87f000f8,
1212/* 0x04f0: memx_func_delay */ 1316 0x0684b62c,
1213 0xb6001e98, 1317 0x980088cf,
1214 0x21f40410, 1318 0x1d98001e,
1215/* 0x04fb: memx_exec */ 1319 0x021c9801,
1216 0xf900f87f, 1320 0xb6031b98,
1217 0xb9d0f9e0, 1321 0x21f41010,
1218 0xb2b902c1, 1322/* 0x0673: memx_func_delay */
1219/* 0x0505: memx_exec_next */ 1323 0x9800f8a4,
1220 0x00139802, 1324 0x10b6001e,
1221 0x950410b6, 1325 0x7f21f404,
1222 0x30f01034, 1326/* 0x067e: memx_exec */
1223 0xde35980c, 1327 0xe0f900f8,
1224 0x12b855f9, 1328 0xc1b9d0f9,
1225 0xec1ef406, 1329 0x02b2b902,
1226 0xe0fcd0fc, 1330/* 0x0688: memx_exec_next */
1227 0x02b921f5, 1331 0xb6001398,
1228/* 0x0526: memx_info */ 1332 0x34e70410,
1229 0xc7f100f8, 1333 0x33e701f0,
1230 0xb7f103ac, 1334 0x32b601e0,
1231 0x21f50800, 1335 0x0c30f001,
1232 0x00f802b9, 1336 0xf9de3598,
1233/* 0x0534: memx_recv */ 1337 0x0612b855,
1234 0xf401d6b0, 1338 0x98e41ef4,
1235 0xd6b0c40b, 1339 0x0c98ee0b,
1236 0xe90bf400, 1340 0x02cbbbef,
1237/* 0x0542: memx_init */ 1341 0x07c4b7f1,
1238 0x00f800f8, 1342 0xcf06b4b6,
1239/* 0x0544: perf_recv */ 1343 0xd0fc00bb,
1240/* 0x0546: perf_init */ 1344 0x21f5e0fc,
1345 0x00f80342,
1346/* 0x06c4: memx_info */
1347 0x03c0c7f1,
1348 0x0800b7f1,
1349 0x034221f5,
1350/* 0x06d2: memx_recv */
1351 0xd6b000f8,
1352 0xa90bf401,
1353 0xf400d6b0,
1354 0x00f8e90b,
1355/* 0x06e0: memx_init */
1356/* 0x06e2: perf_recv */
1241 0x00f800f8, 1357 0x00f800f8,
1242/* 0x0548: i2c_drive_scl */ 1358/* 0x06e4: perf_init */
1243 0xf40036b0, 1359/* 0x06e6: i2c_drive_scl */
1244 0x07f1110b,
1245 0x04b607e0,
1246 0x0001d006,
1247 0x00f804bd,
1248/* 0x055c: i2c_drive_scl_lo */
1249 0x07e407f1,
1250 0xd00604b6,
1251 0x04bd0001,
1252/* 0x056a: i2c_drive_sda */
1253 0x36b000f8, 1360 0x36b000f8,
1254 0x110bf400, 1361 0x110bf400,
1255 0x07e007f1, 1362 0x07e007f1,
1256 0xd00604b6, 1363 0xd00604b6,
1257 0x04bd0002, 1364 0x04bd0001,
1258/* 0x057e: i2c_drive_sda_lo */ 1365/* 0x06fa: i2c_drive_scl_lo */
1259 0x07f100f8, 1366 0x07f100f8,
1260 0x04b607e4, 1367 0x04b607e4,
1368 0x0001d006,
1369 0x00f804bd,
1370/* 0x0708: i2c_drive_sda */
1371 0xf40036b0,
1372 0x07f1110b,
1373 0x04b607e0,
1261 0x0002d006, 1374 0x0002d006,
1262 0x00f804bd, 1375 0x00f804bd,
1263/* 0x058c: i2c_sense_scl */ 1376/* 0x071c: i2c_drive_sda_lo */
1264 0xf10132f4, 1377 0x07e407f1,
1265 0xb607c437, 1378 0xd00604b6,
1266 0x33cf0634, 1379 0x04bd0002,
1267 0x0431fd00, 1380/* 0x072a: i2c_sense_scl */
1268 0xf4060bf4, 1381 0x32f400f8,
1269/* 0x05a2: i2c_sense_scl_done */ 1382 0xc437f101,
1270 0x00f80131, 1383 0x0634b607,
1271/* 0x05a4: i2c_sense_sda */ 1384 0xfd0033cf,
1272 0xf10132f4, 1385 0x0bf40431,
1273 0xb607c437, 1386 0x0131f406,
1274 0x33cf0634, 1387/* 0x0740: i2c_sense_scl_done */
1275 0x0432fd00, 1388/* 0x0742: i2c_sense_sda */
1276 0xf4060bf4, 1389 0x32f400f8,
1277/* 0x05ba: i2c_sense_sda_done */ 1390 0xc437f101,
1278 0x00f80131, 1391 0x0634b607,
1279/* 0x05bc: i2c_raise_scl */ 1392 0xfd0033cf,
1280 0x47f140f9, 1393 0x0bf40432,
1281 0x37f00898, 1394 0x0131f406,
1282 0x4821f501, 1395/* 0x0758: i2c_sense_sda_done */
1283/* 0x05c9: i2c_raise_scl_wait */ 1396/* 0x075a: i2c_raise_scl */
1284 0xe8e7f105, 1397 0x40f900f8,
1285 0x7f21f403, 1398 0x089847f1,
1286 0x058c21f5, 1399 0xf50137f0,
1287 0xb60901f4, 1400/* 0x0767: i2c_raise_scl_wait */
1288 0x1bf40142, 1401 0xf106e621,
1289/* 0x05dd: i2c_raise_scl_done */ 1402 0xf403e8e7,
1290 0xf840fcef, 1403 0x21f57f21,
1291/* 0x05e1: i2c_start */ 1404 0x01f4072a,
1292 0x8c21f500, 1405 0x0142b609,
1293 0x0d11f405, 1406/* 0x077b: i2c_raise_scl_done */
1294 0x05a421f5, 1407 0xfcef1bf4,
1295 0xf40611f4, 1408/* 0x077f: i2c_start */
1296/* 0x05f2: i2c_start_rep */ 1409 0xf500f840,
1297 0x37f0300e, 1410 0xf4072a21,
1298 0x4821f500, 1411 0x21f50d11,
1299 0x0137f005, 1412 0x11f40742,
1300 0x056a21f5, 1413 0x300ef406,
1301 0xb60076bb, 1414/* 0x0790: i2c_start_rep */
1302 0x50f90465, 1415 0xf50037f0,
1303 0xbb046594, 1416 0xf006e621,
1304 0x50bd0256, 1417 0x21f50137,
1305 0xfc0475fd, 1418 0x76bb0708,
1306 0xbc21f550, 1419 0x0465b600,
1307 0x0464b605, 1420 0x659450f9,
1308/* 0x061f: i2c_start_send */ 1421 0x0256bb04,
1309 0xf01f11f4, 1422 0x75fd50bd,
1423 0xf550fc04,
1424 0xb6075a21,
1425 0x11f40464,
1426/* 0x07bd: i2c_start_send */
1427 0x0037f01f,
1428 0x070821f5,
1429 0x1388e7f1,
1430 0xf07f21f4,
1310 0x21f50037, 1431 0x21f50037,
1311 0xe7f1056a, 1432 0xe7f106e6,
1312 0x21f41388, 1433 0x21f41388,
1313 0x0037f07f, 1434/* 0x07d9: i2c_start_out */
1314 0x054821f5, 1435/* 0x07db: i2c_stop */
1315 0x1388e7f1, 1436 0xf000f87f,
1316/* 0x063b: i2c_start_out */ 1437 0x21f50037,
1317 0xf87f21f4, 1438 0x37f006e6,
1318/* 0x063d: i2c_stop */ 1439 0x0821f500,
1319 0x0037f000, 1440 0xe8e7f107,
1320 0x054821f5, 1441 0x7f21f403,
1321 0xf50037f0,
1322 0xf1056a21,
1323 0xf403e8e7,
1324 0x37f07f21,
1325 0x4821f501,
1326 0x88e7f105,
1327 0x7f21f413,
1328 0xf50137f0, 1442 0xf50137f0,
1329 0xf1056a21, 1443 0xf106e621,
1330 0xf41388e7, 1444 0xf41388e7,
1331 0x00f87f21, 1445 0x37f07f21,
1332/* 0x0670: i2c_bitw */ 1446 0x0821f501,
1333 0x056a21f5, 1447 0x88e7f107,
1334 0x03e8e7f1,
1335 0xbb7f21f4,
1336 0x65b60076,
1337 0x9450f904,
1338 0x56bb0465,
1339 0xfd50bd02,
1340 0x50fc0475,
1341 0x05bc21f5,
1342 0xf40464b6,
1343 0xe7f11811,
1344 0x21f41388,
1345 0x0037f07f,
1346 0x054821f5,
1347 0x1388e7f1,
1348/* 0x06af: i2c_bitw_out */
1349 0xf87f21f4,
1350/* 0x06b1: i2c_bitr */
1351 0x0137f000,
1352 0x056a21f5,
1353 0x03e8e7f1,
1354 0xbb7f21f4,
1355 0x65b60076,
1356 0x9450f904,
1357 0x56bb0465,
1358 0xfd50bd02,
1359 0x50fc0475,
1360 0x05bc21f5,
1361 0xf40464b6,
1362 0x21f51b11,
1363 0x37f005a4,
1364 0x4821f500,
1365 0x88e7f105,
1366 0x7f21f413, 1448 0x7f21f413,
1367 0xf4013cf0, 1449/* 0x080e: i2c_bitw */
1368/* 0x06f6: i2c_bitr_done */ 1450 0x21f500f8,
1369 0x00f80131, 1451 0xe7f10708,
1370/* 0x06f8: i2c_get_byte */ 1452 0x21f403e8,
1371 0xf00057f0, 1453 0x0076bb7f,
1372/* 0x06fe: i2c_get_byte_next */
1373 0x54b60847,
1374 0x0076bb01,
1375 0xf90465b6, 1454 0xf90465b6,
1376 0x04659450, 1455 0x04659450,
1377 0xbd0256bb, 1456 0xbd0256bb,
1378 0x0475fd50, 1457 0x0475fd50,
1379 0x21f550fc, 1458 0x21f550fc,
1380 0x64b606b1, 1459 0x64b6075a,
1381 0x2b11f404, 1460 0x1811f404,
1382 0xb60553fd, 1461 0x1388e7f1,
1383 0x1bf40142, 1462 0xf07f21f4,
1384 0x0137f0d8, 1463 0x21f50037,
1385 0xb60076bb, 1464 0xe7f106e6,
1386 0x50f90465, 1465 0x21f41388,
1387 0xbb046594, 1466/* 0x084d: i2c_bitw_out */
1388 0x50bd0256, 1467/* 0x084f: i2c_bitr */
1389 0xfc0475fd, 1468 0xf000f87f,
1390 0x7021f550, 1469 0x21f50137,
1391 0x0464b606, 1470 0xe7f10708,
1392/* 0x0748: i2c_get_byte_done */ 1471 0x21f403e8,
1393/* 0x074a: i2c_put_byte */ 1472 0x0076bb7f,
1394 0x47f000f8, 1473 0xf90465b6,
1395/* 0x074d: i2c_put_byte_next */ 1474 0x04659450,
1396 0x0142b608, 1475 0xbd0256bb,
1397 0xbb3854ff, 1476 0x0475fd50,
1477 0x21f550fc,
1478 0x64b6075a,
1479 0x1b11f404,
1480 0x074221f5,
1481 0xf50037f0,
1482 0xf106e621,
1483 0xf41388e7,
1484 0x3cf07f21,
1485 0x0131f401,
1486/* 0x0894: i2c_bitr_done */
1487/* 0x0896: i2c_get_byte */
1488 0x57f000f8,
1489 0x0847f000,
1490/* 0x089c: i2c_get_byte_next */
1491 0xbb0154b6,
1398 0x65b60076, 1492 0x65b60076,
1399 0x9450f904, 1493 0x9450f904,
1400 0x56bb0465, 1494 0x56bb0465,
1401 0xfd50bd02, 1495 0xfd50bd02,
1402 0x50fc0475, 1496 0x50fc0475,
1403 0x067021f5, 1497 0x084f21f5,
1404 0xf40464b6, 1498 0xf40464b6,
1405 0x46b03411, 1499 0x53fd2b11,
1406 0xd81bf400, 1500 0x0142b605,
1407 0xb60076bb, 1501 0xf0d81bf4,
1408 0x50f90465, 1502 0x76bb0137,
1409 0xbb046594, 1503 0x0465b600,
1410 0x50bd0256, 1504 0x659450f9,
1411 0xfc0475fd, 1505 0x0256bb04,
1412 0xb121f550, 1506 0x75fd50bd,
1413 0x0464b606, 1507 0xf550fc04,
1414 0xbb0f11f4, 1508 0xb6080e21,
1415 0x36b00076, 1509/* 0x08e6: i2c_get_byte_done */
1416 0x061bf401, 1510 0x00f80464,
1417/* 0x07a3: i2c_put_byte_done */ 1511/* 0x08e8: i2c_put_byte */
1418 0xf80132f4, 1512/* 0x08eb: i2c_put_byte_next */
1419/* 0x07a5: i2c_addr */ 1513 0xb60847f0,
1420 0x0076bb00, 1514 0x54ff0142,
1515 0x0076bb38,
1421 0xf90465b6, 1516 0xf90465b6,
1422 0x04659450, 1517 0x04659450,
1423 0xbd0256bb, 1518 0xbd0256bb,
1424 0x0475fd50, 1519 0x0475fd50,
1425 0x21f550fc, 1520 0x21f550fc,
1426 0x64b605e1, 1521 0x64b6080e,
1427 0x2911f404, 1522 0x3411f404,
1428 0x012ec3e7, 1523 0xf40046b0,
1429 0xfd0134b6, 1524 0x76bbd81b,
1430 0x76bb0553,
1431 0x0465b600, 1525 0x0465b600,
1432 0x659450f9, 1526 0x659450f9,
1433 0x0256bb04, 1527 0x0256bb04,
1434 0x75fd50bd, 1528 0x75fd50bd,
1435 0xf550fc04, 1529 0xf550fc04,
1436 0xb6074a21, 1530 0xb6084f21,
1437/* 0x07ea: i2c_addr_done */ 1531 0x11f40464,
1438 0x00f80464, 1532 0x0076bb0f,
1439/* 0x07ec: i2c_acquire_addr */ 1533 0xf40136b0,
1440 0xb6f8cec7, 1534 0x32f4061b,
1441 0xe0b702e4, 1535/* 0x0941: i2c_put_byte_done */
1442 0xee980bfc, 1536/* 0x0943: i2c_addr */
1443/* 0x07fb: i2c_acquire */ 1537 0xbb00f801,
1444 0xf500f800,
1445 0xf407ec21,
1446 0xd9f00421,
1447 0x3f21f403,
1448/* 0x080a: i2c_release */
1449 0x21f500f8,
1450 0x21f407ec,
1451 0x03daf004,
1452 0xf83f21f4,
1453/* 0x0819: i2c_recv */
1454 0x0132f400,
1455 0xb6f8c1c7,
1456 0x16b00214,
1457 0x3a1ff528,
1458 0xd413a001,
1459 0x0032980b,
1460 0x0bac13a0,
1461 0xf4003198,
1462 0xd0f90231,
1463 0xd0f9e0f9,
1464 0x000067f1,
1465 0x100063f1,
1466 0xbb016792,
1467 0x65b60076, 1538 0x65b60076,
1468 0x9450f904, 1539 0x9450f904,
1469 0x56bb0465, 1540 0x56bb0465,
1470 0xfd50bd02, 1541 0xfd50bd02,
1471 0x50fc0475, 1542 0x50fc0475,
1472 0x07fb21f5, 1543 0x077f21f5,
1473 0xfc0464b6, 1544 0xf40464b6,
1474 0x00d6b0d0, 1545 0xc3e72911,
1475 0x00b31bf5, 1546 0x34b6012e,
1476 0xbb0057f0, 1547 0x0553fd01,
1477 0x65b60076, 1548 0xb60076bb,
1478 0x9450f904, 1549 0x50f90465,
1479 0x56bb0465, 1550 0xbb046594,
1480 0xfd50bd02, 1551 0x50bd0256,
1481 0x50fc0475, 1552 0xfc0475fd,
1482 0x07a521f5, 1553 0xe821f550,
1483 0xf50464b6, 1554 0x0464b608,
1484 0xc700d011, 1555/* 0x0988: i2c_addr_done */
1485 0x76bbe0c5, 1556/* 0x098a: i2c_acquire_addr */
1486 0x0465b600, 1557 0xcec700f8,
1487 0x659450f9, 1558 0x02e4b6f8,
1488 0x0256bb04, 1559 0x0c10e0b7,
1489 0x75fd50bd, 1560 0xf800ee98,
1490 0xf550fc04, 1561/* 0x0999: i2c_acquire */
1491 0xb6074a21, 1562 0x8a21f500,
1492 0x11f50464, 1563 0x0421f409,
1493 0x57f000ad, 1564 0xf403d9f0,
1565 0x00f83f21,
1566/* 0x09a8: i2c_release */
1567 0x098a21f5,
1568 0xf00421f4,
1569 0x21f403da,
1570/* 0x09b7: i2c_recv */
1571 0xf400f83f,
1572 0xc1c70132,
1573 0x0214b6f8,
1574 0xf52816b0,
1575 0xa0013a1f,
1576 0x980be813,
1577 0x13a00032,
1578 0x31980bc0,
1579 0x0231f400,
1580 0xe0f9d0f9,
1581 0x67f1d0f9,
1582 0x63f10000,
1583 0x67921000,
1494 0x0076bb01, 1584 0x0076bb01,
1495 0xf90465b6, 1585 0xf90465b6,
1496 0x04659450, 1586 0x04659450,
1497 0xbd0256bb, 1587 0xbd0256bb,
1498 0x0475fd50, 1588 0x0475fd50,
1499 0x21f550fc, 1589 0x21f550fc,
1500 0x64b607a5, 1590 0x64b60999,
1501 0x8a11f504, 1591 0xb0d0fc04,
1592 0x1bf500d6,
1593 0x57f000b3,
1502 0x0076bb00, 1594 0x0076bb00,
1503 0xf90465b6, 1595 0xf90465b6,
1504 0x04659450, 1596 0x04659450,
1505 0xbd0256bb, 1597 0xbd0256bb,
1506 0x0475fd50, 1598 0x0475fd50,
1507 0x21f550fc, 1599 0x21f550fc,
1508 0x64b606f8, 1600 0x64b60943,
1509 0x6a11f404, 1601 0xd011f504,
1510 0xbbe05bcb, 1602 0xe0c5c700,
1603 0xb60076bb,
1604 0x50f90465,
1605 0xbb046594,
1606 0x50bd0256,
1607 0xfc0475fd,
1608 0xe821f550,
1609 0x0464b608,
1610 0x00ad11f5,
1611 0xbb0157f0,
1511 0x65b60076, 1612 0x65b60076,
1512 0x9450f904, 1613 0x9450f904,
1513 0x56bb0465, 1614 0x56bb0465,
1514 0xfd50bd02, 1615 0xfd50bd02,
1515 0x50fc0475, 1616 0x50fc0475,
1516 0x063d21f5, 1617 0x094321f5,
1517 0xb90464b6, 1618 0xf50464b6,
1518 0x74bd025b, 1619 0xbb008a11,
1519/* 0x091f: i2c_recv_not_rd08 */ 1620 0x65b60076,
1520 0xb0430ef4, 1621 0x9450f904,
1521 0x1bf401d6, 1622 0x56bb0465,
1522 0x0057f03d, 1623 0xfd50bd02,
1523 0x07a521f5, 1624 0x50fc0475,
1524 0xc73311f4, 1625 0x089621f5,
1525 0x21f5e0c5, 1626 0xf40464b6,
1526 0x11f4074a, 1627 0x5bcb6a11,
1527 0x0057f029, 1628 0x0076bbe0,
1528 0x07a521f5, 1629 0xf90465b6,
1529 0xc71f11f4, 1630 0x04659450,
1530 0x21f5e0b5, 1631 0xbd0256bb,
1531 0x11f4074a, 1632 0x0475fd50,
1532 0x3d21f515, 1633 0x21f550fc,
1533 0xc774bd06, 1634 0x64b607db,
1534 0x1bf408c5, 1635 0x025bb904,
1535 0x0232f409, 1636 0x0ef474bd,
1536/* 0x095f: i2c_recv_not_wr08 */ 1637/* 0x0abd: i2c_recv_not_rd08 */
1537/* 0x095f: i2c_recv_done */ 1638 0x01d6b043,
1538 0xc7030ef4, 1639 0xf03d1bf4,
1539 0x21f5f8ce, 1640 0x21f50057,
1540 0xe0fc080a, 1641 0x11f40943,
1541 0x12f4d0fc, 1642 0xe0c5c733,
1542 0x027cb90a, 1643 0x08e821f5,
1543 0x02b921f5, 1644 0xf02911f4,
1544/* 0x0974: i2c_recv_exit */ 1645 0x21f50057,
1545/* 0x0976: i2c_init */ 1646 0x11f40943,
1546 0x00f800f8, 1647 0xe0b5c71f,
1547/* 0x0978: test_recv */ 1648 0x08e821f5,
1548 0x05d817f1, 1649 0xf51511f4,
1650 0xbd07db21,
1651 0x08c5c774,
1652 0xf4091bf4,
1653 0x0ef40232,
1654/* 0x0afd: i2c_recv_not_wr08 */
1655/* 0x0afd: i2c_recv_done */
1656 0xf8cec703,
1657 0x09a821f5,
1658 0xd0fce0fc,
1659 0xb90a12f4,
1660 0x21f5027c,
1661/* 0x0b12: i2c_recv_exit */
1662 0x00f80342,
1663/* 0x0b14: i2c_init */
1664/* 0x0b16: test_recv */
1665 0x17f100f8,
1666 0x14b605d8,
1667 0x0011cf06,
1668 0xf10110b6,
1669 0xb605d807,
1670 0x01d00604,
1671 0xf104bd00,
1672 0xf1d900e7,
1673 0xf5134fe3,
1674 0xf8026221,
1675/* 0x0b3d: test_init */
1676 0x00e7f100,
1677 0x6221f508,
1678/* 0x0b47: idle_recv */
1679 0xf800f802,
1680/* 0x0b49: idle */
1681 0x0031f400,
1682 0x05d417f1,
1549 0xcf0614b6, 1683 0xcf0614b6,
1550 0x10b60011, 1684 0x10b60011,
1551 0xd807f101, 1685 0xd407f101,
1552 0x0604b605, 1686 0x0604b605,
1553 0xbd0001d0, 1687 0xbd0001d0,
1554 0x00e7f104, 1688/* 0x0b65: idle_loop */
1555 0x4fe3f1d9, 1689 0x5817f004,
1556 0xf521f513, 1690/* 0x0b6b: idle_proc */
1557/* 0x099f: test_init */ 1691/* 0x0b6b: idle_proc_exec */
1558 0xf100f801, 1692 0xf90232f4,
1559 0xf50800e7, 1693 0x021eb910,
1560 0xf801f521, 1694 0x034b21f5,
1561/* 0x09a9: idle_recv */ 1695 0x11f410fc,
1562/* 0x09ab: idle */ 1696 0x0231f409,
1563 0xf400f800, 1697/* 0x0b7f: idle_proc_next */
1564 0x17f10031, 1698 0xb6ef0ef4,
1565 0x14b605d4, 1699 0x1fb85810,
1566 0x0011cf06, 1700 0xe61bf406,
1567 0xf10110b6, 1701 0xf4dd02f4,
1568 0xb605d407, 1702 0x0ef40028,
1569 0x01d00604, 1703 0x000000bb,
1570/* 0x09c7: idle_loop */ 1704 0x00000000,
1571 0xf004bd00, 1705 0x00000000,
1572 0x32f45817, 1706 0x00000000,
1573/* 0x09cd: idle_proc */ 1707 0x00000000,
1574/* 0x09cd: idle_proc_exec */ 1708 0x00000000,
1575 0xb910f902, 1709 0x00000000,
1576 0x21f5021e, 1710 0x00000000,
1577 0x10fc02c2, 1711 0x00000000,
1578 0xf40911f4, 1712 0x00000000,
1579 0x0ef40231, 1713 0x00000000,
1580/* 0x09e1: idle_proc_next */ 1714 0x00000000,
1581 0x5810b6ef, 1715 0x00000000,
1582 0xf4061fb8, 1716 0x00000000,
1583 0x02f4e61b, 1717 0x00000000,
1584 0x0028f4dd, 1718 0x00000000,
1585 0x00bb0ef4, 1719 0x00000000,
1720 0x00000000,
1721 0x00000000,
1722 0x00000000,
1723 0x00000000,
1724 0x00000000,
1725 0x00000000,
1726 0x00000000,
1727 0x00000000,
1586 0x00000000, 1728 0x00000000,
1587 0x00000000, 1729 0x00000000,
1588 0x00000000, 1730 0x00000000,
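
The relocated i2c_* routines in the hunk above are the PMU's bit-banged I2C engine; the symbol labels (i2c_drive_scl/sda, i2c_sense_scl/sda, i2c_raise_scl, i2c_bitw/i2c_bitr) and the recurring 0x03e8/0x1388 immediates (1000 ns and 5000 ns delays) make the sequencing legible even in hex form. A minimal C sketch of one bit-write follows, with hypothetical pin stubs standing in for the falcon's GPIO registers; the real implementation is the microcode itself, and the exact delay placement here is read off the hex, not the source.

    #include <stdint.h>

    /* hypothetical open-drain pin stubs: 0 = pull low, 1 = release */
    static int scl_pin = 1, sda_pin = 1;
    static void drive(int *pin, int v)  { *pin = v; }
    static int  sense(const int *pin)   { return *pin; }
    static void ndelay(uint32_t ns)     { (void)ns; }  /* busy-wait elided */

    /* raise SCL and wait out clock stretching, as i2c_raise_scl does */
    static void raise_scl(void)
    {
            drive(&scl_pin, 1);
            while (!sense(&scl_pin))
                    ndelay(1000);      /* 0x03e8 poll interval in the hex */
    }

    /* write one bit, mirroring the i2c_bitw call sequence in the listing */
    static void i2c_bitw(int bit)
    {
            drive(&sda_pin, bit);      /* present the bit on SDA */
            ndelay(1000);              /* setup delay (0x03e8)   */
            raise_scl();               /* clock it out...        */
            ndelay(5000);              /* high period (0x1388)   */
            drive(&scl_pin, 0);        /* ...and bring SCL low   */
            ndelay(5000);
    }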
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
index 48f79434a449..21bf8cc7618f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#define NVKM_PPWR_CHIPSET GF100 25#define NVKM_PPWR_CHIPSET GF100
26#define HW_TICKS_PER_US 203 // should be 202.5
26 27
27//#define NVKM_FALCON_PC24 28//#define NVKM_FALCON_PC24
28//#define NVKM_FALCON_UNSHIFTED_IO 29//#define NVKM_FALCON_UNSHIFTED_IO
@@ -34,6 +35,7 @@
34.section #nvc0_pwr_data 35.section #nvc0_pwr_data
35#define INCLUDE_PROC 36#define INCLUDE_PROC
36#include "kernel.fuc" 37#include "kernel.fuc"
38#include "arith.fuc"
37#include "host.fuc" 39#include "host.fuc"
38#include "memx.fuc" 40#include "memx.fuc"
39#include "perf.fuc" 41#include "perf.fuc"
@@ -44,6 +46,7 @@
44 46
45#define INCLUDE_DATA 47#define INCLUDE_DATA
46#include "kernel.fuc" 48#include "kernel.fuc"
49#include "arith.fuc"
47#include "host.fuc" 50#include "host.fuc"
48#include "memx.fuc" 51#include "memx.fuc"
49#include "perf.fuc" 52#include "perf.fuc"
@@ -56,6 +59,7 @@
56.section #nvc0_pwr_code 59.section #nvc0_pwr_code
57#define INCLUDE_CODE 60#define INCLUDE_CODE
58#include "kernel.fuc" 61#include "kernel.fuc"
62#include "arith.fuc"
59#include "host.fuc" 63#include "host.fuc"
60#include "memx.fuc" 64#include "memx.fuc"
61#include "perf.fuc" 65#include "perf.fuc"
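
The newly included arith.fuc and the per-chipset HW_TICKS_PER_US define (203 for GF100, rounded up from the true 202.5 as the comment notes) back the ticks_from_ns/ticks_from_us/ticks_to_us entry points that appear in the regenerated blob below. A rough C sketch of what those conversions compute, assuming straightforward scaling; the microcode performs the wide multiply through its own mulu32_32_64 helper rather than a native 64-bit type.

    #include <stdint.h>

    #define HW_TICKS_PER_US 203   /* GF100; GF119 uses an exact 324 */

    /* microseconds to timer ticks: a plain 32x32->64 scale */
    static uint32_t ticks_from_us(uint32_t us)
    {
            return (uint32_t)((uint64_t)us * HW_TICKS_PER_US);
    }

    /* nanoseconds to ticks: scale first, then divide out ns->us */
    static uint32_t ticks_from_ns(uint32_t ns)
    {
            return (uint32_t)((uint64_t)ns * HW_TICKS_PER_US / 1000);
    }

    /* ticks back to microseconds */
    static uint32_t ticks_to_us(uint32_t ticks)
    {
            return ticks / HW_TICKS_PER_US;
    }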
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 0773ff0e3dc3..ca30fa4011b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -24,8 +24,8 @@ uint32_t nvc0_pwr_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x00000430, 27 0x00000512,
28 0x000003cd, 28 0x000004af,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x00000542, 49 0x0000074b,
50 0x00000534, 50 0x0000073d,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000546, 71 0x0000074f,
72 0x00000544, 72 0x0000074d,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000976, 93 0x00000b7f,
94 0x00000819, 94 0x00000a22,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x0000099f, 115 0x00000ba8,
116 0x00000978, 116 0x00000b81,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x000009ab, 137 0x00000bb4,
138 0x000009a9, 138 0x00000bb2,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -227,25 +227,31 @@ uint32_t nvc0_pwr_data[] = {
227 0x00000000, 227 0x00000000,
228 0x00000000, 228 0x00000000,
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00010000,
231 0x00000000,
232 0x0000046f,
233/* 0x037c: memx_func_next */
234 0x00000001, 230 0x00000001,
235 0x00000000, 231 0x00000000,
236 0x00000496, 232 0x00000551,
233/* 0x037c: memx_func_next */
237 0x00000002, 234 0x00000002,
235 0x00000000,
236 0x000005db,
237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x000004b7, 239 0x000006a5,
240 0x00040003, 240 0x00040004,
241 0x00000000,
242 0x000006c1,
243 0x00010005,
244 0x00000000,
245 0x000006de,
246 0x00010006,
241 0x00000000, 247 0x00000000,
242 0x000004d3, 248 0x00000663,
243 0x00010004, 249/* 0x03b8: memx_func_tail */
250/* 0x03b8: memx_ts_start */
244 0x00000000, 251 0x00000000,
245 0x000004f0, 252/* 0x03bc: memx_ts_end */
246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */
248 0x00000000, 253 0x00000000,
254/* 0x03c0: memx_data_head */
249 0x00000000, 255 0x00000000,
250 0x00000000, 256 0x00000000,
251 0x00000000, 257 0x00000000,
@@ -757,8 +763,9 @@ uint32_t nvc0_pwr_data[] = {
757 0x00000000, 763 0x00000000,
758 0x00000000, 764 0x00000000,
759 0x00000000, 765 0x00000000,
760/* 0x0bac: memx_data_tail */ 766 0x00000000,
761/* 0x0bac: i2c_scl_map */ 767/* 0x0bc0: memx_data_tail */
768/* 0x0bc0: i2c_scl_map */
762 0x00001000, 769 0x00001000,
763 0x00004000, 770 0x00004000,
764 0x00010000, 771 0x00010000,
@@ -769,7 +776,7 @@ uint32_t nvc0_pwr_data[] = {
769 0x01000000, 776 0x01000000,
770 0x04000000, 777 0x04000000,
771 0x10000000, 778 0x10000000,
772/* 0x0bd4: i2c_sda_map */ 779/* 0x0be8: i2c_sda_map */
773 0x00002000, 780 0x00002000,
774 0x00008000, 781 0x00008000,
775 0x00020000, 782 0x00020000,
@@ -780,7 +787,7 @@ uint32_t nvc0_pwr_data[] = {
780 0x02000000, 787 0x02000000,
781 0x08000000, 788 0x08000000,
782 0x20000000, 789 0x20000000,
783/* 0x0bfc: i2c_ctrl */ 790/* 0x0c10: i2c_ctrl */
784 0x0000e138, 791 0x0000e138,
785 0x0000e150, 792 0x0000e150,
786 0x0000e168, 793 0x0000e168,
@@ -841,15 +848,10 @@ uint32_t nvc0_pwr_data[] = {
841 0x00000000, 848 0x00000000,
842 0x00000000, 849 0x00000000,
843 0x00000000, 850 0x00000000,
844 0x00000000,
845 0x00000000,
846 0x00000000,
847 0x00000000,
848 0x00000000,
849}; 851};
850 852
851uint32_t nvc0_pwr_code[] = { 853uint32_t nvc0_pwr_code[] = {
852 0x030d0ef5, 854 0x039e0ef5,
853/* 0x0004: rd32 */ 855/* 0x0004: rd32 */
854 0x07a007f1, 856 0x07a007f1,
855 0xd00604b6, 857 0xd00604b6,
@@ -885,19 +887,22 @@ uint32_t nvc0_pwr_code[] = {
885 0xd4f100dd, 887 0xd4f100dd,
886 0x1bf47000, 888 0x1bf47000,
887/* 0x007f: nsec */ 889/* 0x007f: nsec */
888 0xf000f8f2, 890 0xf900f8f2,
891 0xf080f990,
889 0x84b62c87, 892 0x84b62c87,
890 0x0088cf06, 893 0x0088cf06,
891/* 0x0088: nsec_loop */ 894/* 0x008c: nsec_loop */
892 0xb62c97f0, 895 0xb62c97f0,
893 0x99cf0694, 896 0x99cf0694,
894 0x0298bb00, 897 0x0298bb00,
895 0xf4069eb8, 898 0xf4069eb8,
896 0x00f8f11e, 899 0x80fcf11e,
897/* 0x009c: wait */ 900 0x00f890fc,
901/* 0x00a4: wait */
902 0x80f990f9,
898 0xb62c87f0, 903 0xb62c87f0,
899 0x88cf0684, 904 0x88cf0684,
900/* 0x00a5: wait_loop */ 905/* 0x00b1: wait_loop */
901 0x02eeb900, 906 0x02eeb900,
902 0xb90421f4, 907 0xb90421f4,
903 0xadfd02da, 908 0xadfd02da,
@@ -907,28 +912,29 @@ uint32_t nvc0_pwr_code[] = {
907 0x0099cf06, 912 0x0099cf06,
908 0xb80298bb, 913 0xb80298bb,
909 0x1ef4069b, 914 0x1ef4069b,
910/* 0x00c9: wait_done */ 915/* 0x00d5: wait_done */
911/* 0x00cb: intr_watchdog */ 916 0xfc80fcdf,
912 0x9800f8df, 917/* 0x00db: intr_watchdog */
918 0x9800f890,
913 0x96b003e9, 919 0x96b003e9,
914 0x2a0bf400, 920 0x2a0bf400,
915 0xbb9a0a98, 921 0xbb9a0a98,
916 0x1cf4029a, 922 0x1cf4029a,
917 0x01d7f00f, 923 0x01d7f00f,
918 0x025421f5, 924 0x02dd21f5,
919 0x0ef494bd, 925 0x0ef494bd,
920/* 0x00e9: intr_watchdog_next_time */ 926/* 0x00f9: intr_watchdog_next_time */
921 0x9b0a9815, 927 0x9b0a9815,
922 0xf400a6b0, 928 0xf400a6b0,
923 0x9ab8090b, 929 0x9ab8090b,
924 0x061cf406, 930 0x061cf406,
925/* 0x00f8: intr_watchdog_next_time_set */ 931/* 0x0108: intr_watchdog_next_time_set */
926/* 0x00fb: intr_watchdog_next_proc */ 932/* 0x010b: intr_watchdog_next_proc */
927 0x809b0980, 933 0x809b0980,
928 0xe0b603e9, 934 0xe0b603e9,
929 0x68e6b158, 935 0x68e6b158,
930 0xc61bf402, 936 0xc61bf402,
931/* 0x010a: intr */ 937/* 0x011a: intr */
932 0x00f900f8, 938 0x00f900f8,
933 0x80f904bd, 939 0x80f904bd,
934 0xa0f990f9, 940 0xa0f990f9,
@@ -948,13 +954,13 @@ uint32_t nvc0_pwr_code[] = {
948 0xf40289c4, 954 0xf40289c4,
949 0x0080230b, 955 0x0080230b,
950 0x58e7f09b, 956 0x58e7f09b,
951 0x98cb21f4, 957 0x98db21f4,
952 0x96b09b09, 958 0x96b09b09,
953 0x110bf400, 959 0x110bf400,
954 0xb63407f0, 960 0xb63407f0,
955 0x09d00604, 961 0x09d00604,
956 0x8004bd00, 962 0x8004bd00,
957/* 0x016e: intr_skip_watchdog */ 963/* 0x017e: intr_skip_watchdog */
958 0x89e49a09, 964 0x89e49a09,
959 0x0bf40800, 965 0x0bf40800,
960 0x8897f148, 966 0x8897f148,
@@ -967,22 +973,22 @@ uint32_t nvc0_pwr_code[] = {
967 0x48e7f1c0, 973 0x48e7f1c0,
968 0x53e3f14f, 974 0x53e3f14f,
969 0x00d7f054, 975 0x00d7f054,
970 0x02b921f5, 976 0x034221f5,
971 0x07f1c0fc, 977 0x07f1c0fc,
972 0x04b604c0, 978 0x04b604c0,
973 0x000cd006, 979 0x000cd006,
974/* 0x01ae: intr_subintr_skip_fifo */ 980/* 0x01be: intr_subintr_skip_fifo */
975 0x07f104bd, 981 0x07f104bd,
976 0x04b60688, 982 0x04b60688,
977 0x0009d006, 983 0x0009d006,
978/* 0x01ba: intr_skip_subintr */ 984/* 0x01ca: intr_skip_subintr */
979 0x89c404bd, 985 0x89c404bd,
980 0x070bf420, 986 0x070bf420,
981 0xffbfa4f1, 987 0xffbfa4f1,
982/* 0x01c4: intr_skip_pause */ 988/* 0x01d4: intr_skip_pause */
983 0xf44089c4, 989 0xf44089c4,
984 0xa4f1070b, 990 0xa4f1070b,
985/* 0x01ce: intr_skip_user0 */ 991/* 0x01de: intr_skip_user0 */
986 0x07f0ffbf, 992 0x07f0ffbf,
987 0x0604b604, 993 0x0604b604,
988 0xbd0008d0, 994 0xbd0008d0,
@@ -993,597 +999,733 @@ uint32_t nvc0_pwr_code[] = {
993 0x90fca0fc, 999 0x90fca0fc,
994 0x00fc80fc, 1000 0x00fc80fc,
995 0xf80032f4, 1001 0xf80032f4,
996/* 0x01f5: timer */ 1002/* 0x0205: ticks_from_ns */
997 0x1032f401, 1003 0xf9c0f901,
998 0xb003f898, 1004 0xcbd7f1b0,
999 0x1cf40086, 1005 0x00d3f000,
1000 0x03fe8051, 1006 0x041321f5,
1007 0x03e8ccec,
1008 0xf400b4b0,
1009 0xeeec120b,
1010 0xd7f103e8,
1011 0xd3f000cb,
1012 0x1321f500,
1013/* 0x022d: ticks_from_ns_quit */
1014 0x02ceb904,
1015 0xc0fcb0fc,
1016/* 0x0236: ticks_from_us */
1017 0xc0f900f8,
1018 0xd7f1b0f9,
1019 0xd3f000cb,
1020 0x1321f500,
1021 0x02ceb904,
1022 0xf400b4b0,
1023 0xe4bd050b,
1024/* 0x0250: ticks_from_us_quit */
1025 0xc0fcb0fc,
1026/* 0x0256: ticks_to_us */
1027 0xd7f100f8,
1028 0xd3f000cb,
1029 0xecedff00,
1030/* 0x0262: timer */
1031 0x90f900f8,
1032 0x32f480f9,
1033 0x03f89810,
1034 0xf40086b0,
1035 0x84bd651c,
1001 0xb63807f0, 1036 0xb63807f0,
1002 0x08d00604, 1037 0x08d00604,
1003 0xf004bd00, 1038 0xf004bd00,
1004 0x84b60887, 1039 0x84b63487,
1005 0x0088cf06, 1040 0x0088cf06,
1006 0xf40284f0, 1041 0xbb9a0998,
1007 0x87f0261b, 1042 0xe9bb0298,
1008 0x0684b634, 1043 0x03fe8000,
1009 0xb80088cf, 1044 0xb60887f0,
1010 0x0bf406e0, 1045 0x88cf0684,
1011 0x06e8b809, 1046 0x0284f000,
1012/* 0x0233: timer_reset */ 1047 0xf0261bf4,
1013 0xf01f1ef4, 1048 0x84b63487,
1014 0x04b63407, 1049 0x0088cf06,
1015 0x000ed006, 1050 0xf406e0b8,
1016 0x0e8004bd, 1051 0xe8b8090b,
1017/* 0x0241: timer_enable */ 1052 0x111cf406,
1018 0x0187f09a, 1053/* 0x02b8: timer_reset */
1054 0xb63407f0,
1055 0x0ed00604,
1056 0x8004bd00,
1057/* 0x02c6: timer_enable */
1058 0x87f09a0e,
1059 0x3807f001,
1060 0xd00604b6,
1061 0x04bd0008,
1062/* 0x02d4: timer_done */
1063 0xfc1031f4,
1064 0xf890fc80,
1065/* 0x02dd: send_proc */
1066 0xf980f900,
1067 0x05e89890,
1068 0xf004e998,
1069 0x89b80486,
1070 0x2a0bf406,
1071 0x940398c4,
1072 0x80b60488,
1073 0x008ebb18,
1074 0x8000fa98,
1075 0x8d80008a,
1076 0x028c8001,
1077 0xb6038b80,
1078 0x94f00190,
1079 0x04e98007,
1080/* 0x0317: send_done */
1081 0xfc0231f4,
1082 0xf880fc90,
1083/* 0x031d: find */
1084 0xf080f900,
1085 0x31f45887,
1086/* 0x0325: find_loop */
1087 0x008a9801,
1088 0xf406aeb8,
1089 0x80b6100b,
1090 0x6886b158,
1091 0xf01bf402,
1092/* 0x033b: find_done */
1093 0xb90132f4,
1094 0x80fc028e,
1095/* 0x0342: send */
1096 0x21f500f8,
1097 0x01f4031d,
1098/* 0x034b: recv */
1099 0xf900f897,
1100 0x9880f990,
1101 0xe99805e8,
1102 0x0132f404,
1103 0xf40689b8,
1104 0x89c43d0b,
1105 0x0180b603,
1106 0x800784f0,
1107 0xea9805e8,
1108 0xfef0f902,
1109 0xf0f9018f,
1110 0x9402efb9,
1111 0xe9bb0499,
1112 0x18e0b600,
1113 0x9803eb98,
1114 0xed9802ec,
1115 0x00ee9801,
1116 0xf0fca5f9,
1117 0xf400f8fe,
1118 0xf0fc0131,
1119/* 0x0398: recv_done */
1120 0x90fc80fc,
1121/* 0x039e: init */
1122 0x17f100f8,
1123 0x14b60108,
1124 0x0011cf06,
1125 0x010911e7,
1126 0xfe0814b6,
1127 0x17f10014,
1128 0x13f000e0,
1129 0x1c07f000,
1130 0xd00604b6,
1131 0x04bd0001,
1132 0xf0ff17f0,
1133 0x04b61407,
1134 0x0001d006,
1135 0x17f004bd,
1136 0x0015f102,
1137 0x1007f008,
1138 0xd00604b6,
1139 0x04bd0001,
1140 0x011a17f1,
1141 0xfe0013f0,
1142 0x31f40010,
1143 0x0117f010,
1019 0xb63807f0, 1144 0xb63807f0,
1020 0x08d00604,
1021/* 0x024f: timer_done */
1022 0xf404bd00,
1023 0x00f81031,
1024/* 0x0254: send_proc */
1025 0x90f980f9,
1026 0x9805e898,
1027 0x86f004e9,
1028 0x0689b804,
1029 0xc42a0bf4,
1030 0x88940398,
1031 0x1880b604,
1032 0x98008ebb,
1033 0x8a8000fa,
1034 0x018d8000,
1035 0x80028c80,
1036 0x90b6038b,
1037 0x0794f001,
1038 0xf404e980,
1039/* 0x028e: send_done */
1040 0x90fc0231,
1041 0x00f880fc,
1042/* 0x0294: find */
1043 0x87f080f9,
1044 0x0131f458,
1045/* 0x029c: find_loop */
1046 0xb8008a98,
1047 0x0bf406ae,
1048 0x5880b610,
1049 0x026886b1,
1050 0xf4f01bf4,
1051/* 0x02b2: find_done */
1052 0x8eb90132,
1053 0xf880fc02,
1054/* 0x02b9: send */
1055 0x9421f500,
1056 0x9701f402,
1057/* 0x02c2: recv */
1058 0xe89800f8,
1059 0x04e99805,
1060 0xb80132f4,
1061 0x0bf40689,
1062 0x0389c43d,
1063 0xf00180b6,
1064 0xe8800784,
1065 0x02ea9805,
1066 0x8ffef0f9,
1067 0xb9f0f901,
1068 0x999402ef,
1069 0x00e9bb04,
1070 0x9818e0b6,
1071 0xec9803eb,
1072 0x01ed9802,
1073 0xf900ee98,
1074 0xfef0fca5,
1075 0x31f400f8,
1076/* 0x030b: recv_done */
1077 0xf8f0fc01,
1078/* 0x030d: init */
1079 0x0817f100,
1080 0x0614b601,
1081 0xe70011cf,
1082 0xb6010911,
1083 0x14fe0814,
1084 0xe017f100,
1085 0x0013f000,
1086 0xb61c07f0,
1087 0x01d00604, 1145 0x01d00604,
1088 0xf004bd00, 1146 0xf004bd00,
1089 0x07f0ff17, 1147/* 0x0402: init_proc */
1090 0x0604b614, 1148 0xf19858f7,
1091 0xbd0001d0, 1149 0x0016b001,
1092 0x0217f004, 1150 0xf9fa0bf4,
1093 0x080015f1, 1151 0x58f0b615,
1094 0xb61007f0, 1152/* 0x0413: mulu32_32_64 */
1095 0x01d00604, 1153 0xf9f20ef4,
1096 0xf104bd00, 1154 0xf920f910,
1097 0xf0010a17, 1155 0x9540f930,
1098 0x10fe0013, 1156 0xd29510e1,
1099 0x1031f400, 1157 0xbdc4bd10,
1100 0xf00117f0, 1158 0xc0edffb4,
1101 0x04b63807, 1159 0xb9301dff,
1102 0x0001d006, 1160 0x34f10234,
1103 0xf7f004bd, 1161 0x34b6ffff,
1104/* 0x0371: init_proc */ 1162 0x1045b610,
1105 0x01f19858, 1163 0xbb00c3bb,
1106 0xf40016b0, 1164 0xe2ff01b4,
1107 0x15f9fa0b, 1165 0x0234b930,
1108 0xf458f0b6, 1166 0xffff34f1,
1109/* 0x0382: host_send */ 1167 0xb61034b6,
1110 0x17f1f20e, 1168 0xc3bb1045,
1111 0x14b604b0, 1169 0x01b4bb00,
1112 0x0011cf06, 1170 0xbb3012ff,
1113 0x04a027f1, 1171 0x40fc00b3,
1114 0xcf0624b6, 1172 0x20fc30fc,
1115 0x12b80022, 1173 0x00f810fc,
1116 0x320bf406, 1174/* 0x0464: host_send */
1117 0x94071ec4, 1175 0x04b017f1,
1118 0xe0b704ee, 1176 0xcf0614b6,
1119 0xeb980270, 1177 0x27f10011,
1120 0x02ec9803, 1178 0x24b604a0,
1121 0x9801ed98, 1179 0x0022cf06,
1122 0x21f500ee, 1180 0xf40612b8,
1123 0x10b602b9, 1181 0x1ec4320b,
1124 0x0f1ec401, 1182 0x04ee9407,
1125 0x04b007f1, 1183 0x0270e0b7,
1126 0xd00604b6, 1184 0x9803eb98,
1127 0x04bd000e, 1185 0xed9802ec,
1128/* 0x03cb: host_send_done */ 1186 0x00ee9801,
1129 0xf8ba0ef4, 1187 0x034221f5,
1130/* 0x03cd: host_recv */ 1188 0xc40110b6,
1131 0x4917f100, 1189 0x07f10f1e,
1132 0x5413f14e, 1190 0x04b604b0,
1133 0x06e1b852, 1191 0x000ed006,
1134/* 0x03db: host_recv_wait */ 1192 0x0ef404bd,
1135 0xf1aa0bf4, 1193/* 0x04ad: host_send_done */
1136 0xb604cc17, 1194/* 0x04af: host_recv */
1137 0x11cf0614, 1195 0xf100f8ba,
1138 0xc827f100, 1196 0xf14e4917,
1139 0x0624b604, 1197 0xb8525413,
1140 0xf00022cf, 1198 0x0bf406e1,
1141 0x12b80816, 1199/* 0x04bd: host_recv_wait */
1142 0xe60bf406, 1200 0xcc17f1aa,
1143 0xb60723c4, 1201 0x0614b604,
1144 0x30b70434, 1202 0xf10011cf,
1145 0x3b8002f0, 1203 0xb604c827,
1146 0x023c8003, 1204 0x22cf0624,
1147 0x80013d80, 1205 0x0816f000,
1148 0x20b6003e, 1206 0xf40612b8,
1149 0x0f24f001, 1207 0x23c4e60b,
1150 0x04c807f1, 1208 0x0434b607,
1209 0x02f030b7,
1210 0x80033b80,
1211 0x3d80023c,
1212 0x003e8001,
1213 0xf00120b6,
1214 0x07f10f24,
1215 0x04b604c8,
1216 0x0002d006,
1217 0x27f004bd,
1218 0x0007f040,
1151 0xd00604b6, 1219 0xd00604b6,
1152 0x04bd0002, 1220 0x04bd0002,
1153 0xf04027f0, 1221/* 0x0512: host_init */
1154 0x04b60007, 1222 0x17f100f8,
1155 0x0002d006, 1223 0x14b60080,
1156 0x00f804bd, 1224 0x7015f110,
1157/* 0x0430: host_init */ 1225 0xd007f102,
1158 0x008017f1,
1159 0xf11014b6,
1160 0xf1027015,
1161 0xb604d007,
1162 0x01d00604,
1163 0xf104bd00,
1164 0xb6008017,
1165 0x15f11014,
1166 0x07f102f0,
1167 0x04b604dc,
1168 0x0001d006,
1169 0x17f004bd,
1170 0xc407f101,
1171 0x0604b604, 1226 0x0604b604,
1172 0xbd0001d0, 1227 0xbd0001d0,
1173/* 0x046f: memx_func_enter */ 1228 0x8017f104,
1174 0xf000f804, 1229 0x1014b600,
1230 0x02f015f1,
1231 0x04dc07f1,
1232 0xd00604b6,
1233 0x04bd0001,
1234 0xf10117f0,
1235 0xb604c407,
1236 0x01d00604,
1237 0xf804bd00,
1238/* 0x0551: memx_func_enter */
1239 0x2067f100,
1240 0x5d77f116,
1241 0xff73f1f5,
1242 0x026eb9ff,
1243 0xb90421f4,
1244 0x87fd02d8,
1245 0xf960f904,
1246 0xfcd0fc80,
1247 0x3f21f4e0,
1248 0xfffe77f1,
1249 0xffff73f1,
1250 0xf4026eb9,
1251 0xd8b90421,
1252 0x0487fd02,
1253 0x80f960f9,
1254 0xe0fcd0fc,
1255 0xf13f21f4,
1256 0xb926f067,
1257 0x21f4026e,
1258 0x02d8b904,
1259 0xf90487fd,
1260 0xfc80f960,
1261 0xf4e0fcd0,
1262 0x67f03f21,
1263 0xe007f104,
1264 0x0604b607,
1265 0xbd0006d0,
1266/* 0x05bd: memx_func_enter_wait */
1267 0xc067f104,
1268 0x0664b607,
1269 0xf00066cf,
1270 0x0bf40464,
1271 0x2c67f0f3,
1272 0xcf0664b6,
1273 0x06800066,
1274/* 0x05db: memx_func_leave */
1275 0xf000f8ee,
1276 0x64b62c67,
1277 0x0066cf06,
1278 0xf0ef0680,
1175 0x07f10467, 1279 0x07f10467,
1176 0x04b607e0, 1280 0x04b607e4,
1177 0x0006d006, 1281 0x0006d006,
1178/* 0x047e: memx_func_enter_wait */ 1282/* 0x05f6: memx_func_leave_wait */
1179 0x67f104bd, 1283 0x67f104bd,
1180 0x64b607c0, 1284 0x64b607c0,
1181 0x0066cf06, 1285 0x0066cf06,
1182 0xf40464f0, 1286 0xf40464f0,
1183 0x1698f30b, 1287 0x67f1f31b,
1288 0x77f126f0,
1289 0x73f00001,
1290 0x026eb900,
1291 0xb90421f4,
1292 0x87fd02d8,
1293 0xf960f905,
1294 0xfcd0fc80,
1295 0x3f21f4e0,
1296 0x162067f1,
1297 0xf4026eb9,
1298 0xd8b90421,
1299 0x0587fd02,
1300 0x80f960f9,
1301 0xe0fcd0fc,
1302 0xf13f21f4,
1303 0xf00aa277,
1304 0x6eb90073,
1305 0x0421f402,
1306 0xfd02d8b9,
1307 0x60f90587,
1308 0xd0fc80f9,
1309 0x21f4e0fc,
1310/* 0x0663: memx_func_wait_vblank */
1311 0x9800f83f,
1312 0x66b00016,
1313 0x130bf400,
1314 0xf40166b0,
1315 0x0ef4060b,
1316/* 0x0675: memx_func_wait_vblank_head1 */
1317 0x2077f12e,
1318 0x070ef400,
1319/* 0x067c: memx_func_wait_vblank_head0 */
1320 0x000877f1,
1321/* 0x0680: memx_func_wait_vblank_0 */
1322 0x07c467f1,
1323 0xcf0664b6,
1324 0x67fd0066,
1325 0xf31bf404,
1326/* 0x0690: memx_func_wait_vblank_1 */
1327 0x07c467f1,
1328 0xcf0664b6,
1329 0x67fd0066,
1330 0xf30bf404,
1331/* 0x06a0: memx_func_wait_vblank_fini */
1332 0xf80410b6,
1333/* 0x06a5: memx_func_wr32 */
1334 0x00169800,
1335 0xb6011598,
1336 0x60f90810,
1337 0xd0fc50f9,
1338 0x21f4e0fc,
1339 0x0242b63f,
1340 0xf8e91bf4,
1341/* 0x06c1: memx_func_wait */
1342 0x2c87f000,
1343 0xcf0684b6,
1344 0x1e980088,
1345 0x011d9800,
1346 0x98021c98,
1347 0x10b6031b,
1348 0xa421f410,
1349/* 0x06de: memx_func_delay */
1350 0x1e9800f8,
1184 0x0410b600, 1351 0x0410b600,
1185/* 0x0496: memx_func_leave */ 1352 0xf87f21f4,
1186 0x67f000f8, 1353/* 0x06e9: memx_exec */
1187 0xe407f104, 1354 0xf9e0f900,
1355 0x02c1b9d0,
1356/* 0x06f3: memx_exec_next */
1357 0x9802b2b9,
1358 0x10b60013,
1359 0xf034e704,
1360 0xe033e701,
1361 0x0132b601,
1362 0x980c30f0,
1363 0x55f9de35,
1364 0xf40612b8,
1365 0x0b98e41e,
1366 0xef0c98ee,
1367 0xf102cbbb,
1368 0xb607c4b7,
1369 0xbbcf06b4,
1370 0xfcd0fc00,
1371 0x4221f5e0,
1372/* 0x072f: memx_info */
1373 0xf100f803,
1374 0xf103c0c7,
1375 0xf50800b7,
1376 0xf8034221,
1377/* 0x073d: memx_recv */
1378 0x01d6b000,
1379 0xb0a90bf4,
1380 0x0bf400d6,
1381/* 0x074b: memx_init */
1382 0xf800f8e9,
1383/* 0x074d: perf_recv */
1384/* 0x074f: perf_init */
1385 0xf800f800,
1386/* 0x0751: i2c_drive_scl */
1387 0x0036b000,
1388 0xf1110bf4,
1389 0xb607e007,
1390 0x01d00604,
1391 0xf804bd00,
1392/* 0x0765: i2c_drive_scl_lo */
1393 0xe407f100,
1188 0x0604b607, 1394 0x0604b607,
1189 0xbd0006d0, 1395 0xbd0001d0,
1190/* 0x04a5: memx_func_leave_wait */ 1396/* 0x0773: i2c_drive_sda */
1191 0xc067f104, 1397 0xb000f804,
1192 0x0664b607, 1398 0x0bf40036,
1193 0xf00066cf, 1399 0xe007f111,
1194 0x1bf40464, 1400 0x0604b607,
1195/* 0x04b7: memx_func_wr32 */ 1401 0xbd0002d0,
1196 0x9800f8f3, 1402/* 0x0787: i2c_drive_sda_lo */
1197 0x15980016, 1403 0xf100f804,
1198 0x0810b601, 1404 0xb607e407,
1199 0x50f960f9, 1405 0x02d00604,
1200 0xe0fcd0fc, 1406 0xf804bd00,
1201 0xb63f21f4, 1407/* 0x0795: i2c_sense_scl */
1202 0x1bf40242, 1408 0x0132f400,
1203/* 0x04d3: memx_func_wait */ 1409 0x07c437f1,
1204 0xf000f8e9, 1410 0xcf0634b6,
1205 0x84b62c87, 1411 0x31fd0033,
1206 0x0088cf06, 1412 0x060bf404,
1207 0x98001e98, 1413/* 0x07ab: i2c_sense_scl_done */
1208 0x1c98011d, 1414 0xf80131f4,
1209 0x031b9802, 1415/* 0x07ad: i2c_sense_sda */
1210 0xf41010b6, 1416 0x0132f400,
1211 0x00f89c21, 1417 0x07c437f1,
1212/* 0x04f0: memx_func_delay */ 1418 0xcf0634b6,
1213 0xb6001e98, 1419 0x32fd0033,
1214 0x21f40410, 1420 0x060bf404,
1215/* 0x04fb: memx_exec */ 1421/* 0x07c3: i2c_sense_sda_done */
1216 0xf900f87f, 1422 0xf80131f4,
1217 0xb9d0f9e0, 1423/* 0x07c5: i2c_raise_scl */
1218 0xb2b902c1, 1424 0xf140f900,
1219/* 0x0505: memx_exec_next */ 1425 0xf0089847,
1220 0x00139802, 1426 0x21f50137,
1221 0x950410b6, 1427/* 0x07d2: i2c_raise_scl_wait */
1222 0x30f01034, 1428 0xe7f10751,
1223 0xde35980c, 1429 0x21f403e8,
1224 0x12b855f9, 1430 0x9521f57f,
1225 0xec1ef406, 1431 0x0901f407,
1226 0xe0fcd0fc, 1432 0xf40142b6,
1227 0x02b921f5, 1433/* 0x07e6: i2c_raise_scl_done */
1228/* 0x0526: memx_info */ 1434 0x40fcef1b,
1229 0xc7f100f8, 1435/* 0x07ea: i2c_start */
1230 0xb7f103ac, 1436 0x21f500f8,
1231 0x21f50800, 1437 0x11f40795,
1232 0x00f802b9, 1438 0xad21f50d,
1233/* 0x0534: memx_recv */ 1439 0x0611f407,
1234 0xf401d6b0, 1440/* 0x07fb: i2c_start_rep */
1235 0xd6b0c40b, 1441 0xf0300ef4,
1236 0xe90bf400,
1237/* 0x0542: memx_init */
1238 0x00f800f8,
1239/* 0x0544: perf_recv */
1240/* 0x0546: perf_init */
1241 0x00f800f8,
1242/* 0x0548: i2c_drive_scl */
1243 0xf40036b0,
1244 0x07f1110b,
1245 0x04b607e0,
1246 0x0001d006,
1247 0x00f804bd,
1248/* 0x055c: i2c_drive_scl_lo */
1249 0x07e407f1,
1250 0xd00604b6,
1251 0x04bd0001,
1252/* 0x056a: i2c_drive_sda */
1253 0x36b000f8,
1254 0x110bf400,
1255 0x07e007f1,
1256 0xd00604b6,
1257 0x04bd0002,
1258/* 0x057e: i2c_drive_sda_lo */
1259 0x07f100f8,
1260 0x04b607e4,
1261 0x0002d006,
1262 0x00f804bd,
1263/* 0x058c: i2c_sense_scl */
1264 0xf10132f4,
1265 0xb607c437,
1266 0x33cf0634,
1267 0x0431fd00,
1268 0xf4060bf4,
1269/* 0x05a2: i2c_sense_scl_done */
1270 0x00f80131,
1271/* 0x05a4: i2c_sense_sda */
1272 0xf10132f4,
1273 0xb607c437,
1274 0x33cf0634,
1275 0x0432fd00,
1276 0xf4060bf4,
1277/* 0x05ba: i2c_sense_sda_done */
1278 0x00f80131,
1279/* 0x05bc: i2c_raise_scl */
1280 0x47f140f9,
1281 0x37f00898,
1282 0x4821f501,
1283/* 0x05c9: i2c_raise_scl_wait */
1284 0xe8e7f105,
1285 0x7f21f403,
1286 0x058c21f5,
1287 0xb60901f4,
1288 0x1bf40142,
1289/* 0x05dd: i2c_raise_scl_done */
1290 0xf840fcef,
1291/* 0x05e1: i2c_start */
1292 0x8c21f500,
1293 0x0d11f405,
1294 0x05a421f5,
1295 0xf40611f4,
1296/* 0x05f2: i2c_start_rep */
1297 0x37f0300e,
1298 0x4821f500,
1299 0x0137f005,
1300 0x056a21f5,
1301 0xb60076bb,
1302 0x50f90465,
1303 0xbb046594,
1304 0x50bd0256,
1305 0xfc0475fd,
1306 0xbc21f550,
1307 0x0464b605,
1308/* 0x061f: i2c_start_send */
1309 0xf01f11f4,
1310 0x21f50037, 1442 0x21f50037,
1311 0xe7f1056a, 1443 0x37f00751,
1312 0x21f41388, 1444 0x7321f501,
1313 0x0037f07f, 1445 0x0076bb07,
1314 0x054821f5, 1446 0xf90465b6,
1315 0x1388e7f1, 1447 0x04659450,
1316/* 0x063b: i2c_start_out */ 1448 0xbd0256bb,
1317 0xf87f21f4, 1449 0x0475fd50,
1318/* 0x063d: i2c_stop */ 1450 0x21f550fc,
1319 0x0037f000, 1451 0x64b607c5,
1320 0x054821f5, 1452 0x1f11f404,
1453/* 0x0828: i2c_start_send */
1321 0xf50037f0, 1454 0xf50037f0,
1322 0xf1056a21, 1455 0xf1077321,
1323 0xf403e8e7, 1456 0xf41388e7,
1324 0x37f07f21, 1457 0x37f07f21,
1325 0x4821f501, 1458 0x5121f500,
1326 0x88e7f105, 1459 0x88e7f107,
1327 0x7f21f413, 1460 0x7f21f413,
1328 0xf50137f0, 1461/* 0x0844: i2c_start_out */
1329 0xf1056a21, 1462/* 0x0846: i2c_stop */
1330 0xf41388e7, 1463 0x37f000f8,
1331 0x00f87f21, 1464 0x5121f500,
1332/* 0x0670: i2c_bitw */ 1465 0x0037f007,
1333 0x056a21f5, 1466 0x077321f5,
1334 0x03e8e7f1, 1467 0x03e8e7f1,
1335 0xbb7f21f4, 1468 0xf07f21f4,
1336 0x65b60076, 1469 0x21f50137,
1337 0x9450f904, 1470 0xe7f10751,
1338 0x56bb0465,
1339 0xfd50bd02,
1340 0x50fc0475,
1341 0x05bc21f5,
1342 0xf40464b6,
1343 0xe7f11811,
1344 0x21f41388, 1471 0x21f41388,
1345 0x0037f07f, 1472 0x0137f07f,
1346 0x054821f5, 1473 0x077321f5,
1347 0x1388e7f1, 1474 0x1388e7f1,
1348/* 0x06af: i2c_bitw_out */
1349 0xf87f21f4, 1475 0xf87f21f4,
1350/* 0x06b1: i2c_bitr */ 1476/* 0x0879: i2c_bitw */
1351 0x0137f000, 1477 0x7321f500,
1352 0x056a21f5, 1478 0xe8e7f107,
1353 0x03e8e7f1, 1479 0x7f21f403,
1354 0xbb7f21f4, 1480 0xb60076bb,
1355 0x65b60076, 1481 0x50f90465,
1356 0x9450f904, 1482 0xbb046594,
1357 0x56bb0465, 1483 0x50bd0256,
1358 0xfd50bd02, 1484 0xfc0475fd,
1359 0x50fc0475, 1485 0xc521f550,
1360 0x05bc21f5, 1486 0x0464b607,
1361 0xf40464b6, 1487 0xf11811f4,
1362 0x21f51b11, 1488 0xf41388e7,
1363 0x37f005a4, 1489 0x37f07f21,
1364 0x4821f500, 1490 0x5121f500,
1365 0x88e7f105, 1491 0x88e7f107,
1366 0x7f21f413, 1492 0x7f21f413,
1367 0xf4013cf0, 1493/* 0x08b8: i2c_bitw_out */
1368/* 0x06f6: i2c_bitr_done */ 1494/* 0x08ba: i2c_bitr */
1369 0x00f80131, 1495 0x37f000f8,
1370/* 0x06f8: i2c_get_byte */ 1496 0x7321f501,
1371 0xf00057f0, 1497 0xe8e7f107,
1372/* 0x06fe: i2c_get_byte_next */ 1498 0x7f21f403,
1373 0x54b60847, 1499 0xb60076bb,
1500 0x50f90465,
1501 0xbb046594,
1502 0x50bd0256,
1503 0xfc0475fd,
1504 0xc521f550,
1505 0x0464b607,
1506 0xf51b11f4,
1507 0xf007ad21,
1508 0x21f50037,
1509 0xe7f10751,
1510 0x21f41388,
1511 0x013cf07f,
1512/* 0x08ff: i2c_bitr_done */
1513 0xf80131f4,
1514/* 0x0901: i2c_get_byte */
1515 0x0057f000,
1516/* 0x0907: i2c_get_byte_next */
1517 0xb60847f0,
1518 0x76bb0154,
1519 0x0465b600,
1520 0x659450f9,
1521 0x0256bb04,
1522 0x75fd50bd,
1523 0xf550fc04,
1524 0xb608ba21,
1525 0x11f40464,
1526 0x0553fd2b,
1527 0xf40142b6,
1528 0x37f0d81b,
1374 0x0076bb01, 1529 0x0076bb01,
1375 0xf90465b6, 1530 0xf90465b6,
1376 0x04659450, 1531 0x04659450,
1377 0xbd0256bb, 1532 0xbd0256bb,
1378 0x0475fd50, 1533 0x0475fd50,
1379 0x21f550fc, 1534 0x21f550fc,
1380 0x64b606b1, 1535 0x64b60879,
1381 0x2b11f404, 1536/* 0x0951: i2c_get_byte_done */
1382 0xb60553fd, 1537/* 0x0953: i2c_put_byte */
1383 0x1bf40142, 1538 0xf000f804,
1384 0x0137f0d8, 1539/* 0x0956: i2c_put_byte_next */
1385 0xb60076bb, 1540 0x42b60847,
1386 0x50f90465, 1541 0x3854ff01,
1387 0xbb046594,
1388 0x50bd0256,
1389 0xfc0475fd,
1390 0x7021f550,
1391 0x0464b606,
1392/* 0x0748: i2c_get_byte_done */
1393/* 0x074a: i2c_put_byte */
1394 0x47f000f8,
1395/* 0x074d: i2c_put_byte_next */
1396 0x0142b608,
1397 0xbb3854ff,
1398 0x65b60076,
1399 0x9450f904,
1400 0x56bb0465,
1401 0xfd50bd02,
1402 0x50fc0475,
1403 0x067021f5,
1404 0xf40464b6,
1405 0x46b03411,
1406 0xd81bf400,
1407 0xb60076bb, 1542 0xb60076bb,
1408 0x50f90465, 1543 0x50f90465,
1409 0xbb046594, 1544 0xbb046594,
1410 0x50bd0256, 1545 0x50bd0256,
1411 0xfc0475fd, 1546 0xfc0475fd,
1412 0xb121f550, 1547 0x7921f550,
1413 0x0464b606, 1548 0x0464b608,
1414 0xbb0f11f4, 1549 0xb03411f4,
1415 0x36b00076, 1550 0x1bf40046,
1416 0x061bf401, 1551 0x0076bbd8,
1417/* 0x07a3: i2c_put_byte_done */
1418 0xf80132f4,
1419/* 0x07a5: i2c_addr */
1420 0x0076bb00,
1421 0xf90465b6, 1552 0xf90465b6,
1422 0x04659450, 1553 0x04659450,
1423 0xbd0256bb, 1554 0xbd0256bb,
1424 0x0475fd50, 1555 0x0475fd50,
1425 0x21f550fc, 1556 0x21f550fc,
1426 0x64b605e1, 1557 0x64b608ba,
1427 0x2911f404, 1558 0x0f11f404,
1428 0x012ec3e7, 1559 0xb00076bb,
1429 0xfd0134b6, 1560 0x1bf40136,
1430 0x76bb0553, 1561 0x0132f406,
1562/* 0x09ac: i2c_put_byte_done */
1563/* 0x09ae: i2c_addr */
1564 0x76bb00f8,
1431 0x0465b600, 1565 0x0465b600,
1432 0x659450f9, 1566 0x659450f9,
1433 0x0256bb04, 1567 0x0256bb04,
1434 0x75fd50bd, 1568 0x75fd50bd,
1435 0xf550fc04, 1569 0xf550fc04,
1436 0xb6074a21, 1570 0xb607ea21,
1437/* 0x07ea: i2c_addr_done */ 1571 0x11f40464,
1438 0x00f80464, 1572 0x2ec3e729,
1439/* 0x07ec: i2c_acquire_addr */ 1573 0x0134b601,
1440 0xb6f8cec7, 1574 0xbb0553fd,
1441 0xe0b702e4,
1442 0xee980bfc,
1443/* 0x07fb: i2c_acquire */
1444 0xf500f800,
1445 0xf407ec21,
1446 0xd9f00421,
1447 0x3f21f403,
1448/* 0x080a: i2c_release */
1449 0x21f500f8,
1450 0x21f407ec,
1451 0x03daf004,
1452 0xf83f21f4,
1453/* 0x0819: i2c_recv */
1454 0x0132f400,
1455 0xb6f8c1c7,
1456 0x16b00214,
1457 0x3a1ff528,
1458 0xd413a001,
1459 0x0032980b,
1460 0x0bac13a0,
1461 0xf4003198,
1462 0xd0f90231,
1463 0xd0f9e0f9,
1464 0x000067f1,
1465 0x100063f1,
1466 0xbb016792,
1467 0x65b60076, 1575 0x65b60076,
1468 0x9450f904, 1576 0x9450f904,
1469 0x56bb0465, 1577 0x56bb0465,
1470 0xfd50bd02, 1578 0xfd50bd02,
1471 0x50fc0475, 1579 0x50fc0475,
1472 0x07fb21f5, 1580 0x095321f5,
1473 0xfc0464b6, 1581/* 0x09f3: i2c_addr_done */
1474 0x00d6b0d0, 1582 0xf80464b6,
1475 0x00b31bf5, 1583/* 0x09f5: i2c_acquire_addr */
1476 0xbb0057f0, 1584 0xf8cec700,
1585 0xb702e4b6,
1586 0x980c10e0,
1587 0x00f800ee,
1588/* 0x0a04: i2c_acquire */
1589 0x09f521f5,
1590 0xf00421f4,
1591 0x21f403d9,
1592/* 0x0a13: i2c_release */
1593 0xf500f83f,
1594 0xf409f521,
1595 0xdaf00421,
1596 0x3f21f403,
1597/* 0x0a22: i2c_recv */
1598 0x32f400f8,
1599 0xf8c1c701,
1600 0xb00214b6,
1601 0x1ff52816,
1602 0x13a0013a,
1603 0x32980be8,
1604 0xc013a000,
1605 0x0031980b,
1606 0xf90231f4,
1607 0xf9e0f9d0,
1608 0x0067f1d0,
1609 0x0063f100,
1610 0x01679210,
1611 0xb60076bb,
1612 0x50f90465,
1613 0xbb046594,
1614 0x50bd0256,
1615 0xfc0475fd,
1616 0x0421f550,
1617 0x0464b60a,
1618 0xd6b0d0fc,
1619 0xb31bf500,
1620 0x0057f000,
1621 0xb60076bb,
1622 0x50f90465,
1623 0xbb046594,
1624 0x50bd0256,
1625 0xfc0475fd,
1626 0xae21f550,
1627 0x0464b609,
1628 0x00d011f5,
1629 0xbbe0c5c7,
1477 0x65b60076, 1630 0x65b60076,
1478 0x9450f904, 1631 0x9450f904,
1479 0x56bb0465, 1632 0x56bb0465,
1480 0xfd50bd02, 1633 0xfd50bd02,
1481 0x50fc0475, 1634 0x50fc0475,
1482 0x07a521f5, 1635 0x095321f5,
1483 0xf50464b6, 1636 0xf50464b6,
1484 0xc700d011, 1637 0xf000ad11,
1485 0x76bbe0c5, 1638 0x76bb0157,
1486 0x0465b600, 1639 0x0465b600,
1487 0x659450f9, 1640 0x659450f9,
1488 0x0256bb04, 1641 0x0256bb04,
1489 0x75fd50bd, 1642 0x75fd50bd,
1490 0xf550fc04, 1643 0xf550fc04,
1491 0xb6074a21, 1644 0xb609ae21,
1492 0x11f50464, 1645 0x11f50464,
1493 0x57f000ad, 1646 0x76bb008a,
1494 0x0076bb01, 1647 0x0465b600,
1495 0xf90465b6, 1648 0x659450f9,
1496 0x04659450, 1649 0x0256bb04,
1497 0xbd0256bb, 1650 0x75fd50bd,
1498 0x0475fd50, 1651 0xf550fc04,
1499 0x21f550fc, 1652 0xb6090121,
1500 0x64b607a5, 1653 0x11f40464,
1501 0x8a11f504, 1654 0xe05bcb6a,
1502 0x0076bb00, 1655 0xb60076bb,
1503 0xf90465b6, 1656 0x50f90465,
1504 0x04659450, 1657 0xbb046594,
1505 0xbd0256bb, 1658 0x50bd0256,
1506 0x0475fd50, 1659 0xfc0475fd,
1507 0x21f550fc, 1660 0x4621f550,
1508 0x64b606f8, 1661 0x0464b608,
1509 0x6a11f404, 1662 0xbd025bb9,
1510 0xbbe05bcb, 1663 0x430ef474,
1511 0x65b60076, 1664/* 0x0b28: i2c_recv_not_rd08 */
1512 0x9450f904, 1665 0xf401d6b0,
1513 0x56bb0465, 1666 0x57f03d1b,
1514 0xfd50bd02, 1667 0xae21f500,
1515 0x50fc0475, 1668 0x3311f409,
1516 0x063d21f5, 1669 0xf5e0c5c7,
1517 0xb90464b6, 1670 0xf4095321,
1518 0x74bd025b, 1671 0x57f02911,
1519/* 0x091f: i2c_recv_not_rd08 */ 1672 0xae21f500,
1520 0xb0430ef4, 1673 0x1f11f409,
1521 0x1bf401d6, 1674 0xf5e0b5c7,
1522 0x0057f03d, 1675 0xf4095321,
1523 0x07a521f5, 1676 0x21f51511,
1524 0xc73311f4, 1677 0x74bd0846,
1525 0x21f5e0c5, 1678 0xf408c5c7,
1526 0x11f4074a, 1679 0x32f4091b,
1527 0x0057f029, 1680 0x030ef402,
1528 0x07a521f5, 1681/* 0x0b68: i2c_recv_not_wr08 */
1529 0xc71f11f4, 1682/* 0x0b68: i2c_recv_done */
1530 0x21f5e0b5, 1683 0xf5f8cec7,
1531 0x11f4074a, 1684 0xfc0a1321,
1532 0x3d21f515, 1685 0xf4d0fce0,
1533 0xc774bd06, 1686 0x7cb90a12,
1534 0x1bf408c5, 1687 0x4221f502,
1535 0x0232f409, 1688/* 0x0b7d: i2c_recv_exit */
1536/* 0x095f: i2c_recv_not_wr08 */ 1689/* 0x0b7f: i2c_init */
1537/* 0x095f: i2c_recv_done */ 1690 0xf800f803,
1538 0xc7030ef4, 1691/* 0x0b81: test_recv */
1539 0x21f5f8ce, 1692 0xd817f100,
1540 0xe0fc080a, 1693 0x0614b605,
1541 0x12f4d0fc, 1694 0xb60011cf,
1542 0x027cb90a, 1695 0x07f10110,
1543 0x02b921f5, 1696 0x04b605d8,
1544/* 0x0974: i2c_recv_exit */ 1697 0x0001d006,
1545/* 0x0976: i2c_init */ 1698 0xe7f104bd,
1699 0xe3f1d900,
1700 0x21f5134f,
1701 0x00f80262,
1702/* 0x0ba8: test_init */
1703 0x0800e7f1,
1704 0x026221f5,
1705/* 0x0bb2: idle_recv */
1546 0x00f800f8, 1706 0x00f800f8,
1547/* 0x0978: test_recv */ 1707/* 0x0bb4: idle */
1548 0x05d817f1, 1708 0xf10031f4,
1549 0xcf0614b6, 1709 0xb605d417,
1550 0x10b60011, 1710 0x11cf0614,
1551 0xd807f101, 1711 0x0110b600,
1552 0x0604b605, 1712 0x05d407f1,
1553 0xbd0001d0, 1713 0xd00604b6,
1554 0x00e7f104, 1714 0x04bd0001,
1555 0x4fe3f1d9, 1715/* 0x0bd0: idle_loop */
1556 0xf521f513, 1716 0xf45817f0,
1557/* 0x099f: test_init */ 1717/* 0x0bd6: idle_proc */
1558 0xf100f801, 1718/* 0x0bd6: idle_proc_exec */
1559 0xf50800e7, 1719 0x10f90232,
1560 0xf801f521, 1720 0xf5021eb9,
1561/* 0x09a9: idle_recv */ 1721 0xfc034b21,
1562/* 0x09ab: idle */ 1722 0x0911f410,
1563 0xf400f800, 1723 0xf40231f4,
1564 0x17f10031, 1724/* 0x0bea: idle_proc_next */
1565 0x14b605d4, 1725 0x10b6ef0e,
1566 0x0011cf06, 1726 0x061fb858,
1567 0xf10110b6, 1727 0xf4e61bf4,
1568 0xb605d407, 1728 0x28f4dd02,
1569 0x01d00604, 1729 0xbb0ef400,
1570/* 0x09c7: idle_loop */
1571 0xf004bd00,
1572 0x32f45817,
1573/* 0x09cd: idle_proc */
1574/* 0x09cd: idle_proc_exec */
1575 0xb910f902,
1576 0x21f5021e,
1577 0x10fc02c2,
1578 0xf40911f4,
1579 0x0ef40231,
1580/* 0x09e1: idle_proc_next */
1581 0x5810b6ef,
1582 0xf4061fb8,
1583 0x02f4e61b,
1584 0x0028f4dd,
1585 0x00bb0ef4,
1586 0x00000000,
1587 0x00000000,
1588 0x00000000, 1730 0x00000000,
1589}; 1731};
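
Among the new entry points in this regenerated image is mulu32_32_64 (at 0x0413 in the new column above): the falcon has no widening multiply, so a 64-bit product must be assembled from 16-bit partial products, and the 0xffff masks and 16-bit shifts are visible in the hex. A self-contained C equivalent of that standard technique, with hypothetical names; this is an illustration, not the firmware's code.

    #include <stdint.h>

    /* 32x32 -> 64 multiply via four 16-bit partial products */
    static void mulu32_32_64(uint32_t a, uint32_t b,
                             uint32_t *hi, uint32_t *lo)
    {
            uint32_t a_lo = a & 0xffff, a_hi = a >> 16;
            uint32_t b_lo = b & 0xffff, b_hi = b >> 16;

            uint32_t p0 = a_lo * b_lo;     /* bits  0..31 */
            uint32_t p1 = a_lo * b_hi;     /* bits 16..47 */
            uint32_t p2 = a_hi * b_lo;     /* bits 16..47 */
            uint32_t p3 = a_hi * b_hi;     /* bits 32..63 */

            /* carry column: upper half of p0 plus low halves of p1/p2 */
            uint32_t mid = (p0 >> 16) + (p1 & 0xffff) + (p2 & 0xffff);

            *lo = (p0 & 0xffff) | (mid << 16);
            *hi = p3 + (p1 >> 16) + (p2 >> 16) + (mid >> 16);
    }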
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
index 8a89dfe41ce1..b85443261569 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#define NVKM_PPWR_CHIPSET GF119 25#define NVKM_PPWR_CHIPSET GF119
26#define HW_TICKS_PER_US 324
26 27
27//#define NVKM_FALCON_PC24 28//#define NVKM_FALCON_PC24
28#define NVKM_FALCON_UNSHIFTED_IO 29#define NVKM_FALCON_UNSHIFTED_IO
@@ -34,6 +35,7 @@
34.section #nvd0_pwr_data 35.section #nvd0_pwr_data
35#define INCLUDE_PROC 36#define INCLUDE_PROC
36#include "kernel.fuc" 37#include "kernel.fuc"
38#include "arith.fuc"
37#include "host.fuc" 39#include "host.fuc"
38#include "memx.fuc" 40#include "memx.fuc"
39#include "perf.fuc" 41#include "perf.fuc"
@@ -44,6 +46,7 @@
44 46
45#define INCLUDE_DATA 47#define INCLUDE_DATA
46#include "kernel.fuc" 48#include "kernel.fuc"
49#include "arith.fuc"
47#include "host.fuc" 50#include "host.fuc"
48#include "memx.fuc" 51#include "memx.fuc"
49#include "perf.fuc" 52#include "perf.fuc"
@@ -56,6 +59,7 @@
56.section #nvd0_pwr_code 59.section #nvd0_pwr_code
57#define INCLUDE_CODE 60#define INCLUDE_CODE
58#include "kernel.fuc" 61#include "kernel.fuc"
62#include "arith.fuc"
59#include "host.fuc" 63#include "host.fuc"
60#include "memx.fuc" 64#include "memx.fuc"
61#include "perf.fuc" 65#include "perf.fuc"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 8d369b3faaba..12d86f72ad10 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -24,8 +24,8 @@ uint32_t nvd0_pwr_data[] = {
24 0x00000000, 24 0x00000000,
25/* 0x0058: proc_list_head */ 25/* 0x0058: proc_list_head */
26 0x54534f48, 26 0x54534f48,
27 0x000003be, 27 0x0000049d,
28 0x00000367, 28 0x00000446,
29 0x00000000, 29 0x00000000,
30 0x00000000, 30 0x00000000,
31 0x00000000, 31 0x00000000,
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x000004b8, 49 0x00000678,
50 0x000004aa, 50 0x0000066a,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x000004bc, 71 0x0000067c,
72 0x000004ba, 72 0x0000067a,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x000008d7, 93 0x00000a97,
94 0x0000077a, 94 0x0000093a,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x000008fa, 115 0x00000aba,
116 0x000008d9, 116 0x00000a99,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000906, 137 0x00000ac6,
138 0x00000904, 138 0x00000ac4,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -227,24 +227,31 @@ uint32_t nvd0_pwr_data[] = {
227 0x00000000, 227 0x00000000,
228 0x00000000, 228 0x00000000,
229/* 0x0370: memx_func_head */ 229/* 0x0370: memx_func_head */
230 0x00010000,
231 0x00000000,
232 0x000003f4,
233/* 0x037c: memx_func_next */
234 0x00000001, 230 0x00000001,
235 0x00000000, 231 0x00000000,
236 0x00000415, 232 0x000004d3,
233/* 0x037c: memx_func_next */
237 0x00000002, 234 0x00000002,
235 0x00000000,
236 0x00000554,
237 0x00000003,
238 0x00000002, 238 0x00000002,
239 0x00000430, 239 0x000005d8,
240 0x00040003, 240 0x00040004,
241 0x00000000,
242 0x000005f4,
243 0x00010005,
244 0x00000000,
245 0x0000060e,
246 0x00010006,
247 0x00000000,
248 0x000005d3,
249/* 0x03b8: memx_func_tail */
250/* 0x03b8: memx_ts_start */
241 0x00000000, 251 0x00000000,
242 0x0000044c, 252/* 0x03bc: memx_ts_end */
243 0x00010004,
244 0x00000000, 253 0x00000000,
245 0x00000466, 254/* 0x03c0: memx_data_head */
246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */
248 0x00000000, 255 0x00000000,
249 0x00000000, 256 0x00000000,
250 0x00000000, 257 0x00000000,
@@ -757,8 +764,8 @@ uint32_t nvd0_pwr_data[] = {
757 0x00000000, 764 0x00000000,
758 0x00000000, 765 0x00000000,
759 0x00000000, 766 0x00000000,
760/* 0x0bac: memx_data_tail */ 767/* 0x0bc0: memx_data_tail */
761/* 0x0bac: i2c_scl_map */ 768/* 0x0bc0: i2c_scl_map */
762 0x00000400, 769 0x00000400,
763 0x00000800, 770 0x00000800,
764 0x00001000, 771 0x00001000,
@@ -769,7 +776,7 @@ uint32_t nvd0_pwr_data[] = {
769 0x00020000, 776 0x00020000,
770 0x00040000, 777 0x00040000,
771 0x00080000, 778 0x00080000,
772/* 0x0bd4: i2c_sda_map */ 779/* 0x0be8: i2c_sda_map */
773 0x00100000, 780 0x00100000,
774 0x00200000, 781 0x00200000,
775 0x00400000, 782 0x00400000,
@@ -781,10 +788,69 @@ uint32_t nvd0_pwr_data[] = {
781 0x10000000, 788 0x10000000,
782 0x20000000, 789 0x20000000,
783 0x00000000, 790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836 0x00000000,
837 0x00000000,
838 0x00000000,
839 0x00000000,
840 0x00000000,
841 0x00000000,
842 0x00000000,
843 0x00000000,
844 0x00000000,
845 0x00000000,
846 0x00000000,
847 0x00000000,
848 0x00000000,
849 0x00000000,
784}; 850};
785 851
786uint32_t nvd0_pwr_code[] = { 852uint32_t nvd0_pwr_code[] = {
787 0x02bf0ef5, 853 0x034d0ef5,
788/* 0x0004: rd32 */ 854/* 0x0004: rd32 */
789 0x07a007f1, 855 0x07a007f1,
790 0xbd000ed0, 856 0xbd000ed0,
@@ -814,17 +880,20 @@ uint32_t nvd0_pwr_code[] = {
814 0xd4f100dd, 880 0xd4f100dd,
815 0x1bf47000, 881 0x1bf47000,
816/* 0x0067: nsec */ 882/* 0x0067: nsec */
817 0xf000f8f5, 883 0xf900f8f5,
884 0xf080f990,
818 0x88cf2c87, 885 0x88cf2c87,
819/* 0x006d: nsec_loop */ 886/* 0x0071: nsec_loop */
820 0x2c97f000, 887 0x2c97f000,
821 0xbb0099cf, 888 0xbb0099cf,
822 0x9eb80298, 889 0x9eb80298,
823 0xf41ef406, 890 0xf41ef406,
824/* 0x007e: wait */ 891 0x90fc80fc,
825 0x87f000f8, 892/* 0x0086: wait */
893 0x90f900f8,
894 0x87f080f9,
826 0x0088cf2c, 895 0x0088cf2c,
827/* 0x0084: wait_loop */ 896/* 0x0090: wait_loop */
828 0xf402eeb9, 897 0xf402eeb9,
829 0xdab90421, 898 0xdab90421,
830 0x04adfd02, 899 0x04adfd02,
@@ -833,28 +902,29 @@ uint32_t nvd0_pwr_code[] = {
833 0x0099cf2c, 902 0x0099cf2c,
834 0xb80298bb, 903 0xb80298bb,
835 0x1ef4069b, 904 0x1ef4069b,
836/* 0x00a5: wait_done */ 905/* 0x00b1: wait_done */
837/* 0x00a7: intr_watchdog */ 906 0xfc80fce2,
838 0x9800f8e2, 907/* 0x00b7: intr_watchdog */
908 0x9800f890,
839 0x96b003e9, 909 0x96b003e9,
840 0x2a0bf400, 910 0x2a0bf400,
841 0xbb9a0a98, 911 0xbb9a0a98,
842 0x1cf4029a, 912 0x1cf4029a,
843 0x01d7f00f, 913 0x01d7f00f,
844 0x020621f5, 914 0x028c21f5,
845 0x0ef494bd, 915 0x0ef494bd,
846/* 0x00c5: intr_watchdog_next_time */ 916/* 0x00d5: intr_watchdog_next_time */
847 0x9b0a9815, 917 0x9b0a9815,
848 0xf400a6b0, 918 0xf400a6b0,
849 0x9ab8090b, 919 0x9ab8090b,
850 0x061cf406, 920 0x061cf406,
851/* 0x00d4: intr_watchdog_next_time_set */ 921/* 0x00e4: intr_watchdog_next_time_set */
852/* 0x00d7: intr_watchdog_next_proc */ 922/* 0x00e7: intr_watchdog_next_proc */
853 0x809b0980, 923 0x809b0980,
854 0xe0b603e9, 924 0xe0b603e9,
855 0x68e6b158, 925 0x68e6b158,
856 0xc61bf402, 926 0xc61bf402,
857/* 0x00e6: intr */ 927/* 0x00f6: intr */
858 0x00f900f8, 928 0x00f900f8,
859 0x80f904bd, 929 0x80f904bd,
860 0xa0f990f9, 930 0xa0f990f9,
@@ -872,12 +942,12 @@ uint32_t nvd0_pwr_code[] = {
872 0x0bf40289, 942 0x0bf40289,
873 0x9b008020, 943 0x9b008020,
874 0xf458e7f0, 944 0xf458e7f0,
875 0x0998a721, 945 0x0998b721,
876 0x0096b09b, 946 0x0096b09b,
877 0xf00e0bf4, 947 0xf00e0bf4,
878 0x09d03407, 948 0x09d03407,
879 0x8004bd00, 949 0x8004bd00,
880/* 0x013e: intr_skip_watchdog */ 950/* 0x014e: intr_skip_watchdog */
881 0x89e49a09, 951 0x89e49a09,
882 0x0bf40800, 952 0x0bf40800,
883 0x8897f13c, 953 0x8897f13c,
@@ -889,20 +959,20 @@ uint32_t nvd0_pwr_code[] = {
889 0xf14f48e7, 959 0xf14f48e7,
890 0xf05453e3, 960 0xf05453e3,
891 0x21f500d7, 961 0x21f500d7,
892 0xc0fc026b, 962 0xc0fc02f1,
893 0x04c007f1, 963 0x04c007f1,
894 0xbd000cd0, 964 0xbd000cd0,
895/* 0x0175: intr_subintr_skip_fifo */ 965/* 0x0185: intr_subintr_skip_fifo */
896 0x8807f104, 966 0x8807f104,
897 0x0009d006, 967 0x0009d006,
898/* 0x017e: intr_skip_subintr */ 968/* 0x018e: intr_skip_subintr */
899 0x89c404bd, 969 0x89c404bd,
900 0x070bf420, 970 0x070bf420,
901 0xffbfa4f1, 971 0xffbfa4f1,
902/* 0x0188: intr_skip_pause */ 972/* 0x0198: intr_skip_pause */
903 0xf44089c4, 973 0xf44089c4,
904 0xa4f1070b, 974 0xa4f1070b,
905/* 0x0192: intr_skip_user0 */ 975/* 0x01a2: intr_skip_user0 */
906 0x07f0ffbf, 976 0x07f0ffbf,
907 0x0008d004, 977 0x0008d004,
908 0x80fc04bd, 978 0x80fc04bd,
@@ -912,189 +982,298 @@ uint32_t nvd0_pwr_code[] = {
912 0xfca0fcb0, 982 0xfca0fcb0,
913 0xfc80fc90, 983 0xfc80fc90,
914 0x0032f400, 984 0x0032f400,
915/* 0x01b6: timer */ 985/* 0x01c6: ticks_from_ns */
916 0x32f401f8, 986 0xc0f901f8,
917 0x03f89810, 987 0xd7f1b0f9,
918 0xf40086b0, 988 0xd3f00144,
919 0xfe80421c, 989 0xb321f500,
920 0x3807f003, 990 0xe8ccec03,
991 0x00b4b003,
992 0xec120bf4,
993 0xf103e8ee,
994 0xf00144d7,
995 0x21f500d3,
996/* 0x01ee: ticks_from_ns_quit */
997 0xceb903b3,
998 0xfcb0fc02,
999/* 0x01f7: ticks_from_us */
1000 0xf900f8c0,
1001 0xf1b0f9c0,
1002 0xf00144d7,
1003 0x21f500d3,
1004 0xceb903b3,
1005 0x00b4b002,
1006 0xbd050bf4,
1007/* 0x0211: ticks_from_us_quit */
1008 0xfcb0fce4,
1009/* 0x0217: ticks_to_us */
1010 0xf100f8c0,
1011 0xf00144d7,
1012 0xedff00d3,
1013/* 0x0223: timer */
1014 0xf900f8ec,
1015 0xf480f990,
1016 0xf8981032,
1017 0x0086b003,
1018 0xbd531cf4,
1019 0x3807f084,
921 0xbd0008d0, 1020 0xbd0008d0,
922 0x0887f004, 1021 0x3487f004,
923 0xf00088cf, 1022 0x980088cf,
924 0x1bf40284, 1023 0x98bb9a09,
925 0x3487f020, 1024 0x00e9bb02,
926 0xb80088cf, 1025 0xf003fe80,
927 0x0bf406e0, 1026 0x88cf0887,
928 0x06e8b809, 1027 0x0284f000,
929/* 0x01eb: timer_reset */ 1028 0xf0201bf4,
930 0xf0191ef4, 1029 0x88cf3487,
931 0x0ed03407, 1030 0x06e0b800,
932 0x8004bd00, 1031 0xb8090bf4,
933/* 0x01f6: timer_enable */ 1032 0x1cf406e8,
934 0x87f09a0e, 1033/* 0x026d: timer_reset */
935 0x3807f001, 1034 0x3407f00e,
936 0xbd0008d0, 1035 0xbd000ed0,
937/* 0x0201: timer_done */ 1036 0x9a0e8004,
938 0x1031f404, 1037/* 0x0278: timer_enable */
939/* 0x0206: send_proc */ 1038 0xf00187f0,
940 0x80f900f8, 1039 0x08d03807,
941 0xe89890f9, 1040/* 0x0283: timer_done */
1041 0xf404bd00,
1042 0x80fc1031,
1043 0x00f890fc,
1044/* 0x028c: send_proc */
1045 0x90f980f9,
1046 0x9805e898,
1047 0x86f004e9,
1048 0x0689b804,
1049 0xc42a0bf4,
1050 0x88940398,
1051 0x1880b604,
1052 0x98008ebb,
1053 0x8a8000fa,
1054 0x018d8000,
1055 0x80028c80,
1056 0x90b6038b,
1057 0x0794f001,
1058 0xf404e980,
1059/* 0x02c6: send_done */
1060 0x90fc0231,
1061 0x00f880fc,
1062/* 0x02cc: find */
1063 0x87f080f9,
1064 0x0131f458,
1065/* 0x02d4: find_loop */
1066 0xb8008a98,
1067 0x0bf406ae,
1068 0x5880b610,
1069 0x026886b1,
1070 0xf4f01bf4,
1071/* 0x02ea: find_done */
1072 0x8eb90132,
1073 0xf880fc02,
1074/* 0x02f1: send */
1075 0xcc21f500,
1076 0x9701f402,
1077/* 0x02fa: recv */
1078 0x90f900f8,
1079 0xe89880f9,
942 0x04e99805, 1080 0x04e99805,
943 0xb80486f0, 1081 0xb80132f4,
944 0x0bf40689, 1082 0x0bf40689,
945 0x0398c42a, 1083 0x0389c43d,
946 0xb6048894, 1084 0xf00180b6,
947 0x8ebb1880, 1085 0xe8800784,
948 0x00fa9800, 1086 0x02ea9805,
949 0x80008a80, 1087 0x8ffef0f9,
950 0x8c80018d, 1088 0xb9f0f901,
951 0x038b8002, 1089 0x999402ef,
952 0xf00190b6, 1090 0x00e9bb04,
953 0xe9800794, 1091 0x9818e0b6,
954 0x0231f404,
955/* 0x0240: send_done */
956 0x80fc90fc,
957/* 0x0246: find */
958 0x80f900f8,
959 0xf45887f0,
960/* 0x024e: find_loop */
961 0x8a980131,
962 0x06aeb800,
963 0xb6100bf4,
964 0x86b15880,
965 0x1bf40268,
966 0x0132f4f0,
967/* 0x0264: find_done */
968 0xfc028eb9,
969/* 0x026b: send */
970 0xf500f880,
971 0xf4024621,
972 0x00f89701,
973/* 0x0274: recv */
974 0x9805e898,
975 0x32f404e9,
976 0x0689b801,
977 0xc43d0bf4,
978 0x80b60389,
979 0x0784f001,
980 0x9805e880,
981 0xf0f902ea,
982 0xf9018ffe,
983 0x02efb9f0,
984 0xbb049994,
985 0xe0b600e9,
986 0x03eb9818,
987 0x9802ec98,
988 0xee9801ed,
989 0xfca5f900,
990 0x00f8fef0,
991 0xfc0131f4,
992/* 0x02bd: recv_done */
993/* 0x02bf: init */
994 0xf100f8f0,
995 0xcf010817,
996 0x11e70011,
997 0x14b60109,
998 0x0014fe08,
999 0x00e017f1,
1000 0xf00013f0,
1001 0x01d01c07,
1002 0xf004bd00,
1003 0x07f0ff17,
1004 0x0001d014,
1005 0x17f004bd,
1006 0x0015f102,
1007 0x1007f008,
1008 0xbd0001d0,
1009 0xe617f104,
1010 0x0013f000,
1011 0xf40010fe,
1012 0x17f01031,
1013 0x3807f001,
1014 0xbd0001d0,
1015 0x58f7f004,
1016/* 0x0314: init_proc */
1017 0xb001f198,
1018 0x0bf40016,
1019 0xb615f9fa,
1020 0x0ef458f0,
1021/* 0x0325: host_send */
1022 0xb017f1f2,
1023 0x0011cf04,
1024 0x04a027f1,
1025 0xb80022cf,
1026 0x0bf40612,
1027 0x071ec42f,
1028 0xb704ee94,
1029 0x980270e0,
1030 0xec9803eb, 1092 0xec9803eb,
1031 0x01ed9802, 1093 0x01ed9802,
1032 0xf500ee98, 1094 0xf900ee98,
1033 0xb6026b21, 1095 0xfef0fca5,
1034 0x1ec40110, 1096 0x31f400f8,
1035 0xb007f10f, 1097/* 0x0347: recv_done */
1036 0x000ed004, 1098 0xfcf0fc01,
1037 0x0ef404bd, 1099 0xf890fc80,
1038/* 0x0365: host_send_done */ 1100/* 0x034d: init */
1039/* 0x0367: host_recv */ 1101 0x0817f100,
1040 0xf100f8c3, 1102 0x0011cf01,
1041 0xf14e4917, 1103 0x010911e7,
1042 0xb8525413, 1104 0xfe0814b6,
1043 0x0bf406e1, 1105 0x17f10014,
1044/* 0x0375: host_recv_wait */ 1106 0x13f000e0,
1045 0xcc17f1b3, 1107 0x1c07f000,
1046 0x0011cf04, 1108 0xbd0001d0,
1047 0x04c827f1, 1109 0xff17f004,
1048 0xf00022cf, 1110 0xd01407f0,
1049 0x12b80816, 1111 0x04bd0001,
1050 0xec0bf406, 1112 0xf10217f0,
1051 0xb60723c4, 1113 0xf0080015,
1052 0x30b70434, 1114 0x01d01007,
1053 0x3b8002f0, 1115 0xf104bd00,
1054 0x023c8003, 1116 0xf000f617,
1055 0x80013d80, 1117 0x10fe0013,
1056 0x20b6003e, 1118 0x1031f400,
1057 0x0f24f001, 1119 0xf00117f0,
1058 0x04c807f1, 1120 0x01d03807,
1059 0xbd0002d0, 1121 0xf004bd00,
1060 0x4027f004, 1122/* 0x03a2: init_proc */
1061 0xd00007f0, 1123 0xf19858f7,
1062 0x04bd0002, 1124 0x0016b001,
1063/* 0x03be: host_init */ 1125 0xf9fa0bf4,
1126 0x58f0b615,
1127/* 0x03b3: mulu32_32_64 */
1128 0xf9f20ef4,
1129 0xf920f910,
1130 0x9540f930,
1131 0xd29510e1,
1132 0xbdc4bd10,
1133 0xc0edffb4,
1134 0xb9301dff,
1135 0x34f10234,
1136 0x34b6ffff,
1137 0x1045b610,
1138 0xbb00c3bb,
1139 0xe2ff01b4,
1140 0x0234b930,
1141 0xffff34f1,
1142 0xb61034b6,
1143 0xc3bb1045,
1144 0x01b4bb00,
1145 0xbb3012ff,
1146 0x40fc00b3,
1147 0x20fc30fc,
1148 0x00f810fc,
1149/* 0x0404: host_send */
1150 0x04b017f1,
1151 0xf10011cf,
1152 0xcf04a027,
1153 0x12b80022,
1154 0x2f0bf406,
1155 0x94071ec4,
1156 0xe0b704ee,
1157 0xeb980270,
1158 0x02ec9803,
1159 0x9801ed98,
1160 0x21f500ee,
1161 0x10b602f1,
1162 0x0f1ec401,
1163 0x04b007f1,
1164 0xbd000ed0,
1165 0xc30ef404,
1166/* 0x0444: host_send_done */
1167/* 0x0446: host_recv */
1064 0x17f100f8, 1168 0x17f100f8,
1065 0x14b60080, 1169 0x13f14e49,
1066 0x7015f110, 1170 0xe1b85254,
1067 0xd007f102, 1171 0xb30bf406,
1068 0x0001d004, 1172/* 0x0454: host_recv_wait */
1069 0x17f104bd, 1173 0x04cc17f1,
1070 0x14b60080, 1174 0xf10011cf,
1071 0xf015f110, 1175 0xcf04c827,
1072 0xdc07f102, 1176 0x16f00022,
1073 0x0001d004, 1177 0x0612b808,
1074 0x17f004bd, 1178 0xc4ec0bf4,
1075 0xc407f101, 1179 0x34b60723,
1076 0x0001d004, 1180 0xf030b704,
1077 0x00f804bd, 1181 0x033b8002,
1078/* 0x03f4: memx_func_enter */ 1182 0x80023c80,
1183 0x3e80013d,
1184 0x0120b600,
1185 0xf10f24f0,
1186 0xd004c807,
1187 0x04bd0002,
1188 0xf04027f0,
1189 0x02d00007,
1190 0xf804bd00,
1191/* 0x049d: host_init */
1192 0x8017f100,
1193 0x1014b600,
1194 0x027015f1,
1195 0x04d007f1,
1196 0xbd0001d0,
1197 0x8017f104,
1198 0x1014b600,
1199 0x02f015f1,
1200 0x04dc07f1,
1201 0xbd0001d0,
1202 0x0117f004,
1203 0x04c407f1,
1204 0xbd0001d0,
1205/* 0x04d3: memx_func_enter */
1206 0xf100f804,
1207 0xf1162067,
1208 0xf1f55d77,
1209 0xb9ffff73,
1210 0x21f4026e,
1211 0x02d8b904,
1212 0xf90487fd,
1213 0xfc80f960,
1214 0xf4e0fcd0,
1215 0x77f13321,
1216 0x73f1fffe,
1217 0x6eb9ffff,
1218 0x0421f402,
1219 0xfd02d8b9,
1220 0x60f90487,
1221 0xd0fc80f9,
1222 0x21f4e0fc,
1223 0xf067f133,
1224 0x026eb926,
1225 0xb90421f4,
1226 0x87fd02d8,
1227 0xf960f904,
1228 0xfcd0fc80,
1229 0x3321f4e0,
1079 0xf10467f0, 1230 0xf10467f0,
1080 0xd007e007, 1231 0xd007e007,
1081 0x04bd0006, 1232 0x04bd0006,
1082/* 0x0400: memx_func_enter_wait */ 1233/* 0x053c: memx_func_enter_wait */
1083 0x07c067f1, 1234 0x07c067f1,
1084 0xf00066cf, 1235 0xf00066cf,
1085 0x0bf40464, 1236 0x0bf40464,
1086 0x001698f6, 1237 0x2c67f0f6,
1087 0xf80410b6, 1238 0x800066cf,
1088/* 0x0415: memx_func_leave */ 1239 0x00f8ee06,
1089 0x0467f000, 1240/* 0x0554: memx_func_leave */
1241 0xcf2c67f0,
1242 0x06800066,
1243 0x0467f0ef,
1090 0x07e407f1, 1244 0x07e407f1,
1091 0xbd0006d0, 1245 0xbd0006d0,
1092/* 0x0421: memx_func_leave_wait */ 1246/* 0x0569: memx_func_leave_wait */
1093 0xc067f104, 1247 0xc067f104,
1094 0x0066cf07, 1248 0x0066cf07,
1095 0xf40464f0, 1249 0xf40464f0,
1096 0x00f8f61b, 1250 0x67f1f61b,
1097/* 0x0430: memx_func_wr32 */ 1251 0x77f126f0,
1252 0x73f00001,
1253 0x026eb900,
1254 0xb90421f4,
1255 0x87fd02d8,
1256 0xf960f905,
1257 0xfcd0fc80,
1258 0x3321f4e0,
1259 0x162067f1,
1260 0xf4026eb9,
1261 0xd8b90421,
1262 0x0587fd02,
1263 0x80f960f9,
1264 0xe0fcd0fc,
1265 0xf13321f4,
1266 0xf00aa277,
1267 0x6eb90073,
1268 0x0421f402,
1269 0xfd02d8b9,
1270 0x60f90587,
1271 0xd0fc80f9,
1272 0x21f4e0fc,
1273/* 0x05d3: memx_func_wait_vblank */
1274 0xb600f833,
1275 0x00f80410,
1276/* 0x05d8: memx_func_wr32 */
1098 0x98001698, 1277 0x98001698,
1099 0x10b60115, 1278 0x10b60115,
1100 0xf960f908, 1279 0xf960f908,
@@ -1102,131 +1281,137 @@ uint32_t nvd0_pwr_code[] = {
1102 0x3321f4e0, 1281 0x3321f4e0,
1103 0xf40242b6, 1282 0xf40242b6,
1104 0x00f8e91b, 1283 0x00f8e91b,
1105/* 0x044c: memx_func_wait */ 1284/* 0x05f4: memx_func_wait */
1106 0xcf2c87f0, 1285 0xcf2c87f0,
1107 0x1e980088, 1286 0x1e980088,
1108 0x011d9800, 1287 0x011d9800,
1109 0x98021c98, 1288 0x98021c98,
1110 0x10b6031b, 1289 0x10b6031b,
1111 0x7e21f410, 1290 0x8621f410,
1112/* 0x0466: memx_func_delay */ 1291/* 0x060e: memx_func_delay */
1113 0x1e9800f8, 1292 0x1e9800f8,
1114 0x0410b600, 1293 0x0410b600,
1115 0xf86721f4, 1294 0xf86721f4,
1116/* 0x0471: memx_exec */ 1295/* 0x0619: memx_exec */
1117 0xf9e0f900, 1296 0xf9e0f900,
1118 0x02c1b9d0, 1297 0x02c1b9d0,
1119/* 0x047b: memx_exec_next */ 1298/* 0x0623: memx_exec_next */
1120 0x9802b2b9, 1299 0x9802b2b9,
1121 0x10b60013, 1300 0x10b60013,
1122 0x10349504, 1301 0xf034e704,
1302 0xe033e701,
1303 0x0132b601,
1123 0x980c30f0, 1304 0x980c30f0,
1124 0x55f9de35, 1305 0x55f9de35,
1125 0xf40612b8, 1306 0xf40612b8,
1126 0xd0fcec1e, 1307 0x0b98e41e,
1308 0xef0c98ee,
1309 0xf102cbbb,
1310 0xcf07c4b7,
1311 0xd0fc00bb,
1127 0x21f5e0fc, 1312 0x21f5e0fc,
1128 0x00f8026b, 1313 0x00f802f1,
1129/* 0x049c: memx_info */ 1314/* 0x065c: memx_info */
1130 0x03acc7f1, 1315 0x03c0c7f1,
1131 0x0800b7f1, 1316 0x0800b7f1,
1132 0x026b21f5, 1317 0x02f121f5,
1133/* 0x04aa: memx_recv */ 1318/* 0x066a: memx_recv */
1134 0xd6b000f8, 1319 0xd6b000f8,
1135 0xc40bf401, 1320 0xac0bf401,
1136 0xf400d6b0, 1321 0xf400d6b0,
1137 0x00f8e90b, 1322 0x00f8e90b,
1138/* 0x04b8: memx_init */ 1323/* 0x0678: memx_init */
1139/* 0x04ba: perf_recv */ 1324/* 0x067a: perf_recv */
1140 0x00f800f8, 1325 0x00f800f8,
1141/* 0x04bc: perf_init */ 1326/* 0x067c: perf_init */
1142/* 0x04be: i2c_drive_scl */ 1327/* 0x067e: i2c_drive_scl */
1143 0x36b000f8, 1328 0x36b000f8,
1144 0x0e0bf400, 1329 0x0e0bf400,
1145 0x07e007f1, 1330 0x07e007f1,
1146 0xbd0001d0, 1331 0xbd0001d0,
1147/* 0x04cf: i2c_drive_scl_lo */ 1332/* 0x068f: i2c_drive_scl_lo */
1148 0xf100f804, 1333 0xf100f804,
1149 0xd007e407, 1334 0xd007e407,
1150 0x04bd0001, 1335 0x04bd0001,
1151/* 0x04da: i2c_drive_sda */ 1336/* 0x069a: i2c_drive_sda */
1152 0x36b000f8, 1337 0x36b000f8,
1153 0x0e0bf400, 1338 0x0e0bf400,
1154 0x07e007f1, 1339 0x07e007f1,
1155 0xbd0002d0, 1340 0xbd0002d0,
1156/* 0x04eb: i2c_drive_sda_lo */ 1341/* 0x06ab: i2c_drive_sda_lo */
1157 0xf100f804, 1342 0xf100f804,
1158 0xd007e407, 1343 0xd007e407,
1159 0x04bd0002, 1344 0x04bd0002,
1160/* 0x04f6: i2c_sense_scl */ 1345/* 0x06b6: i2c_sense_scl */
1161 0x32f400f8, 1346 0x32f400f8,
1162 0xc437f101, 1347 0xc437f101,
1163 0x0033cf07, 1348 0x0033cf07,
1164 0xf40431fd, 1349 0xf40431fd,
1165 0x31f4060b, 1350 0x31f4060b,
1166/* 0x0509: i2c_sense_scl_done */ 1351/* 0x06c9: i2c_sense_scl_done */
1167/* 0x050b: i2c_sense_sda */ 1352/* 0x06cb: i2c_sense_sda */
1168 0xf400f801, 1353 0xf400f801,
1169 0x37f10132, 1354 0x37f10132,
1170 0x33cf07c4, 1355 0x33cf07c4,
1171 0x0432fd00, 1356 0x0432fd00,
1172 0xf4060bf4, 1357 0xf4060bf4,
1173/* 0x051e: i2c_sense_sda_done */ 1358/* 0x06de: i2c_sense_sda_done */
1174 0x00f80131, 1359 0x00f80131,
1175/* 0x0520: i2c_raise_scl */ 1360/* 0x06e0: i2c_raise_scl */
1176 0x47f140f9, 1361 0x47f140f9,
1177 0x37f00898, 1362 0x37f00898,
1178 0xbe21f501, 1363 0x7e21f501,
1179/* 0x052d: i2c_raise_scl_wait */ 1364/* 0x06ed: i2c_raise_scl_wait */
1180 0xe8e7f104, 1365 0xe8e7f106,
1181 0x6721f403, 1366 0x6721f403,
1182 0x04f621f5, 1367 0x06b621f5,
1183 0xb60901f4, 1368 0xb60901f4,
1184 0x1bf40142, 1369 0x1bf40142,
1185/* 0x0541: i2c_raise_scl_done */ 1370/* 0x0701: i2c_raise_scl_done */
1186 0xf840fcef, 1371 0xf840fcef,
1187/* 0x0545: i2c_start */ 1372/* 0x0705: i2c_start */
1188 0xf621f500, 1373 0xb621f500,
1189 0x0d11f404, 1374 0x0d11f406,
1190 0x050b21f5, 1375 0x06cb21f5,
1191 0xf40611f4, 1376 0xf40611f4,
1192/* 0x0556: i2c_start_rep */ 1377/* 0x0716: i2c_start_rep */
1193 0x37f0300e, 1378 0x37f0300e,
1194 0xbe21f500, 1379 0x7e21f500,
1195 0x0137f004, 1380 0x0137f006,
1196 0x04da21f5, 1381 0x069a21f5,
1197 0xb60076bb, 1382 0xb60076bb,
1198 0x50f90465, 1383 0x50f90465,
1199 0xbb046594, 1384 0xbb046594,
1200 0x50bd0256, 1385 0x50bd0256,
1201 0xfc0475fd, 1386 0xfc0475fd,
1202 0x2021f550, 1387 0xe021f550,
1203 0x0464b605, 1388 0x0464b606,
1204/* 0x0583: i2c_start_send */ 1389/* 0x0743: i2c_start_send */
1205 0xf01f11f4, 1390 0xf01f11f4,
1206 0x21f50037, 1391 0x21f50037,
1207 0xe7f104da, 1392 0xe7f1069a,
1208 0x21f41388, 1393 0x21f41388,
1209 0x0037f067, 1394 0x0037f067,
1210 0x04be21f5, 1395 0x067e21f5,
1211 0x1388e7f1, 1396 0x1388e7f1,
1212/* 0x059f: i2c_start_out */ 1397/* 0x075f: i2c_start_out */
1213 0xf86721f4, 1398 0xf86721f4,
1214/* 0x05a1: i2c_stop */ 1399/* 0x0761: i2c_stop */
1215 0x0037f000, 1400 0x0037f000,
1216 0x04be21f5, 1401 0x067e21f5,
1217 0xf50037f0, 1402 0xf50037f0,
1218 0xf104da21, 1403 0xf1069a21,
1219 0xf403e8e7, 1404 0xf403e8e7,
1220 0x37f06721, 1405 0x37f06721,
1221 0xbe21f501, 1406 0x7e21f501,
1222 0x88e7f104, 1407 0x88e7f106,
1223 0x6721f413, 1408 0x6721f413,
1224 0xf50137f0, 1409 0xf50137f0,
1225 0xf104da21, 1410 0xf1069a21,
1226 0xf41388e7, 1411 0xf41388e7,
1227 0x00f86721, 1412 0x00f86721,
1228/* 0x05d4: i2c_bitw */ 1413/* 0x0794: i2c_bitw */
1229 0x04da21f5, 1414 0x069a21f5,
1230 0x03e8e7f1, 1415 0x03e8e7f1,
1231 0xbb6721f4, 1416 0xbb6721f4,
1232 0x65b60076, 1417 0x65b60076,
@@ -1234,18 +1419,18 @@ uint32_t nvd0_pwr_code[] = {
1234 0x56bb0465, 1419 0x56bb0465,
1235 0xfd50bd02, 1420 0xfd50bd02,
1236 0x50fc0475, 1421 0x50fc0475,
1237 0x052021f5, 1422 0x06e021f5,
1238 0xf40464b6, 1423 0xf40464b6,
1239 0xe7f11811, 1424 0xe7f11811,
1240 0x21f41388, 1425 0x21f41388,
1241 0x0037f067, 1426 0x0037f067,
1242 0x04be21f5, 1427 0x067e21f5,
1243 0x1388e7f1, 1428 0x1388e7f1,
1244/* 0x0613: i2c_bitw_out */ 1429/* 0x07d3: i2c_bitw_out */
1245 0xf86721f4, 1430 0xf86721f4,
1246/* 0x0615: i2c_bitr */ 1431/* 0x07d5: i2c_bitr */
1247 0x0137f000, 1432 0x0137f000,
1248 0x04da21f5, 1433 0x069a21f5,
1249 0x03e8e7f1, 1434 0x03e8e7f1,
1250 0xbb6721f4, 1435 0xbb6721f4,
1251 0x65b60076, 1436 0x65b60076,
@@ -1253,19 +1438,19 @@ uint32_t nvd0_pwr_code[] = {
1253 0x56bb0465, 1438 0x56bb0465,
1254 0xfd50bd02, 1439 0xfd50bd02,
1255 0x50fc0475, 1440 0x50fc0475,
1256 0x052021f5, 1441 0x06e021f5,
1257 0xf40464b6, 1442 0xf40464b6,
1258 0x21f51b11, 1443 0x21f51b11,
1259 0x37f0050b, 1444 0x37f006cb,
1260 0xbe21f500, 1445 0x7e21f500,
1261 0x88e7f104, 1446 0x88e7f106,
1262 0x6721f413, 1447 0x6721f413,
1263 0xf4013cf0, 1448 0xf4013cf0,
1264/* 0x065a: i2c_bitr_done */ 1449/* 0x081a: i2c_bitr_done */
1265 0x00f80131, 1450 0x00f80131,
1266/* 0x065c: i2c_get_byte */ 1451/* 0x081c: i2c_get_byte */
1267 0xf00057f0, 1452 0xf00057f0,
1268/* 0x0662: i2c_get_byte_next */ 1453/* 0x0822: i2c_get_byte_next */
1269 0x54b60847, 1454 0x54b60847,
1270 0x0076bb01, 1455 0x0076bb01,
1271 0xf90465b6, 1456 0xf90465b6,
@@ -1273,7 +1458,7 @@ uint32_t nvd0_pwr_code[] = {
1273 0xbd0256bb, 1458 0xbd0256bb,
1274 0x0475fd50, 1459 0x0475fd50,
1275 0x21f550fc, 1460 0x21f550fc,
1276 0x64b60615, 1461 0x64b607d5,
1277 0x2b11f404, 1462 0x2b11f404,
1278 0xb60553fd, 1463 0xb60553fd,
1279 0x1bf40142, 1464 0x1bf40142,
@@ -1283,12 +1468,12 @@ uint32_t nvd0_pwr_code[] = {
1283 0xbb046594, 1468 0xbb046594,
1284 0x50bd0256, 1469 0x50bd0256,
1285 0xfc0475fd, 1470 0xfc0475fd,
1286 0xd421f550, 1471 0x9421f550,
1287 0x0464b605, 1472 0x0464b607,
1288/* 0x06ac: i2c_get_byte_done */ 1473/* 0x086c: i2c_get_byte_done */
1289/* 0x06ae: i2c_put_byte */ 1474/* 0x086e: i2c_put_byte */
1290 0x47f000f8, 1475 0x47f000f8,
1291/* 0x06b1: i2c_put_byte_next */ 1476/* 0x0871: i2c_put_byte_next */
1292 0x0142b608, 1477 0x0142b608,
1293 0xbb3854ff, 1478 0xbb3854ff,
1294 0x65b60076, 1479 0x65b60076,
@@ -1296,7 +1481,7 @@ uint32_t nvd0_pwr_code[] = {
1296 0x56bb0465, 1481 0x56bb0465,
1297 0xfd50bd02, 1482 0xfd50bd02,
1298 0x50fc0475, 1483 0x50fc0475,
1299 0x05d421f5, 1484 0x079421f5,
1300 0xf40464b6, 1485 0xf40464b6,
1301 0x46b03411, 1486 0x46b03411,
1302 0xd81bf400, 1487 0xd81bf400,
@@ -1305,21 +1490,21 @@ uint32_t nvd0_pwr_code[] = {
1305 0xbb046594, 1490 0xbb046594,
1306 0x50bd0256, 1491 0x50bd0256,
1307 0xfc0475fd, 1492 0xfc0475fd,
1308 0x1521f550, 1493 0xd521f550,
1309 0x0464b606, 1494 0x0464b607,
1310 0xbb0f11f4, 1495 0xbb0f11f4,
1311 0x36b00076, 1496 0x36b00076,
1312 0x061bf401, 1497 0x061bf401,
1313/* 0x0707: i2c_put_byte_done */ 1498/* 0x08c7: i2c_put_byte_done */
1314 0xf80132f4, 1499 0xf80132f4,
1315/* 0x0709: i2c_addr */ 1500/* 0x08c9: i2c_addr */
1316 0x0076bb00, 1501 0x0076bb00,
1317 0xf90465b6, 1502 0xf90465b6,
1318 0x04659450, 1503 0x04659450,
1319 0xbd0256bb, 1504 0xbd0256bb,
1320 0x0475fd50, 1505 0x0475fd50,
1321 0x21f550fc, 1506 0x21f550fc,
1322 0x64b60545, 1507 0x64b60705,
1323 0x2911f404, 1508 0x2911f404,
1324 0x012ec3e7, 1509 0x012ec3e7,
1325 0xfd0134b6, 1510 0xfd0134b6,
@@ -1329,30 +1514,30 @@ uint32_t nvd0_pwr_code[] = {
1329 0x0256bb04, 1514 0x0256bb04,
1330 0x75fd50bd, 1515 0x75fd50bd,
1331 0xf550fc04, 1516 0xf550fc04,
1332 0xb606ae21, 1517 0xb6086e21,
1333/* 0x074e: i2c_addr_done */ 1518/* 0x090e: i2c_addr_done */
1334 0x00f80464, 1519 0x00f80464,
1335/* 0x0750: i2c_acquire_addr */ 1520/* 0x0910: i2c_acquire_addr */
1336 0xb6f8cec7, 1521 0xb6f8cec7,
1337 0xe0b705e4, 1522 0xe0b705e4,
1338 0x00f8d014, 1523 0x00f8d014,
1339/* 0x075c: i2c_acquire */ 1524/* 0x091c: i2c_acquire */
1340 0x075021f5, 1525 0x091021f5,
1341 0xf00421f4, 1526 0xf00421f4,
1342 0x21f403d9, 1527 0x21f403d9,
1343/* 0x076b: i2c_release */ 1528/* 0x092b: i2c_release */
1344 0xf500f833, 1529 0xf500f833,
1345 0xf4075021, 1530 0xf4091021,
1346 0xdaf00421, 1531 0xdaf00421,
1347 0x3321f403, 1532 0x3321f403,
1348/* 0x077a: i2c_recv */ 1533/* 0x093a: i2c_recv */
1349 0x32f400f8, 1534 0x32f400f8,
1350 0xf8c1c701, 1535 0xf8c1c701,
1351 0xb00214b6, 1536 0xb00214b6,
1352 0x1ff52816, 1537 0x1ff52816,
1353 0x13a0013a, 1538 0x13a0013a,
1354 0x32980bd4, 1539 0x32980be8,
1355 0xac13a000, 1540 0xc013a000,
1356 0x0031980b, 1541 0x0031980b,
1357 0xf90231f4, 1542 0xf90231f4,
1358 0xf9e0f9d0, 1543 0xf9e0f9d0,
@@ -1364,8 +1549,8 @@ uint32_t nvd0_pwr_code[] = {
1364 0xbb046594, 1549 0xbb046594,
1365 0x50bd0256, 1550 0x50bd0256,
1366 0xfc0475fd, 1551 0xfc0475fd,
1367 0x5c21f550, 1552 0x1c21f550,
1368 0x0464b607, 1553 0x0464b609,
1369 0xd6b0d0fc, 1554 0xd6b0d0fc,
1370 0xb31bf500, 1555 0xb31bf500,
1371 0x0057f000, 1556 0x0057f000,
@@ -1374,8 +1559,8 @@ uint32_t nvd0_pwr_code[] = {
1374 0xbb046594, 1559 0xbb046594,
1375 0x50bd0256, 1560 0x50bd0256,
1376 0xfc0475fd, 1561 0xfc0475fd,
1377 0x0921f550, 1562 0xc921f550,
1378 0x0464b607, 1563 0x0464b608,
1379 0x00d011f5, 1564 0x00d011f5,
1380 0xbbe0c5c7, 1565 0xbbe0c5c7,
1381 0x65b60076, 1566 0x65b60076,
@@ -1383,7 +1568,7 @@ uint32_t nvd0_pwr_code[] = {
1383 0x56bb0465, 1568 0x56bb0465,
1384 0xfd50bd02, 1569 0xfd50bd02,
1385 0x50fc0475, 1570 0x50fc0475,
1386 0x06ae21f5, 1571 0x086e21f5,
1387 0xf50464b6, 1572 0xf50464b6,
1388 0xf000ad11, 1573 0xf000ad11,
1389 0x76bb0157, 1574 0x76bb0157,
@@ -1392,7 +1577,7 @@ uint32_t nvd0_pwr_code[] = {
1392 0x0256bb04, 1577 0x0256bb04,
1393 0x75fd50bd, 1578 0x75fd50bd,
1394 0xf550fc04, 1579 0xf550fc04,
1395 0xb6070921, 1580 0xb608c921,
1396 0x11f50464, 1581 0x11f50464,
1397 0x76bb008a, 1582 0x76bb008a,
1398 0x0465b600, 1583 0x0465b600,
@@ -1400,7 +1585,7 @@ uint32_t nvd0_pwr_code[] = {
1400 0x0256bb04, 1585 0x0256bb04,
1401 0x75fd50bd, 1586 0x75fd50bd,
1402 0xf550fc04, 1587 0xf550fc04,
1403 0xb6065c21, 1588 0xb6081c21,
1404 0x11f40464, 1589 0x11f40464,
1405 0xe05bcb6a, 1590 0xe05bcb6a,
1406 0xb60076bb, 1591 0xb60076bb,
@@ -1408,38 +1593,38 @@ uint32_t nvd0_pwr_code[] = {
1408 0xbb046594, 1593 0xbb046594,
1409 0x50bd0256, 1594 0x50bd0256,
1410 0xfc0475fd, 1595 0xfc0475fd,
1411 0xa121f550, 1596 0x6121f550,
1412 0x0464b605, 1597 0x0464b607,
1413 0xbd025bb9, 1598 0xbd025bb9,
1414 0x430ef474, 1599 0x430ef474,
1415/* 0x0880: i2c_recv_not_rd08 */ 1600/* 0x0a40: i2c_recv_not_rd08 */
1416 0xf401d6b0, 1601 0xf401d6b0,
1417 0x57f03d1b, 1602 0x57f03d1b,
1418 0x0921f500, 1603 0xc921f500,
1419 0x3311f407, 1604 0x3311f408,
1420 0xf5e0c5c7, 1605 0xf5e0c5c7,
1421 0xf406ae21, 1606 0xf4086e21,
1422 0x57f02911, 1607 0x57f02911,
1423 0x0921f500, 1608 0xc921f500,
1424 0x1f11f407, 1609 0x1f11f408,
1425 0xf5e0b5c7, 1610 0xf5e0b5c7,
1426 0xf406ae21, 1611 0xf4086e21,
1427 0x21f51511, 1612 0x21f51511,
1428 0x74bd05a1, 1613 0x74bd0761,
1429 0xf408c5c7, 1614 0xf408c5c7,
1430 0x32f4091b, 1615 0x32f4091b,
1431 0x030ef402, 1616 0x030ef402,
1432/* 0x08c0: i2c_recv_not_wr08 */ 1617/* 0x0a80: i2c_recv_not_wr08 */
1433/* 0x08c0: i2c_recv_done */ 1618/* 0x0a80: i2c_recv_done */
1434 0xf5f8cec7, 1619 0xf5f8cec7,
1435 0xfc076b21, 1620 0xfc092b21,
1436 0xf4d0fce0, 1621 0xf4d0fce0,
1437 0x7cb90a12, 1622 0x7cb90a12,
1438 0x6b21f502, 1623 0xf121f502,
1439/* 0x08d5: i2c_recv_exit */ 1624/* 0x0a95: i2c_recv_exit */
1440/* 0x08d7: i2c_init */ 1625/* 0x0a97: i2c_init */
1441 0xf800f802, 1626 0xf800f802,
1442/* 0x08d9: test_recv */ 1627/* 0x0a99: test_recv */
1443 0xd817f100, 1628 0xd817f100,
1444 0x0011cf05, 1629 0x0011cf05,
1445 0xf10110b6, 1630 0xf10110b6,
@@ -1447,29 +1632,29 @@ uint32_t nvd0_pwr_code[] = {
1447 0x04bd0001, 1632 0x04bd0001,
1448 0xd900e7f1, 1633 0xd900e7f1,
1449 0x134fe3f1, 1634 0x134fe3f1,
1450 0x01b621f5, 1635 0x022321f5,
1451/* 0x08fa: test_init */ 1636/* 0x0aba: test_init */
1452 0xe7f100f8, 1637 0xe7f100f8,
1453 0x21f50800, 1638 0x21f50800,
1454 0x00f801b6, 1639 0x00f80223,
1455/* 0x0904: idle_recv */ 1640/* 0x0ac4: idle_recv */
1456/* 0x0906: idle */ 1641/* 0x0ac6: idle */
1457 0x31f400f8, 1642 0x31f400f8,
1458 0xd417f100, 1643 0xd417f100,
1459 0x0011cf05, 1644 0x0011cf05,
1460 0xf10110b6, 1645 0xf10110b6,
1461 0xd005d407, 1646 0xd005d407,
1462 0x04bd0001, 1647 0x04bd0001,
1463/* 0x091c: idle_loop */ 1648/* 0x0adc: idle_loop */
1464 0xf45817f0, 1649 0xf45817f0,
1465/* 0x0922: idle_proc */ 1650/* 0x0ae2: idle_proc */
1466/* 0x0922: idle_proc_exec */ 1651/* 0x0ae2: idle_proc_exec */
1467 0x10f90232, 1652 0x10f90232,
1468 0xf5021eb9, 1653 0xf5021eb9,
1469 0xfc027421, 1654 0xfc02fa21,
1470 0x0911f410, 1655 0x0911f410,
1471 0xf40231f4, 1656 0xf40231f4,
1472/* 0x0936: idle_proc_next */ 1657/* 0x0af6: idle_proc_next */
1473 0x10b6ef0e, 1658 0x10b6ef0e,
1474 0x061fb858, 1659 0x061fb858,
1475 0xf4e61bf4, 1660 0xf4e61bf4,
@@ -1521,4 +1706,20 @@ uint32_t nvd0_pwr_code[] = {
1521 0x00000000, 1706 0x00000000,
1522 0x00000000, 1707 0x00000000,
1523 0x00000000, 1708 0x00000000,
1709 0x00000000,
1710 0x00000000,
1711 0x00000000,
1712 0x00000000,
1713 0x00000000,
1714 0x00000000,
1715 0x00000000,
1716 0x00000000,
1717 0x00000000,
1718 0x00000000,
1719 0x00000000,
1720 0x00000000,
1721 0x00000000,
1722 0x00000000,
1723 0x00000000,
1724 0x00000000,
1524}; 1725};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 574acfa44c8c..522e3079f824 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -19,11 +19,12 @@
19#define MEMX_MSG_EXEC 1 19#define MEMX_MSG_EXEC 1
20 20
21/* MEMX: script opcode definitions */ 21/* MEMX: script opcode definitions */
22#define MEMX_ENTER 0 22#define MEMX_ENTER 1
23#define MEMX_LEAVE 1 23#define MEMX_LEAVE 2
24#define MEMX_WR32 2 24#define MEMX_WR32 3
25#define MEMX_WAIT 3 25#define MEMX_WAIT 4
26#define MEMX_DELAY 4 26#define MEMX_DELAY 5
27#define MEMX_VBLANK 6
27 28
28/* I2C_: message identifiers */ 29/* I2C_: message identifiers */
29#define I2C__MSG_RD08 0 30#define I2C__MSG_RD08 0
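
The renumbering above (MEMX_ENTER and friends shift up by one, freeing opcode 0) pairs with the memx.c change below: a method value of 0 can now mean "no command buffered", which is what memx_out() starts testing for. For reference, a script word is packed as (size << 16) | mthd before being written to register 0x10a1c4, as memx_out() shows; a minimal sketch of that encoding, with the helper itself being illustrative rather than driver API:

	/* Sketch: pack one MEMX script command the way memx_out() does.
	 * The (size << 16) | mthd encoding and the 0x10a1c4 register come
	 * from memx.c below; this helper is illustrative only. */
	static void memx_emit(struct nouveau_pwr *ppwr, u32 mthd,
			      u32 ndata, const u32 *data)
	{
		u32 i;

		nv_wr32(ppwr, 0x10a1c4, (ndata << 16) | mthd);	/* header */
		for (i = 0; i < ndata; i++)
			nv_wr32(ppwr, 0x10a1c4, data[i]);	/* payload */
	}
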
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index def6a9ac68cf..65eaa2546cad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -20,10 +20,11 @@ memx_out(struct nouveau_memx *memx)
20 struct nouveau_pwr *ppwr = memx->ppwr; 20 struct nouveau_pwr *ppwr = memx->ppwr;
21 int i; 21 int i;
22 22
23 if (memx->c.size) { 23 if (memx->c.mthd) {
24 nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd); 24 nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
25 for (i = 0; i < memx->c.size; i++) 25 for (i = 0; i < memx->c.size; i++)
26 nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]); 26 nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
27 memx->c.mthd = 0;
27 memx->c.size = 0; 28 memx->c.size = 0;
28 } 29 }
29} 30}
@@ -32,7 +33,7 @@ static void
32memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[]) 33memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
33{ 34{
34 if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) || 35 if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
35 (memx->c.size && memx->c.mthd != mthd)) 36 (memx->c.mthd && memx->c.mthd != mthd))
36 memx_out(memx); 37 memx_out(memx);
37 memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0])); 38 memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
38 memx->c.size += size; 39 memx->c.size += size;
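
Switching both tests from c.size to c.mthd is what makes zero-payload commands work: MEMX_ENTER and MEMX_LEAVE carry no data words, so with the old `if (memx->c.size)` test a buffered ENTER or LEAVE was never written out, and the old coalescing check could not flush it before a different method was queued. A hypothetical trace of the old failure mode:

	/* Old behaviour (sketch): zero-size commands were silently lost.   */
	memx_cmd(memx, MEMX_ENTER, 0, NULL);	/* c.size stays 0           */
	memx_out(memx);				/* if (c.size) -> no write! */
	/* New behaviour: c.mthd != 0 marks a pending command, so ENTER is
	 * emitted, and queueing a command with a different method first
	 * forces a flush. */
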
@@ -62,8 +63,7 @@ nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
62 nv_wr32(ppwr, 0x10a580, 0x00000003); 63 nv_wr32(ppwr, 0x10a580, 0x00000003);
63 } while (nv_rd32(ppwr, 0x10a580) != 0x00000003); 64 } while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
64 nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base); 65 nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
65 nv_wr32(ppwr, 0x10a1c4, 0x00010000 | MEMX_ENTER); 66
66 nv_wr32(ppwr, 0x10a1c4, 0x00000000);
67 return 0; 67 return 0;
68} 68}
69 69
@@ -78,7 +78,6 @@ nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
78 memx_out(memx); 78 memx_out(memx);
79 79
80 /* release data segment access */ 80 /* release data segment access */
81 nv_wr32(ppwr, 0x10a1c4, 0x00000000 | MEMX_LEAVE);
82 finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff; 81 finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
83 nv_wr32(ppwr, 0x10a580, 0x00000000); 82 nv_wr32(ppwr, 0x10a580, 0x00000000);
84 83
@@ -88,6 +87,8 @@ nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
88 memx->base, finish); 87 memx->base, finish);
89 } 88 }
90 89
90 nv_debug(memx->ppwr, "Exec took %uns, PPWR_IN %08x\n",
91 reply[0], reply[1]);
91 kfree(memx); 92 kfree(memx);
92 return 0; 93 return 0;
93} 94}
@@ -117,4 +118,51 @@ nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
117 memx_out(memx); /* fuc can't handle multiple */ 118 memx_out(memx); /* fuc can't handle multiple */
118} 119}
119 120
121void
122nouveau_memx_wait_vblank(struct nouveau_memx *memx)
123{
124 struct nouveau_pwr *ppwr = memx->ppwr;
125 u32 heads, x, y, px = 0;
126 int i, head_sync;
127
128 if (nv_device(ppwr)->chipset < 0xd0) {
129 heads = nv_rd32(ppwr, 0x610050);
130 for (i = 0; i < 2; i++) {
131 /* Heuristic: sync to head with biggest resolution */
132 if (heads & (2 << (i << 3))) {
133 x = nv_rd32(ppwr, 0x610b40 + (0x540 * i));
134 y = (x & 0xffff0000) >> 16;
135 x &= 0x0000ffff;
136 if ((x * y) > px) {
137 px = (x * y);
138 head_sync = i;
139 }
140 }
141 }
142 }
143
144 if (px == 0) {
145 nv_debug(memx->ppwr, "WAIT VBLANK !NO ACTIVE HEAD\n");
146 return;
147 }
148
149 nv_debug(memx->ppwr, "WAIT VBLANK HEAD%d\n", head_sync);
150 memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
151 memx_out(memx); /* fuc can't handle multiple */
152}
153
154void
155nouveau_memx_block(struct nouveau_memx *memx)
156{
157 nv_debug(memx->ppwr, " HOST BLOCKED\n");
158 memx_cmd(memx, MEMX_ENTER, 0, NULL);
159}
160
161void
162nouveau_memx_unblock(struct nouveau_memx *memx)
163{
164 nv_debug(memx->ppwr, " HOST UNBLOCKED\n");
165 memx_cmd(memx, MEMX_LEAVE, 0, NULL);
166}
167
120#endif 168#endif
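
Taken together, the new helpers let a memory-reclocking script be fenced against scanout from the host side. A plausible call sequence, assuming a nouveau_memx_wr32() helper exists alongside the nsec/vblank/block helpers above (the register offset and values are placeholders, not taken from the driver):

	struct nouveau_memx *memx;

	nouveau_memx_init(ppwr, &memx);
	nouveau_memx_block(memx);	/* MEMX_ENTER: stall host access  */
	nouveau_memx_wr32(memx, 0x100200, 0x00000001);	/* placeholder    */
	nouveau_memx_wait_vblank(memx);	/* MEMX_VBLANK: sync to scanout   */
	nouveau_memx_nsec(memx, 2000);	/* MEMX_DELAY: settling time      */
	nouveau_memx_unblock(memx);	/* MEMX_LEAVE: resume host access */
	nouveau_memx_fini(&memx, true);	/* hand the script to the fuc     */
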
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 016990a8252c..3656d605168f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -31,6 +31,8 @@
31#include <subdev/gpio.h> 31#include <subdev/gpio.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33 33
34#include <subdev/bios/fan.h>
35
34static int 36static int
35nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target) 37nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
36{ 38{
@@ -275,8 +277,11 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
275 /* other random init... */ 277 /* other random init... */
276 nouveau_therm_fan_set_defaults(therm); 278 nouveau_therm_fan_set_defaults(therm);
277 nvbios_perf_fan_parse(bios, &priv->fan->perf); 279 nvbios_perf_fan_parse(bios, &priv->fan->perf);
278 if (nvbios_therm_fan_parse(bios, &priv->fan->bios)) 280 if (!nvbios_fan_parse(bios, &priv->fan->bios)) {
279 nv_error(therm, "parsing the thermal table failed\n"); 281 nv_debug(therm, "parsing the fan table failed\n");
282 if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
283 nv_error(therm, "parsing both fan tables failed\n");
284 }
280 nouveau_therm_fan_safety_checks(therm); 285 nouveau_therm_fan_safety_checks(therm);
281 return 0; 286 return 0;
282} 287}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
index 9a5c07340263..c629d7f2a6a4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -25,6 +25,8 @@
25 25
26#include <core/option.h> 26#include <core/option.h>
27#include <subdev/gpio.h> 27#include <subdev/gpio.h>
28#include <subdev/bios.h>
29#include <subdev/bios/fan.h>
28 30
29#include "priv.h" 31#include "priv.h"
30 32
@@ -86,11 +88,15 @@ nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
86{ 88{
87 struct nouveau_device *device = nv_device(therm); 89 struct nouveau_device *device = nv_device(therm);
88 struct nouveau_therm_priv *tpriv = (void *)therm; 90 struct nouveau_therm_priv *tpriv = (void *)therm;
91 struct nouveau_bios *bios = nouveau_bios(therm);
89 struct nouveau_fanpwm_priv *priv; 92 struct nouveau_fanpwm_priv *priv;
93 struct nvbios_therm_fan fan;
90 u32 divs, duty; 94 u32 divs, duty;
91 95
96 nvbios_fan_parse(bios, &fan);
97
92 if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) || 98 if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
93 !therm->pwm_ctrl || 99 !therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE ||
94 therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV) 100 therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
95 return -ENODEV; 101 return -ENODEV;
96 102
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c
new file mode 100644
index 000000000000..668cf3322285
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/gm107.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2014 Martin Peres
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27struct gm107_therm_priv {
28 struct nouveau_therm_priv base;
29};
30
31static int
32gm107_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
33{
34 /* nothing to do, it seems hardwired */
35 return 0;
36}
37
38static int
39gm107_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
40{
41 *divs = nv_rd32(therm, 0x10eb20) & 0x1fff;
42 *duty = nv_rd32(therm, 0x10eb24) & 0x1fff;
43 return 0;
44}
45
46static int
47gm107_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
48{
49 nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */
50 nv_wr32(therm, 0x10eb14, duty | 0x80000000);
51 return 0;
52}
53
54static int
55gm107_fan_pwm_clock(struct nouveau_therm *therm, int line)
56{
57 return nv_device(therm)->crystal * 1000;
58}
59
60static int
61gm107_therm_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct gm107_therm_priv *priv;
67 int ret;
68
69 ret = nouveau_therm_create(parent, engine, oclass, &priv);
70 *pobject = nv_object(priv);
71 if (ret)
72 return ret;
73
74 priv->base.base.pwm_ctrl = gm107_fan_pwm_ctrl;
75 priv->base.base.pwm_get = gm107_fan_pwm_get;
76 priv->base.base.pwm_set = gm107_fan_pwm_set;
77 priv->base.base.pwm_clock = gm107_fan_pwm_clock;
78 priv->base.base.temp_get = nv84_temp_get;
79 priv->base.base.fan_sense = nva3_therm_fan_sense;
80 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
81 return nouveau_therm_preinit(&priv->base.base);
82}
83
84struct nouveau_oclass
85gm107_therm_oclass = {
86 .handle = NV_SUBDEV(THERM, 0x117),
87 .ofuncs = &(struct nouveau_ofuncs) {
88 .ctor = gm107_therm_ctor,
89 .dtor = _nouveau_therm_dtor,
90 .init = nvd0_therm_init,
91 .fini = nv84_therm_fini,
92 },
93};
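
The GM107 backend differs from nvd0 mainly in where the PWM divider/duty pair lives (0x10eb10/0x10eb14 for writes, 0x10eb20/0x10eb24 for reads, 13 bits each) and in pwm_ctrl being a no-op because the line appears hardwired. Fan speed as a percentage follows from the pair in the usual way; a sketch of the ratio (this helper is illustrative, not part of the driver):

	/* Sketch: divs/duty as returned by pwm_get -> percent. */
	static int pwm_to_percent(u32 divs, u32 duty)
	{
		if (!divs)
			return -EINVAL;
		return duty * 100 / divs;
	}
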
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
index 1d15c52fad0c..14e2e09bfc24 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include "priv.h" 26#include "priv.h"
27#include <subdev/fuse.h>
27 28
28struct nv84_therm_priv { 29struct nv84_therm_priv {
29 struct nouveau_therm_priv base; 30 struct nouveau_therm_priv base;
@@ -32,7 +33,25 @@ struct nv84_therm_priv {
32int 33int
33nv84_temp_get(struct nouveau_therm *therm) 34nv84_temp_get(struct nouveau_therm *therm)
34{ 35{
35 return nv_rd32(therm, 0x20400); 36 struct nouveau_fuse *fuse = nouveau_fuse(therm);
37
38 if (nv_ro32(fuse, 0x1a8) == 1)
39 return nv_rd32(therm, 0x20400);
40 else
41 return -ENODEV;
42}
43
44void
45nv84_sensor_setup(struct nouveau_therm *therm)
46{
47 struct nouveau_fuse *fuse = nouveau_fuse(therm);
48
49 /* enable temperature reading for cards with insane defaults */
50 if (nv_ro32(fuse, 0x1a8) == 1) {
51 nv_mask(therm, 0x20008, 0x80008000, 0x80000000);
52 nv_mask(therm, 0x2000c, 0x80000003, 0x00000000);
53 mdelay(20); /* wait for the temperature to stabilize */
54 }
36} 55}
37 56
38static void 57static void
@@ -171,6 +190,21 @@ nv84_therm_intr(struct nouveau_subdev *subdev)
171} 190}
172 191
173static int 192static int
193nv84_therm_init(struct nouveau_object *object)
194{
195 struct nv84_therm_priv *priv = (void *)object;
196 int ret;
197
198 ret = nouveau_therm_init(&priv->base.base);
199 if (ret)
200 return ret;
201
202 nv84_sensor_setup(&priv->base.base);
203
204 return 0;
205}
206
207static int
174nv84_therm_ctor(struct nouveau_object *parent, 208nv84_therm_ctor(struct nouveau_object *parent,
175 struct nouveau_object *engine, 209 struct nouveau_object *engine,
176 struct nouveau_oclass *oclass, void *data, u32 size, 210 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -228,7 +262,7 @@ nv84_therm_oclass = {
228 .ofuncs = &(struct nouveau_ofuncs) { 262 .ofuncs = &(struct nouveau_ofuncs) {
229 .ctor = nv84_therm_ctor, 263 .ctor = nv84_therm_ctor,
230 .dtor = _nouveau_therm_dtor, 264 .dtor = _nouveau_therm_dtor,
231 .init = _nouveau_therm_init, 265 .init = nv84_therm_init,
232 .fini = nv84_therm_fini, 266 .fini = nv84_therm_fini,
233 }, 267 },
234}; 268};
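
nv84_temp_get() now consults fuse offset 0x1a8 and reports -ENODEV when no sensor is fused in, instead of unconditionally reading 0x20400; nv84_sensor_setup() likewise only pokes the sensor enable registers when the fuse says one is present. Callers must therefore treat a negative return as "no reading available" rather than as a temperature; a sketch of the expected pattern:

	int temp = nv84_temp_get(therm);

	if (temp < 0) {
		/* -ENODEV: no sensor fused in; skip thermal policy */
	} else {
		/* plausible reading, in degrees Celsius on this family */
	}
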
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index 0478b2e3fb1d..7893357a7e9f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -51,6 +51,8 @@ nva3_therm_init(struct nouveau_object *object)
51 if (ret) 51 if (ret)
52 return ret; 52 return ret;
53 53
54 nv84_sensor_setup(&priv->base.base);
55
54 /* enable fan tach, count revolutions per-second */ 56 /* enable fan tach, count revolutions per-second */
55 nv_mask(priv, 0x00e720, 0x00000003, 0x00000002); 57 nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
56 if (tach->func != DCB_GPIO_UNUSED) { 58 if (tach->func != DCB_GPIO_UNUSED) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index bbf117be572f..b70f7cc649b8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -114,7 +114,7 @@ nvd0_fan_pwm_clock(struct nouveau_therm *therm, int line)
114 return nv_device(therm)->crystal * 1000 / 10; 114 return nv_device(therm)->crystal * 1000 / 10;
115} 115}
116 116
117static int 117int
118nvd0_therm_init(struct nouveau_object *object) 118nvd0_therm_init(struct nouveau_object *object)
119{ 119{
120 struct nvd0_therm_priv *priv = (void *)object; 120 struct nvd0_therm_priv *priv = (void *)object;
@@ -150,6 +150,8 @@ nvd0_therm_ctor(struct nouveau_object *parent,
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 nv84_sensor_setup(&priv->base.base);
154
153 priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl; 155 priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
154 priv->base.base.pwm_get = nvd0_fan_pwm_get; 156 priv->base.base.pwm_get = nvd0_fan_pwm_get;
155 priv->base.base.pwm_set = nvd0_fan_pwm_set; 157 priv->base.base.pwm_set = nvd0_fan_pwm_set;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 916fca5c7816..7dba8c281a0b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -145,10 +145,13 @@ int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
145int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32); 145int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
146int nv50_fan_pwm_clock(struct nouveau_therm *, int); 146int nv50_fan_pwm_clock(struct nouveau_therm *, int);
147int nv84_temp_get(struct nouveau_therm *therm); 147int nv84_temp_get(struct nouveau_therm *therm);
148void nv84_sensor_setup(struct nouveau_therm *therm);
148int nv84_therm_fini(struct nouveau_object *object, bool suspend); 149int nv84_therm_fini(struct nouveau_object *object, bool suspend);
149 150
150int nva3_therm_fan_sense(struct nouveau_therm *); 151int nva3_therm_fan_sense(struct nouveau_therm *);
151 152
153int nvd0_therm_init(struct nouveau_object *object);
154
152int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *); 155int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
153int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *); 156int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
154int nouveau_fannil_create(struct nouveau_therm *); 157int nouveau_fannil_create(struct nouveau_therm *);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 7dd680ff2f6f..f75a683bd47a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -296,7 +296,7 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
296 int ret; 296 int ret;
297 297
298 mutex_lock(&nv_subdev(vmm)->mutex); 298 mutex_lock(&nv_subdev(vmm)->mutex);
299 ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align, 299 ret = nouveau_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
300 &vma->node); 300 &vma->node);
301 if (unlikely(ret != 0)) { 301 if (unlikely(ret != 0)) {
302 mutex_unlock(&nv_subdev(vmm)->mutex); 302 mutex_unlock(&nv_subdev(vmm)->mutex);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index b90aa5c1f90a..fca6a1f9c20c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1127,7 +1127,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1127 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 1127 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
1128 1128
1129 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 1129 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
1130 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); 1130 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
1131 if (!ret) { 1131 if (!ret) {
1132 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1132 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
1133 if (!ret) { 1133 if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index b36afcbbc83f..1e9056a8df94 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -97,7 +97,8 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
97 uint32_t src_w, uint32_t src_h) 97 uint32_t src_w, uint32_t src_h)
98{ 98{
99 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 99 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
100 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 100 struct nouveau_plane *nv_plane =
101 container_of(plane, struct nouveau_plane, base);
101 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 102 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
102 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 103 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
103 struct nouveau_bo *cur = nv_plane->cur; 104 struct nouveau_bo *cur = nv_plane->cur;
@@ -173,7 +174,8 @@ static int
173nv10_disable_plane(struct drm_plane *plane) 174nv10_disable_plane(struct drm_plane *plane)
174{ 175{
175 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 176 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
176 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 177 struct nouveau_plane *nv_plane =
178 container_of(plane, struct nouveau_plane, base);
177 179
178 nvif_wr32(dev, NV_PVIDEO_STOP, 1); 180 nvif_wr32(dev, NV_PVIDEO_STOP, 1);
179 if (nv_plane->cur) { 181 if (nv_plane->cur) {
@@ -224,7 +226,8 @@ nv_set_property(struct drm_plane *plane,
224 struct drm_property *property, 226 struct drm_property *property,
225 uint64_t value) 227 uint64_t value)
226{ 228{
227 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 229 struct nouveau_plane *nv_plane =
230 container_of(plane, struct nouveau_plane, base);
228 231
229 if (property == nv_plane->props.colorkey) 232 if (property == nv_plane->props.colorkey)
230 nv_plane->colorkey = value; 233 nv_plane->colorkey = value;
@@ -344,7 +347,8 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
344 uint32_t src_w, uint32_t src_h) 347 uint32_t src_w, uint32_t src_h)
345{ 348{
346 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 349 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
347 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 350 struct nouveau_plane *nv_plane =
351 container_of(plane, struct nouveau_plane, base);
348 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 352 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
349 struct nouveau_bo *cur = nv_plane->cur; 353 struct nouveau_bo *cur = nv_plane->cur;
350 uint32_t overlay = 1; 354 uint32_t overlay = 1;
@@ -423,7 +427,8 @@ static int
423nv04_disable_plane(struct drm_plane *plane) 427nv04_disable_plane(struct drm_plane *plane)
424{ 428{
425 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 429 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
426 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 430 struct nouveau_plane *nv_plane =
431 container_of(plane, struct nouveau_plane, base);
427 432
428 nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); 433 nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
429 nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0); 434 nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
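
The repeated overlay.c change swaps raw casts for container_of(). A cast like (struct nouveau_plane *)plane is only correct while `base` happens to be the first member of the wrapper struct; container_of() derives the enclosing object from the member's offset and stays correct whatever the layout. A reduced sketch of the idiom (the to_nouveau_plane() helper name is hypothetical; the fields are from this file):

	struct nouveau_plane {
		struct drm_plane base;	/* no longer has to stay first */
		struct nouveau_bo *cur;
		/* ... */
	};

	static inline struct nouveau_plane *
	to_nouveau_plane(struct drm_plane *plane)
	{
		return container_of(plane, struct nouveau_plane, base);
	}
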
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 615714c1727d..a24faa5e2a2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -448,7 +448,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
448 list_add(&ntfy->head, &chan->notifiers); 448 list_add(&ntfy->head, &chan->notifiers);
449 ntfy->handle = info->handle; 449 ntfy->handle = info->handle;
450 450
451 ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1, 451 ret = nouveau_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
452 &ntfy->node); 452 &ntfy->node);
453 if (ret) 453 if (ret)
454 goto done; 454 goto done;
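
Both nouveau_mm_head() call sites in this series (this one and the one in subdev/vm/base.c above) gain a new second argument, passed as 0. Judging from the call sites it selects an allocation heap within the mm, with 0 meaning "any". A sketch of the presumed updated prototype; the parameter name and integer widths are inferred, not quoted from nouveau's mm header:

	/* Presumed prototype after this series; 'heap' is an inferred name. */
	int nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type,
			    u32 size_max, u32 size_min, u32 align,
			    struct nouveau_mm_node **pnode);
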
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 01da508625f2..3d474ac03f88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -88,13 +88,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
88 88
89static void 89static void
90nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, 90nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
91 struct nouveau_fence *fence) 91 struct fence *fence)
92{ 92{
93 struct nouveau_drm *drm = nouveau_drm(dev); 93 struct nouveau_drm *drm = nouveau_drm(dev);
94 94
95 if (tile) { 95 if (tile) {
96 spin_lock(&drm->tile.lock); 96 spin_lock(&drm->tile.lock);
97 tile->fence = nouveau_fence_ref(fence); 97 tile->fence = (struct nouveau_fence *)fence_get(fence);
98 tile->used = false; 98 tile->used = false;
99 spin_unlock(&drm->tile.lock); 99 spin_unlock(&drm->tile.lock);
100 } 100 }
@@ -181,7 +181,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
181int 181int
182nouveau_bo_new(struct drm_device *dev, int size, int align, 182nouveau_bo_new(struct drm_device *dev, int size, int align,
183 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 183 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
184 struct sg_table *sg, 184 struct sg_table *sg, struct reservation_object *robj,
185 struct nouveau_bo **pnvbo) 185 struct nouveau_bo **pnvbo)
186{ 186{
187 struct nouveau_drm *drm = nouveau_drm(dev); 187 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -230,7 +230,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
230 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, 230 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
231 type, &nvbo->placement, 231 type, &nvbo->placement,
232 align >> PAGE_SHIFT, false, NULL, acc_size, sg, 232 align >> PAGE_SHIFT, false, NULL, acc_size, sg,
233 nouveau_bo_del_ttm); 233 robj, nouveau_bo_del_ttm);
234 if (ret) { 234 if (ret) {
235 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 235 /* ttm will call nouveau_bo_del_ttm if it fails.. */
236 return ret; 236 return ret;
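
nouveau_bo_new() now threads an optional struct reservation_object through to ttm_bo_init(). Passing NULL keeps the old behaviour of a BO-private reservation object; the point of the parameter is presumably dma-buf import, where the new BO can adopt the exporter's object so both sides serialize on the same fences. A hedged sketch of that import-side call (the surrounding variables are illustrative):

	/* Sketch: share the exporter's reservation object on import.
	 * 'attach' is a struct dma_buf_attachment from the import path. */
	struct reservation_object *robj = attach->dmabuf->resv;
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, size, align, TTM_PL_FLAG_TT,
			     0, 0, sg, robj, &nvbo);
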
@@ -241,16 +241,16 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
241} 241}
242 242
243static void 243static void
244set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) 244set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
245{ 245{
246 *n = 0; 246 *n = 0;
247 247
248 if (type & TTM_PL_FLAG_VRAM) 248 if (type & TTM_PL_FLAG_VRAM)
249 pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; 249 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
250 if (type & TTM_PL_FLAG_TT) 250 if (type & TTM_PL_FLAG_TT)
251 pl[(*n)++] = TTM_PL_FLAG_TT | flags; 251 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
252 if (type & TTM_PL_FLAG_SYSTEM) 252 if (type & TTM_PL_FLAG_SYSTEM)
253 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; 253 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
254} 254}
255 255
256static void 256static void
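
The plain uint32_t placement flags become struct ttm_place entries because TTM now tracks the acceptable page-frame window per placement instead of a single fpfn/lpfn on struct ttm_placement. The shape of the structure, as this series uses it (see the designated initializers in nouveau_bo_move_flipd()/_flips() below; a zero lpfn is treated as "no upper limit"):

	struct ttm_place {
		unsigned fpfn;	/* first acceptable page frame         */
		unsigned lpfn;	/* last acceptable page frame, 0=none  */
		uint32_t flags;	/* TTM_PL_FLAG_* type + caching        */
	};
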
@@ -258,6 +258,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
258{ 258{
259 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 259 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
260 u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; 260 u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
261 unsigned i, fpfn, lpfn;
261 262
262 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && 263 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
263 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 264 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
@@ -269,11 +270,19 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
269 * at the same time. 270 * at the same time.
270 */ 271 */
271 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { 272 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
272 nvbo->placement.fpfn = vram_pages / 2; 273 fpfn = vram_pages / 2;
273 nvbo->placement.lpfn = ~0; 274 lpfn = ~0;
274 } else { 275 } else {
275 nvbo->placement.fpfn = 0; 276 fpfn = 0;
276 nvbo->placement.lpfn = vram_pages / 2; 277 lpfn = vram_pages / 2;
278 }
279 for (i = 0; i < nvbo->placement.num_placement; ++i) {
280 nvbo->placements[i].fpfn = fpfn;
281 nvbo->placements[i].lpfn = lpfn;
282 }
283 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
284 nvbo->busy_placements[i].fpfn = fpfn;
285 nvbo->busy_placements[i].lpfn = lpfn;
277 } 286 }
278 } 287 }
279} 288}
@@ -961,13 +970,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
961 } 970 }
962 971
963 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); 972 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
964 ret = nouveau_fence_sync(bo->sync_obj, chan); 973 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
965 if (ret == 0) { 974 if (ret == 0) {
966 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); 975 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
967 if (ret == 0) { 976 if (ret == 0) {
968 ret = nouveau_fence_new(chan, false, &fence); 977 ret = nouveau_fence_new(chan, false, &fence);
969 if (ret == 0) { 978 if (ret == 0) {
970 ret = ttm_bo_move_accel_cleanup(bo, fence, 979 ret = ttm_bo_move_accel_cleanup(bo,
980 &fence->base,
971 evict, 981 evict,
972 no_wait_gpu, 982 no_wait_gpu,
973 new_mem); 983 new_mem);
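
The old code waited on the BO's single sync_obj; the reworked nouveau_fence_sync(nvbo, chan, exclusive, intr) queries the reservation object instead. Judging from the call sites, the exclusive flag says whether the channel intends to write (wait for the shared reader fences as well) or merely read (wait only for the exclusive writer fence), with intr selecting an interruptible wait. A sketch of that presumed convention:

	/* Presumed semantics, inferred from the call sites in this diff: */
	ret = nouveau_fence_sync(nvbo, chan, true, intr);   /* will write */
	ret = nouveau_fence_sync(nvbo, chan, false, false); /* read only  */
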
@@ -1041,12 +1051,15 @@ static int
1041nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1051nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1042 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1052 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1043{ 1053{
1044 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1054 struct ttm_place placement_memtype = {
1055 .fpfn = 0,
1056 .lpfn = 0,
1057 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1058 };
1045 struct ttm_placement placement; 1059 struct ttm_placement placement;
1046 struct ttm_mem_reg tmp_mem; 1060 struct ttm_mem_reg tmp_mem;
1047 int ret; 1061 int ret;
1048 1062
1049 placement.fpfn = placement.lpfn = 0;
1050 placement.num_placement = placement.num_busy_placement = 1; 1063 placement.num_placement = placement.num_busy_placement = 1;
1051 placement.placement = placement.busy_placement = &placement_memtype; 1064 placement.placement = placement.busy_placement = &placement_memtype;
1052 1065
@@ -1074,12 +1087,15 @@ static int
1074nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1087nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1075 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1088 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1076{ 1089{
1077 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1090 struct ttm_place placement_memtype = {
1091 .fpfn = 0,
1092 .lpfn = 0,
1093 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1094 };
1078 struct ttm_placement placement; 1095 struct ttm_placement placement;
1079 struct ttm_mem_reg tmp_mem; 1096 struct ttm_mem_reg tmp_mem;
1080 int ret; 1097 int ret;
1081 1098
1082 placement.fpfn = placement.lpfn = 0;
1083 placement.num_placement = placement.num_busy_placement = 1; 1099 placement.num_placement = placement.num_busy_placement = 1;
1084 placement.placement = placement.busy_placement = &placement_memtype; 1100 placement.placement = placement.busy_placement = &placement_memtype;
1085 1101
@@ -1152,8 +1168,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1152{ 1168{
1153 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1169 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1154 struct drm_device *dev = drm->dev; 1170 struct drm_device *dev = drm->dev;
1171 struct fence *fence = reservation_object_get_excl(bo->resv);
1155 1172
1156 nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj); 1173 nv10_bo_put_tile_region(dev, *old_tile, fence);
1157 *old_tile = new_tile; 1174 *old_tile = new_tile;
1158} 1175}
1159 1176
@@ -1197,9 +1214,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1197 } 1214 }
1198 1215
1199 /* Fallback to software copy. */ 1216 /* Fallback to software copy. */
1200 spin_lock(&bo->bdev->fence_lock);
1201 ret = ttm_bo_wait(bo, true, intr, no_wait_gpu); 1217 ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
1202 spin_unlock(&bo->bdev->fence_lock);
1203 if (ret == 0) 1218 if (ret == 0)
1204 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 1219 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1205 1220
@@ -1294,7 +1309,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1294 struct nouveau_bo *nvbo = nouveau_bo(bo); 1309 struct nouveau_bo *nvbo = nouveau_bo(bo);
1295 struct nvif_device *device = &drm->device; 1310 struct nvif_device *device = &drm->device;
1296 u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT; 1311 u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
1297 int ret; 1312 int i, ret;
1298 1313
1299 /* as long as the bo isn't in vram, and isn't tiled, we've got 1314 /* as long as the bo isn't in vram, and isn't tiled, we've got
1300 * nothing to do here. 1315 * nothing to do here.
@@ -1319,9 +1334,16 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1319 bo->mem.start + bo->mem.num_pages < mappable) 1334 bo->mem.start + bo->mem.num_pages < mappable)
1320 return 0; 1335 return 0;
1321 1336
1337 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1338 nvbo->placements[i].fpfn = 0;
1339 nvbo->placements[i].lpfn = mappable;
1340 }
1341
1342 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1343 nvbo->busy_placements[i].fpfn = 0;
1344 nvbo->busy_placements[i].lpfn = mappable;
1345 }
1322 1346
1323 nvbo->placement.fpfn = 0;
1324 nvbo->placement.lpfn = mappable;
1325 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1347 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1326 return nouveau_bo_validate(nvbo, false, false); 1348 return nouveau_bo_validate(nvbo, false, false);
1327} 1349}
@@ -1436,47 +1458,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1436} 1458}
1437 1459
1438void 1460void
1439nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) 1461nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1440{
1441 struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
1442 struct nouveau_fence *old_fence = NULL;
1443
1444 spin_lock(&nvbo->bo.bdev->fence_lock);
1445 old_fence = nvbo->bo.sync_obj;
1446 nvbo->bo.sync_obj = new_fence;
1447 spin_unlock(&nvbo->bo.bdev->fence_lock);
1448
1449 nouveau_fence_unref(&old_fence);
1450}
1451
1452static void
1453nouveau_bo_fence_unref(void **sync_obj)
1454{
1455 nouveau_fence_unref((struct nouveau_fence **)sync_obj);
1456}
1457
1458static void *
1459nouveau_bo_fence_ref(void *sync_obj)
1460{ 1462{
1461 return nouveau_fence_ref(sync_obj); 1463 struct reservation_object *resv = nvbo->bo.resv;
1462}
1463 1464
1464static bool 1465 if (exclusive)
1465nouveau_bo_fence_signalled(void *sync_obj) 1466 reservation_object_add_excl_fence(resv, &fence->base);
1466{ 1467 else if (fence)
1467 return nouveau_fence_done(sync_obj); 1468 reservation_object_add_shared_fence(resv, &fence->base);
1468}
1469
1470static int
1471nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
1472{
1473 return nouveau_fence_wait(sync_obj, lazy, intr);
1474}
1475
1476static int
1477nouveau_bo_fence_flush(void *sync_obj)
1478{
1479 return 0;
1480} 1469}
1481 1470
1482struct ttm_bo_driver nouveau_bo_driver = { 1471struct ttm_bo_driver nouveau_bo_driver = {
@@ -1489,11 +1478,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
1489 .move_notify = nouveau_bo_move_ntfy, 1478 .move_notify = nouveau_bo_move_ntfy,
1490 .move = nouveau_bo_move, 1479 .move = nouveau_bo_move,
1491 .verify_access = nouveau_bo_verify_access, 1480 .verify_access = nouveau_bo_verify_access,
1492 .sync_obj_signaled = nouveau_bo_fence_signalled,
1493 .sync_obj_wait = nouveau_bo_fence_wait,
1494 .sync_obj_flush = nouveau_bo_fence_flush,
1495 .sync_obj_unref = nouveau_bo_fence_unref,
1496 .sync_obj_ref = nouveau_bo_fence_ref,
1497 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, 1481 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1498 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1482 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1499 .io_mem_free = &nouveau_ttm_io_mem_free, 1483 .io_mem_free = &nouveau_ttm_io_mem_free,
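
This hunk deletes the whole sync_obj vtable: TTM no longer calls back into the driver to ref, wait on, or flush fences, because per-BO fences now live in the reservation object and go through the generic fence API. What remains of nouveau_bo_fence() is just the choice of slot, so callers tag GPU jobs that write a BO as exclusive and read-only jobs as shared:

	/* Caller-side convention, following the new function body above: */
	nouveau_bo_fence(nvbo, fence, true);	/* job writes the BO */
	nouveau_bo_fence(nvbo, fence, false);	/* job only reads it */
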
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ff17c1f432fc..22d2c764d80b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,6 +1,8 @@
1#ifndef __NOUVEAU_BO_H__ 1#ifndef __NOUVEAU_BO_H__
2#define __NOUVEAU_BO_H__ 2#define __NOUVEAU_BO_H__
3 3
4#include <drm/drm_gem.h>
5
4struct nouveau_channel; 6struct nouveau_channel;
5struct nouveau_fence; 7struct nouveau_fence;
6struct nouveau_vma; 8struct nouveau_vma;
@@ -9,8 +11,8 @@ struct nouveau_bo {
9 struct ttm_buffer_object bo; 11 struct ttm_buffer_object bo;
10 struct ttm_placement placement; 12 struct ttm_placement placement;
11 u32 valid_domains; 13 u32 valid_domains;
12 u32 placements[3]; 14 struct ttm_place placements[3];
13 u32 busy_placements[3]; 15 struct ttm_place busy_placements[3];
14 struct ttm_bo_kmap_obj kmap; 16 struct ttm_bo_kmap_obj kmap;
15 struct list_head head; 17 struct list_head head;
16 18
@@ -68,6 +70,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
68void nouveau_bo_move_init(struct nouveau_drm *); 70void nouveau_bo_move_init(struct nouveau_drm *);
69int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags, 71int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
70 u32 tile_mode, u32 tile_flags, struct sg_table *sg, 72 u32 tile_mode, u32 tile_flags, struct sg_table *sg,
73 struct reservation_object *robj,
71 struct nouveau_bo **); 74 struct nouveau_bo **);
72int nouveau_bo_pin(struct nouveau_bo *, u32 flags); 75int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
73int nouveau_bo_unpin(struct nouveau_bo *); 76int nouveau_bo_unpin(struct nouveau_bo *);
@@ -78,7 +81,7 @@ u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
78void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val); 81void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
79u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index); 82u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
80void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); 83void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
81void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); 84void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
82int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 85int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
83 bool no_wait_gpu); 86 bool no_wait_gpu);
84 87
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 3440fc999f2f..589dbb582da2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -36,7 +36,7 @@
36#include "nouveau_abi16.h" 36#include "nouveau_abi16.h"
37 37
38MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM"); 38MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
39static int nouveau_vram_pushbuf; 39int nouveau_vram_pushbuf;
40module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 40module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
41 41
42int 42int
@@ -106,7 +106,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
106 if (nouveau_vram_pushbuf) 106 if (nouveau_vram_pushbuf)
107 target = TTM_PL_FLAG_VRAM; 107 target = TTM_PL_FLAG_VRAM;
108 108
109 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, 109 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
110 &chan->push.buffer); 110 &chan->push.buffer);
111 if (ret == 0) { 111 if (ret == 0) {
112 ret = nouveau_bo_pin(chan->push.buffer, target); 112 ret = nouveau_bo_pin(chan->push.buffer, target);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 20163709d608..8309c24ee698 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -47,4 +47,6 @@ int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
 void nouveau_channel_del(struct nouveau_channel **);
 int nouveau_channel_idle(struct nouveau_channel *);
 
+extern int nouveau_vram_pushbuf;
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1ec44c83e919..c8ac9482cf2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -45,15 +45,15 @@
 #include <nvif/event.h>
 
 MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
-static int nouveau_tv_disable = 0;
+int nouveau_tv_disable = 0;
 module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
 
 MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
-static int nouveau_ignorelid = 0;
+int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
 
 MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
-static int nouveau_duallink = 1;
+int nouveau_duallink = 1;
 module_param_named(duallink, nouveau_duallink, int, 0400);
 
 struct nouveau_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 68029d041dd2..629a380c7085 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -105,4 +105,8 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
+extern int nouveau_tv_disable;
+extern int nouveau_ignorelid;
+extern int nouveau_duallink;
+
 #endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 4a21b2b06ce2..a88e6927f571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -126,7 +126,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
 	if (etime) *etime = ns_to_ktime(args.scan.time[1]);
 
 	if (*vpos < 0)
-		ret |= DRM_SCANOUTPOS_INVBL;
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
 	return ret;
 }
 
@@ -657,7 +657,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* Synchronize with the old framebuffer */
-	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	ret = nouveau_fence_sync(old_bo, chan, false, false);
 	if (ret)
 		goto fail;
 
@@ -716,19 +716,24 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	mutex_lock(&cli->mutex);
-
-	/* synchronise rendering channel with the kernel's channel */
-	spin_lock(&new_bo->bo.bdev->fence_lock);
-	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
-	spin_unlock(&new_bo->bo.bdev->fence_lock);
-	ret = nouveau_fence_sync(fence, chan);
-	nouveau_fence_unref(&fence);
+	ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
 	if (ret)
 		goto fail_unpin;
 
-	ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
-	if (ret)
+	/* synchronise rendering channel with the kernel's channel */
+	ret = nouveau_fence_sync(new_bo, chan, false, true);
+	if (ret) {
+		ttm_bo_unreserve(&new_bo->bo);
 		goto fail_unpin;
+	}
+
+	if (new_bo != old_bo) {
+		ttm_bo_unreserve(&new_bo->bo);
+
+		ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+		if (ret)
+			goto fail_unpin;
+	}
 
 	/* Initialize a page flip struct */
 	*s = (struct nouveau_page_flip_state)
@@ -774,7 +779,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* Update the crtc struct and cleanup */
 	crtc->primary->fb = fb;
 
-	nouveau_bo_fence(old_bo, fence);
+	nouveau_bo_fence(old_bo, fence, false);
 	ttm_bo_unreserve(&old_bo->bo);
 	if (old_bo != new_bo)
 		nouveau_bo_unpin(old_bo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3ed32dd90303..57238076049f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -51,6 +51,7 @@
51#include "nouveau_fence.h" 51#include "nouveau_fence.h"
52#include "nouveau_debugfs.h" 52#include "nouveau_debugfs.h"
53#include "nouveau_usif.h" 53#include "nouveau_usif.h"
54#include "nouveau_connector.h"
54 55
55MODULE_PARM_DESC(config, "option string to pass to driver core"); 56MODULE_PARM_DESC(config, "option string to pass to driver core");
56static char *nouveau_config; 57static char *nouveau_config;
@@ -73,7 +74,9 @@ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1
 int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
-static struct drm_driver driver;
+static struct drm_driver driver_stub;
+static struct drm_driver driver_pci;
+static struct drm_driver driver_platform;
 
 static u64
 nouveau_pci_name(struct pci_dev *pdev)
@@ -322,7 +325,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	ret = drm_get_pci_dev(pdev, pent, &driver);
+	ret = drm_get_pci_dev(pdev, pent, &driver_pci);
 	if (ret) {
 		nouveau_object_ref(NULL, (struct nouveau_object **)&device);
 		return ret;
@@ -831,7 +834,7 @@ nouveau_driver_fops = {
 };
 
 static struct drm_driver
-driver = {
+driver_stub = {
 	.driver_features =
 		DRIVER_USE_AGP |
 		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
@@ -1002,6 +1005,23 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
 	return 1;
 }
 
+static void nouveau_display_options(void)
+{
+	DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
+
+	DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
+	DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
+	DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
+	DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
+	DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
+	DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
+	DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
+	DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
+	DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
+	DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
+	DRM_DEBUG_DRIVER("... pstate       : %d\n", nouveau_pstate);
+}
+
 static const struct dev_pm_ops nouveau_pm_ops = {
 	.suspend = nouveau_pmops_suspend,
 	.resume = nouveau_pmops_resume,
@@ -1037,7 +1057,7 @@ nouveau_platform_device_create_(struct platform_device *pdev, int size,
 	if (err)
 		return ERR_PTR(err);
 
-	drm = drm_dev_alloc(&driver, &pdev->dev);
+	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
 	if (!drm) {
 		err = -ENOMEM;
 		goto err_free;
@@ -1062,6 +1082,13 @@ EXPORT_SYMBOL(nouveau_platform_device_create_);
 static int __init
 nouveau_drm_init(void)
 {
+	driver_pci = driver_stub;
+	driver_pci.set_busid = drm_pci_set_busid;
+	driver_platform = driver_stub;
+	driver_platform.set_busid = drm_platform_set_busid;
+
+	nouveau_display_options();
+
 	if (nouveau_modeset == -1) {
 #ifdef CONFIG_VGA_CONSOLE
 		if (vgacon_text_force())
@@ -1073,7 +1100,7 @@ nouveau_drm_init(void)
 		return 0;
 
 	nouveau_register_dsm_handler();
-	return drm_pci_init(&driver, &nouveau_drm_pci_driver);
+	return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
 }
 
 static void __exit
@@ -1082,7 +1109,7 @@ nouveau_drm_exit(void)
 	if (!nouveau_modeset)
 		return;
 
-	drm_pci_exit(&driver, &nouveau_drm_pci_driver);
+	drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
 	nouveau_unregister_dsm_handler();
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index b02b02452c85..8ae36f265fb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,7 @@
 
 #define DRIVER_MAJOR		1
 #define DRIVER_MINOR		2
-#define DRIVER_PATCHLEVEL	0
+#define DRIVER_PATCHLEVEL	1
 
 /*
  * 1.1.1:
@@ -26,6 +26,8 @@
  * 1.2.0:
  * - object api exposed to userspace
  * - fermi,kepler,maxwell zbc
+ * 1.2.1:
+ * - allow concurrent access to bo's mapped read/write.
  */
 
 #include <nvif/client.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 49fe6075cc7c..593ef8a2a069 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,7 +52,7 @@
52#include "nouveau_crtc.h" 52#include "nouveau_crtc.h"
53 53
54MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); 54MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
55static int nouveau_nofbaccel = 0; 55int nouveau_nofbaccel = 0;
56module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 56module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
57 57
58static void 58static void
@@ -308,7 +308,8 @@ static int
 nouveau_fbcon_create(struct drm_fb_helper *helper,
 		     struct drm_fb_helper_surface_size *sizes)
 {
-	struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
+	struct nouveau_fbdev *fbcon =
+		container_of(helper, struct nouveau_fbdev, helper);
 	struct drm_device *dev = fbcon->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvif_device *device = &drm->device;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 0b465c7d3907..6208e70e4a1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -73,5 +73,8 @@ void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
 void nouveau_fbcon_accel_restore(struct drm_device *dev);
 
 void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+
+extern int nouveau_nofbaccel;
+
 #endif /* __NV50_FBCON_H__ */
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0a93114158cd..515cd9aebb99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,6 +28,7 @@
 
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
+#include <trace/events/fence.h>
 
 #include <nvif/notify.h>
 #include <nvif/event.h>
@@ -36,123 +37,234 @@
36#include "nouveau_dma.h" 37#include "nouveau_dma.h"
37#include "nouveau_fence.h" 38#include "nouveau_fence.h"
38 39
39struct fence_work { 40static const struct fence_ops nouveau_fence_ops_uevent;
40 struct work_struct base; 41static const struct fence_ops nouveau_fence_ops_legacy;
41 struct list_head head; 42
42 void (*func)(void *); 43static inline struct nouveau_fence *
43 void *data; 44from_fence(struct fence *fence)
44}; 45{
46 return container_of(fence, struct nouveau_fence, base);
47}
48
49static inline struct nouveau_fence_chan *
50nouveau_fctx(struct nouveau_fence *fence)
51{
52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
53}
45 54
46static void 55static void
47nouveau_fence_signal(struct nouveau_fence *fence) 56nouveau_fence_signal(struct nouveau_fence *fence)
48{ 57{
49 struct fence_work *work, *temp; 58 fence_signal_locked(&fence->base);
59 list_del(&fence->head);
60
61 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
62 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
50 63
51 list_for_each_entry_safe(work, temp, &fence->work, head) { 64 if (!--fctx->notify_ref)
52 schedule_work(&work->base); 65 nvif_notify_put(&fctx->notify);
53 list_del(&work->head);
54 } 66 }
55 67
56 fence->channel = NULL; 68 fence_put(&fence->base);
57 list_del(&fence->head); 69}
70
71static struct nouveau_fence *
72nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
73 struct nouveau_fence_priv *priv = (void*)drm->fence;
74
75 if (fence->ops != &nouveau_fence_ops_legacy &&
76 fence->ops != &nouveau_fence_ops_uevent)
77 return NULL;
78
79 if (fence->context < priv->context_base ||
80 fence->context >= priv->context_base + priv->contexts)
81 return NULL;
82
83 return from_fence(fence);
58} 84}
59 85
60void 86void
61nouveau_fence_context_del(struct nouveau_fence_chan *fctx) 87nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
62{ 88{
63 struct nouveau_fence *fence, *fnext; 89 struct nouveau_fence *fence;
64 spin_lock(&fctx->lock); 90
65 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { 91 nvif_notify_fini(&fctx->notify);
92
93 spin_lock_irq(&fctx->lock);
94 while (!list_empty(&fctx->pending)) {
95 fence = list_entry(fctx->pending.next, typeof(*fence), head);
96
66 nouveau_fence_signal(fence); 97 nouveau_fence_signal(fence);
98 fence->channel = NULL;
67 } 99 }
68 spin_unlock(&fctx->lock); 100 spin_unlock_irq(&fctx->lock);
101}
102
103static void
104nouveau_fence_context_put(struct kref *fence_ref)
105{
106 kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
69} 107}
70 108
71void 109void
72nouveau_fence_context_new(struct nouveau_fence_chan *fctx) 110nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
111{
112 kref_put(&fctx->fence_ref, nouveau_fence_context_put);
113}
114
115static void
116nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
117{
118 struct nouveau_fence *fence;
119
120 u32 seq = fctx->read(chan);
121
122 while (!list_empty(&fctx->pending)) {
123 fence = list_entry(fctx->pending.next, typeof(*fence), head);
124
125 if ((int)(seq - fence->base.seqno) < 0)
126 return;
127
128 nouveau_fence_signal(fence);
129 }
130}
131
132static int
133nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
73{ 134{
135 struct nouveau_fence_chan *fctx =
136 container_of(notify, typeof(*fctx), notify);
137 unsigned long flags;
138
139 spin_lock_irqsave(&fctx->lock, flags);
140 if (!list_empty(&fctx->pending)) {
141 struct nouveau_fence *fence;
142
143 fence = list_entry(fctx->pending.next, typeof(*fence), head);
144 nouveau_fence_update(fence->channel, fctx);
145 }
146 spin_unlock_irqrestore(&fctx->lock, flags);
147
148 /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
149 return NVIF_NOTIFY_KEEP;
150}
151
152void
153nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
154{
155 struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
156 struct nouveau_cli *cli = (void *)nvif_client(chan->object);
157 int ret;
158
74 INIT_LIST_HEAD(&fctx->flip); 159 INIT_LIST_HEAD(&fctx->flip);
75 INIT_LIST_HEAD(&fctx->pending); 160 INIT_LIST_HEAD(&fctx->pending);
76 spin_lock_init(&fctx->lock); 161 spin_lock_init(&fctx->lock);
162 fctx->context = priv->context_base + chan->chid;
163
164 if (chan == chan->drm->cechan)
165 strcpy(fctx->name, "copy engine channel");
166 else if (chan == chan->drm->channel)
167 strcpy(fctx->name, "generic kernel channel");
168 else
169 strcpy(fctx->name, nvkm_client(&cli->base)->name);
170
171 kref_init(&fctx->fence_ref);
172 if (!priv->uevent)
173 return;
174
175 ret = nvif_notify_init(chan->object, NULL,
176 nouveau_fence_wait_uevent_handler, false,
177 G82_CHANNEL_DMA_V0_NTFY_UEVENT,
178 &(struct nvif_notify_uevent_req) { },
179 sizeof(struct nvif_notify_uevent_req),
180 sizeof(struct nvif_notify_uevent_rep),
181 &fctx->notify);
182
183 WARN_ON(ret);
77} 184}
78 185
186struct nouveau_fence_work {
187 struct work_struct work;
188 struct fence_cb cb;
189 void (*func)(void *);
190 void *data;
191};
192
79static void 193static void
80nouveau_fence_work_handler(struct work_struct *kwork) 194nouveau_fence_work_handler(struct work_struct *kwork)
81{ 195{
82 struct fence_work *work = container_of(kwork, typeof(*work), base); 196 struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
83 work->func(work->data); 197 work->func(work->data);
84 kfree(work); 198 kfree(work);
85} 199}
86 200
201static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
202{
203 struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
204
205 schedule_work(&work->work);
206}
207
87void 208void
88nouveau_fence_work(struct nouveau_fence *fence, 209nouveau_fence_work(struct fence *fence,
89 void (*func)(void *), void *data) 210 void (*func)(void *), void *data)
90{ 211{
91 struct nouveau_channel *chan = fence->channel; 212 struct nouveau_fence_work *work;
92 struct nouveau_fence_chan *fctx;
93 struct fence_work *work = NULL;
94 213
95 if (nouveau_fence_done(fence)) { 214 if (fence_is_signaled(fence))
96 func(data); 215 goto err;
97 return;
98 }
99 216
100 fctx = chan->fence;
101 work = kmalloc(sizeof(*work), GFP_KERNEL); 217 work = kmalloc(sizeof(*work), GFP_KERNEL);
102 if (!work) { 218 if (!work) {
103 WARN_ON(nouveau_fence_wait(fence, false, false)); 219 /*
104 func(data); 220 * this might not be a nouveau fence any more,
105 return; 221 * so force a lazy wait here
106 } 222 */
107 223 WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
108 spin_lock(&fctx->lock); 224 true, false));
109 if (!fence->channel) { 225 goto err;
110 spin_unlock(&fctx->lock);
111 kfree(work);
112 func(data);
113 return;
114 } 226 }
115 227
116 INIT_WORK(&work->base, nouveau_fence_work_handler); 228 INIT_WORK(&work->work, nouveau_fence_work_handler);
117 work->func = func; 229 work->func = func;
118 work->data = data; 230 work->data = data;
119 list_add(&work->head, &fence->work);
120 spin_unlock(&fctx->lock);
121}
122
123static void
124nouveau_fence_update(struct nouveau_channel *chan)
125{
126 struct nouveau_fence_chan *fctx = chan->fence;
127 struct nouveau_fence *fence, *fnext;
128 231
129 spin_lock(&fctx->lock); 232 if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
130 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { 233 goto err_free;
131 if (fctx->read(chan) < fence->sequence) 234 return;
132 break;
133 235
134 nouveau_fence_signal(fence); 236err_free:
135 nouveau_fence_unref(&fence); 237 kfree(work);
136 } 238err:
137 spin_unlock(&fctx->lock); 239 func(data);
138} 240}
139 241
140int 242int
141nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) 243nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
142{ 244{
143 struct nouveau_fence_chan *fctx = chan->fence; 245 struct nouveau_fence_chan *fctx = chan->fence;
246 struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
144 int ret; 247 int ret;
145 248
146 fence->channel = chan; 249 fence->channel = chan;
147 fence->timeout = jiffies + (15 * HZ); 250 fence->timeout = jiffies + (15 * HZ);
148 fence->sequence = ++fctx->sequence;
149 251
252 if (priv->uevent)
253 fence_init(&fence->base, &nouveau_fence_ops_uevent,
254 &fctx->lock, fctx->context, ++fctx->sequence);
255 else
256 fence_init(&fence->base, &nouveau_fence_ops_legacy,
257 &fctx->lock, fctx->context, ++fctx->sequence);
258 kref_get(&fctx->fence_ref);
259
260 trace_fence_emit(&fence->base);
150 ret = fctx->emit(fence); 261 ret = fctx->emit(fence);
151 if (!ret) { 262 if (!ret) {
152 kref_get(&fence->kref); 263 fence_get(&fence->base);
153 spin_lock(&fctx->lock); 264 spin_lock_irq(&fctx->lock);
265 nouveau_fence_update(chan, fctx);
154 list_add_tail(&fence->head, &fctx->pending); 266 list_add_tail(&fence->head, &fctx->pending);
155 spin_unlock(&fctx->lock); 267 spin_unlock_irq(&fctx->lock);
156 } 268 }
157 269
158 return ret; 270 return ret;
@@ -161,114 +273,70 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 bool
 nouveau_fence_done(struct nouveau_fence *fence)
 {
-	if (fence->channel)
-		nouveau_fence_update(fence->channel);
-	return !fence->channel;
-}
+	if (fence->base.ops == &nouveau_fence_ops_legacy ||
+	    fence->base.ops == &nouveau_fence_ops_uevent) {
+		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+		unsigned long flags;
 
-struct nouveau_fence_wait {
-	struct nouveau_fence_priv *priv;
-	struct nvif_notify notify;
-};
+		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+			return true;
 
-static int
-nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
-{
-	struct nouveau_fence_wait *wait =
-		container_of(notify, typeof(*wait), notify);
-	wake_up_all(&wait->priv->waiting);
-	return NVIF_NOTIFY_KEEP;
+		spin_lock_irqsave(&fctx->lock, flags);
+		nouveau_fence_update(fence->channel, fctx);
+		spin_unlock_irqrestore(&fctx->lock, flags);
+	}
+	return fence_is_signaled(&fence->base);
 }
 
-static int
-nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
-
+static long
+nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
 {
-	struct nouveau_channel *chan = fence->channel;
-	struct nouveau_fence_priv *priv = chan->drm->fence;
-	struct nouveau_fence_wait wait = { .priv = priv };
-	int ret = 0;
+	struct nouveau_fence *fence = from_fence(f);
+	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+	unsigned long t = jiffies, timeout = t + wait;
 
-	ret = nvif_notify_init(chan->object, NULL,
-			       nouveau_fence_wait_uevent_handler, false,
-			       G82_CHANNEL_DMA_V0_NTFY_UEVENT,
-			       &(struct nvif_notify_uevent_req) {
-			       },
-			       sizeof(struct nvif_notify_uevent_req),
-			       sizeof(struct nvif_notify_uevent_rep),
-			       &wait.notify);
-	if (ret)
-		return ret;
+	while (!nouveau_fence_done(fence)) {
+		ktime_t kt;
 
-	nvif_notify_get(&wait.notify);
-
-	if (fence->timeout) {
-		unsigned long timeout = fence->timeout - jiffies;
-
-		if (time_before(jiffies, fence->timeout)) {
-			if (intr) {
-				ret = wait_event_interruptible_timeout(
-						priv->waiting,
-						nouveau_fence_done(fence),
-						timeout);
-			} else {
-				ret = wait_event_timeout(priv->waiting,
-						nouveau_fence_done(fence),
-						timeout);
-			}
-		}
+		t = jiffies;
 
-		if (ret >= 0) {
-			fence->timeout = jiffies + ret;
-			if (time_after_eq(jiffies, fence->timeout))
-				ret = -EBUSY;
-		}
-	} else {
-		if (intr) {
-			ret = wait_event_interruptible(priv->waiting,
-					nouveau_fence_done(fence));
-		} else {
-			wait_event(priv->waiting, nouveau_fence_done(fence));
+		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
+			__set_current_state(TASK_RUNNING);
+			return 0;
 		}
+
+		__set_current_state(intr ? TASK_INTERRUPTIBLE :
+				    TASK_UNINTERRUPTIBLE);
+
+		kt = ktime_set(0, sleep_time);
+		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
+		sleep_time *= 2;
+		if (sleep_time > NSEC_PER_MSEC)
+			sleep_time = NSEC_PER_MSEC;
+
+		if (intr && signal_pending(current))
+			return -ERESTARTSYS;
 	}
 
-	nvif_notify_fini(&wait.notify);
-	if (unlikely(ret < 0))
-		return ret;
+	__set_current_state(TASK_RUNNING);
 
-	return 0;
+	return timeout - t;
 }
 
-int
-nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
+static int
+nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
 {
-	struct nouveau_channel *chan = fence->channel;
-	struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
-	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
-	ktime_t t;
 	int ret = 0;
 
-	while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
-		ret = nouveau_fence_wait_uevent(fence, intr);
-		if (ret < 0)
-			return ret;
-	}
-
 	while (!nouveau_fence_done(fence)) {
-		if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
+		if (time_after_eq(jiffies, fence->timeout)) {
 			ret = -EBUSY;
 			break;
 		}
 
-		__set_current_state(intr ? TASK_INTERRUPTIBLE :
-				    TASK_UNINTERRUPTIBLE);
-		if (lazy) {
-			t = ktime_set(0, sleep_time);
-			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
-			sleep_time *= 2;
-			if (sleep_time > NSEC_PER_MSEC)
-				sleep_time = NSEC_PER_MSEC;
-		}
+		__set_current_state(intr ?
+				    TASK_INTERRUPTIBLE :
+				    TASK_UNINTERRUPTIBLE);
 
 		if (intr && signal_pending(current)) {
 			ret = -ERESTARTSYS;
@@ -281,47 +349,86 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 {
-	struct nouveau_fence_chan *fctx = chan->fence;
-	struct nouveau_channel *prev;
-	int ret = 0;
+	long ret;
 
-	prev = fence ? fence->channel : NULL;
-	if (prev) {
-		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
-			ret = fctx->sync(fence, prev, chan);
-			if (unlikely(ret))
-				ret = nouveau_fence_wait(fence, true, false);
-		}
-	}
+	if (!lazy)
+		return nouveau_fence_wait_busy(fence, intr);
 
-	return ret;
+	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+	if (ret < 0)
+		return ret;
+	else if (!ret)
+		return -EBUSY;
+	else
+		return 0;
 }
 
-static void
-nouveau_fence_del(struct kref *kref)
+int
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
 {
-	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
-	kfree(fence);
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct fence *fence;
+	struct reservation_object *resv = nvbo->bo.resv;
+	struct reservation_object_list *fobj;
+	struct nouveau_fence *f;
+	int ret = 0, i;
+
+	if (!exclusive) {
+		ret = reservation_object_reserve_shared(resv);
+
+		if (ret)
+			return ret;
+	}
+
+	fobj = reservation_object_get_list(resv);
+	fence = reservation_object_get_excl(resv);
+
+	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
+		struct nouveau_channel *prev = NULL;
+
+		f = nouveau_local_fence(fence, chan->drm);
+		if (f)
+			prev = f->channel;
+
+		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+			ret = fence_wait(fence, intr);
+
+		return ret;
+	}
+
+	if (!exclusive || !fobj)
+		return ret;
+
+	for (i = 0; i < fobj->shared_count && !ret; ++i) {
+		struct nouveau_channel *prev = NULL;
+
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(resv));
+
+		f = nouveau_local_fence(fence, chan->drm);
+		if (f)
+			prev = f->channel;
+
+		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+			ret = fence_wait(fence, intr);
+
+		if (ret)
+			break;
+	}
+
+	return ret;
 }
 
 void
 nouveau_fence_unref(struct nouveau_fence **pfence)
 {
 	if (*pfence)
-		kref_put(&(*pfence)->kref, nouveau_fence_del);
+		fence_put(&(*pfence)->base);
 	*pfence = NULL;
 }
 
-struct nouveau_fence *
-nouveau_fence_ref(struct nouveau_fence *fence)
-{
-	if (fence)
-		kref_get(&fence->kref);
-	return fence;
-}
-
 int
 nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 		  struct nouveau_fence **pfence)
@@ -336,9 +443,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	if (!fence)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&fence->work);
 	fence->sysmem = sysmem;
-	kref_init(&fence->kref);
 
 	ret = nouveau_fence_emit(fence, chan);
 	if (ret)
@@ -347,3 +452,101 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	*pfence = fence;
 	return ret;
 }
+
+static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+{
+	return "nouveau";
+}
+
+static const char *nouveau_fence_get_timeline_name(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+	return fence->channel ? fctx->name : "dead channel";
+}
+
+/*
+ * In an ideal world, read would not assume the channel context is still alive.
+ * This function may be called from another device, running into free memory as a
+ * result. The drm node should still be there, so we can derive the index from
+ * the fence context.
+ */
+static bool nouveau_fence_is_signaled(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+	struct nouveau_channel *chan = fence->channel;
+
+	return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+}
+
+static bool nouveau_fence_no_signaling(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+
+	/*
+	 * caller should have a reference on the fence,
+	 * else fence could get freed here
+	 */
+	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
+
+	/*
+	 * This needs uevents to work correctly, but fence_add_callback relies on
+	 * being able to enable signaling. It will still get signaled eventually,
+	 * just not right away.
+	 */
+	if (nouveau_fence_is_signaled(f)) {
+		list_del(&fence->head);
+
+		fence_put(&fence->base);
+		return false;
+	}
+
+	return true;
+}
+
+static void nouveau_fence_release(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
+	fence_free(&fence->base);
+}
+
+static const struct fence_ops nouveau_fence_ops_legacy = {
+	.get_driver_name = nouveau_fence_get_get_driver_name,
+	.get_timeline_name = nouveau_fence_get_timeline_name,
+	.enable_signaling = nouveau_fence_no_signaling,
+	.signaled = nouveau_fence_is_signaled,
+	.wait = nouveau_fence_wait_legacy,
+	.release = nouveau_fence_release
+};
+
+static bool nouveau_fence_enable_signaling(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+	bool ret;
+
+	if (!fctx->notify_ref++)
+		nvif_notify_get(&fctx->notify);
+
+	ret = nouveau_fence_no_signaling(f);
+	if (ret)
+		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+	else if (!--fctx->notify_ref)
+		nvif_notify_put(&fctx->notify);
+
+	return ret;
+}
+
+static const struct fence_ops nouveau_fence_ops_uevent = {
+	.get_driver_name = nouveau_fence_get_get_driver_name,
+	.get_timeline_name = nouveau_fence_get_timeline_name,
+	.enable_signaling = nouveau_fence_enable_signaling,
+	.signaled = nouveau_fence_is_signaled,
+	.wait = fence_default_wait,
+	.release = NULL
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c57bb61da58c..943b0b17b1fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,33 +1,37 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
+#include <linux/fence.h>
+#include <nvif/notify.h>
+
 struct nouveau_drm;
+struct nouveau_bo;
 
 struct nouveau_fence {
+	struct fence base;
+
 	struct list_head head;
-	struct list_head work;
-	struct kref kref;
 
 	bool sysmem;
 
 	struct nouveau_channel *channel;
 	unsigned long timeout;
-	u32 sequence;
 };
 
 int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
 		      struct nouveau_fence **);
-struct nouveau_fence *
-nouveau_fence_ref(struct nouveau_fence *);
 void nouveau_fence_unref(struct nouveau_fence **);
 
 int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct fence *, void (*)(void *), void *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
-int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
 struct nouveau_fence_chan {
+	spinlock_t lock;
+	struct kref fence_ref;
+
 	struct list_head pending;
 	struct list_head flip;
 
@@ -38,8 +42,12 @@ struct nouveau_fence_chan {
 	int (*emit32)(struct nouveau_channel *, u64, u32);
 	int (*sync32)(struct nouveau_channel *, u64, u32);
 
-	spinlock_t lock;
 	u32 sequence;
+	u32 context;
+	char name[32];
+
+	struct nvif_notify notify;
+	int notify_ref;
 };
 
 struct nouveau_fence_priv {
@@ -49,14 +57,15 @@ struct nouveau_fence_priv {
 	int (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	wait_queue_head_t waiting;
+	u32 contexts, context_base;
 	bool uevent;
 };
 
 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
 
-void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
+void nouveau_fence_context_free(struct nouveau_fence_chan *);
 
 int nv04_fence_create(struct nouveau_drm *);
 int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 292a677bfed4..36951ee4b157 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -98,17 +98,23 @@ static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct nouveau_fence *fence = NULL;
+	struct reservation_object *resv = nvbo->bo.resv;
+	struct reservation_object_list *fobj;
+	struct fence *fence = NULL;
+
+	fobj = reservation_object_get_list(resv);
 
 	list_del(&vma->head);
 
-	if (mapped) {
-		spin_lock(&nvbo->bo.bdev->fence_lock);
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
-	}
+	if (fobj && fobj->shared_count > 1)
+		ttm_bo_wait(&nvbo->bo, true, false, false);
+	else if (fobj && fobj->shared_count == 1)
+		fence = rcu_dereference_protected(fobj->shared[0],
+						reservation_object_held(resv));
+	else
+		fence = reservation_object_get_excl(nvbo->bo.resv);
 
-	if (fence) {
+	if (fence && mapped) {
 		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
 	} else {
 		if (mapped)
@@ -116,7 +122,6 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 			nouveau_vm_put(vma);
 		kfree(vma);
 	}
-	nouveau_fence_unref(&fence);
 }
 
 void
@@ -160,7 +165,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		flags |= TTM_PL_FLAG_SYSTEM;
 
 	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
-			     tile_flags, NULL, pnvbo);
+			     tile_flags, NULL, NULL, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
@@ -288,24 +293,23 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 }
 
 struct validate_op {
-	struct list_head vram_list;
-	struct list_head gart_list;
-	struct list_head both_list;
+	struct list_head list;
 	struct ww_acquire_ctx ticket;
 };
 
 static void
-validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
-		   struct ww_acquire_ctx *ticket)
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+			struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
-	struct list_head *entry, *tmp;
 	struct nouveau_bo *nvbo;
+	struct drm_nouveau_gem_pushbuf_bo *b;
 
-	list_for_each_safe(entry, tmp, list) {
-		nvbo = list_entry(entry, struct nouveau_bo, entry);
+	while (!list_empty(&op->list)) {
+		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
+		b = &pbbo[nvbo->pbbo_index];
 
 		if (likely(fence))
-			nouveau_bo_fence(nvbo, fence);
+			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -314,23 +318,16 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
 
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
-		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
+		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
 		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
 static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
+validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
-	validate_fini_list(&op->vram_list, fence, &op->ticket);
-	validate_fini_list(&op->gart_list, fence, &op->ticket);
-	validate_fini_list(&op->both_list, fence, &op->ticket);
-}
-
-static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence)
-{
-	validate_fini_no_ticket(op, fence);
+	validate_fini_no_ticket(op, fence, pbbo);
 	ww_acquire_fini(&op->ticket);
 }
 
@@ -344,6 +341,9 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	int trycnt = 0;
 	int ret, i;
 	struct nouveau_bo *res_bo = NULL;
+	LIST_HEAD(gart_list);
+	LIST_HEAD(vram_list);
+	LIST_HEAD(both_list);
 
 	ww_acquire_init(&op->ticket, &reservation_ww_class);
 retry:
@@ -360,9 +360,8 @@ retry:
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
 			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
-			ww_acquire_done(&op->ticket);
-			validate_fini(op, NULL);
-			return -ENOENT;
+			ret = -ENOENT;
+			break;
 		}
 		nvbo = nouveau_gem_object(gem);
 		if (nvbo == res_bo) {
@@ -375,14 +374,16 @@ retry:
 			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
-			ww_acquire_done(&op->ticket);
-			validate_fini(op, NULL);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
 		if (ret) {
-			validate_fini_no_ticket(op, NULL);
+			list_splice_tail_init(&vram_list, &op->list);
+			list_splice_tail_init(&gart_list, &op->list);
+			list_splice_tail_init(&both_list, &op->list);
+			validate_fini_no_ticket(op, NULL, NULL);
 			if (unlikely(ret == -EDEADLK)) {
 				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 							      &op->ticket);
@@ -390,12 +391,9 @@ retry:
 				res_bo = nvbo;
 			}
 			if (unlikely(ret)) {
-				ww_acquire_done(&op->ticket);
-				ww_acquire_fini(&op->ticket);
-				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
 					NV_PRINTK(error, cli, "fail reserve\n");
-				return ret;
+				break;
 			}
 		}
 
@@ -404,45 +402,32 @@ retry:
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
-			list_add_tail(&nvbo->entry, &op->both_list);
+			list_add_tail(&nvbo->entry, &both_list);
 		else
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
-			list_add_tail(&nvbo->entry, &op->vram_list);
+			list_add_tail(&nvbo->entry, &vram_list);
 		else
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
-			list_add_tail(&nvbo->entry, &op->gart_list);
+			list_add_tail(&nvbo->entry, &gart_list);
 		else {
 			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
-			list_add_tail(&nvbo->entry, &op->both_list);
-			ww_acquire_done(&op->ticket);
-			validate_fini(op, NULL);
-			return -EINVAL;
+			list_add_tail(&nvbo->entry, &both_list);
+			ret = -EINVAL;
+			break;
 		}
 		if (nvbo == res_bo)
 			goto retry;
 	}
 
 	ww_acquire_done(&op->ticket);
-	return 0;
-}
-
-static int
-validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
-{
-	struct nouveau_fence *fence = NULL;
-	int ret = 0;
-
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-	if (fence) {
-		ret = nouveau_fence_sync(fence, chan);
-		nouveau_fence_unref(&fence);
-	}
-
+	list_splice_tail(&vram_list, &op->list);
+	list_splice_tail(&gart_list, &op->list);
+	list_splice_tail(&both_list, &op->list);
+	if (ret)
+		validate_fini(op, NULL, NULL);
 	return ret;
+
 }
 
 static int
@@ -474,9 +459,10 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		ret = validate_sync(chan, nvbo);
+		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 		if (unlikely(ret)) {
-			NV_PRINTK(error, cli, "fail post-validate sync\n");
+			if (ret != -ERESTARTSYS)
+				NV_PRINTK(error, cli, "fail post-validate sync\n");
 			return ret;
 		}
 
@@ -513,11 +499,9 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     struct validate_op *op, int *apply_relocs)
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	int ret, relocs = 0;
+	int ret;
 
-	INIT_LIST_HEAD(&op->vram_list);
-	INIT_LIST_HEAD(&op->gart_list);
-	INIT_LIST_HEAD(&op->both_list);
+	INIT_LIST_HEAD(&op->list);
 
 	if (nr_buffers == 0)
 		return 0;
@@ -529,34 +513,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return ret;
 	}
 
-	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
-	if (unlikely(ret < 0)) {
-		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate vram_list\n");
-		validate_fini(op, NULL);
-		return ret;
-	}
-	relocs += ret;
-
-	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
-	if (unlikely(ret < 0)) {
-		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate gart_list\n");
-		validate_fini(op, NULL);
-		return ret;
-	}
-	relocs += ret;
-
-	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate both_list\n");
-		validate_fini(op, NULL);
+			NV_PRINTK(error, cli, "validating bo list\n");
+		validate_fini(op, NULL, NULL);
 		return ret;
 	}
-	relocs += ret;
-
-	*apply_relocs = relocs;
+	*apply_relocs = ret;
 	return 0;
 }
 
@@ -659,9 +623,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 				data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.bdev->fence_lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
+		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
 		if (ret) {
 			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -839,7 +801,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 out:
-	validate_fini(&op, fence);
+	validate_fini(&op, fence, bo);
 	nouveau_fence_unref(&fence);
 
 out_prevalid:
@@ -884,17 +846,29 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	struct drm_gem_object *gem;
 	struct nouveau_bo *nvbo;
 	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
-	int ret = -EINVAL;
+	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+	int ret;
 
 	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
 	if (!gem)
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
+	if (no_wait)
+		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
+	else {
+		long lret;
+
+		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
+		if (!lret)
+			ret = -EBUSY;
+		else if (lret > 0)
+			ret = 0;
+		else
+			ret = lret;
+	}
 	drm_gem_object_unreference_unlocked(gem);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index ddab762d81fe..e4049faca780 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -39,7 +39,7 @@ struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
 extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
 extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
 extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
-	struct drm_device *, size_t size, struct sg_table *);
+	struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
 extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
 extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 1f51008e4d26..228226ab27fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -23,6 +23,7 @@
  */
 
 #include <drm/drmP.h>
+#include <linux/dma-buf.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_gem.h"
@@ -56,17 +57,20 @@ void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
-							 size_t size,
+							 struct dma_buf_attachment *attach,
 							 struct sg_table *sg)
 {
 	struct nouveau_bo *nvbo;
+	struct reservation_object *robj = attach->dmabuf->resv;
 	u32 flags = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
-	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
-			     sg, &nvbo);
+	ww_mutex_lock(&robj->lock, NULL);
+	ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
+			     sg, robj, &nvbo);
+	ww_mutex_unlock(&robj->lock);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 3c6962d15b26..8fbbf3093d86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -29,7 +29,7 @@
29#include "nouveau_sysfs.h" 29#include "nouveau_sysfs.h"
30 30
31MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future"); 31MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future");
32static int nouveau_pstate; 32int nouveau_pstate;
33module_param_named(pstate, nouveau_pstate, int, 0400); 33module_param_named(pstate, nouveau_pstate, int, 0400);
34 34
35static inline struct drm_device * 35static inline struct drm_device *
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
index f973378160f8..4e5ea9241b28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -16,4 +16,6 @@ nouveau_sysfs(struct drm_device *dev)
 int nouveau_sysfs_init(struct drm_device *);
 void nouveau_sysfs_fini(struct drm_device *);
 
+extern int nouveau_pstate;
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 53874b76b031..753a6def61e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -71,8 +71,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 static int
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
-			 struct ttm_placement *placement,
-			 uint32_t flags,
+			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
@@ -158,8 +157,7 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
-			 struct ttm_placement *placement,
-			 uint32_t flags,
+			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -239,8 +237,7 @@ nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
239static int 237static int
240nv04_gart_manager_new(struct ttm_mem_type_manager *man, 238nv04_gart_manager_new(struct ttm_mem_type_manager *man,
241 struct ttm_buffer_object *bo, 239 struct ttm_buffer_object *bo,
242 struct ttm_placement *placement, 240 const struct ttm_place *place,
243 uint32_t flags,
244 struct ttm_mem_reg *mem) 241 struct ttm_mem_reg *mem)
245{ 242{
246 struct nouveau_mem *node; 243 struct nouveau_mem *node;
@@ -284,7 +281,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
284 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); 281 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
285 282
286 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 283 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
287 return drm_mmap(filp, vma); 284 return -EINVAL;
288 285
289 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); 286 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
290} 287}
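
All three managers move to the slimmer hook signature in lockstep: the range limits and placement flags that used to arrive as a struct ttm_placement plus a separate uint32_t are now bundled into one const struct ttm_place. A hypothetical manager illustrating where those fields now live:

static int demo_manager_new(struct ttm_mem_type_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_mem_reg *mem)
{
	/* fpfn/lpfn bound the allocation range; flags carry the caching
	 * and domain bits that used to come in separately. */
	if (place->fpfn || place->lpfn)
		return -EINVAL;	/* sketch: no ranged allocations */

	mem->mm_node = NULL;	/* nothing actually allocated here */
	return 0;
}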
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 239c2c5a9615..f9859deb108a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -41,7 +41,7 @@ nv04_fence_emit(struct nouveau_fence *fence)
41 int ret = RING_SPACE(chan, 2); 41 int ret = RING_SPACE(chan, 2);
42 if (ret == 0) { 42 if (ret == 0) {
43 BEGIN_NV04(chan, NvSubSw, 0x0150, 1); 43 BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
44 OUT_RING (chan, fence->sequence); 44 OUT_RING (chan, fence->base.seqno);
45 FIRE_RING (chan); 45 FIRE_RING (chan);
46 } 46 }
47 return ret; 47 return ret;
@@ -67,7 +67,7 @@ nv04_fence_context_del(struct nouveau_channel *chan)
67 struct nv04_fence_chan *fctx = chan->fence; 67 struct nv04_fence_chan *fctx = chan->fence;
68 nouveau_fence_context_del(&fctx->base); 68 nouveau_fence_context_del(&fctx->base);
69 chan->fence = NULL; 69 chan->fence = NULL;
70 kfree(fctx); 70 nouveau_fence_context_free(&fctx->base);
71} 71}
72 72
73static int 73static int
@@ -75,7 +75,7 @@ nv04_fence_context_new(struct nouveau_channel *chan)
75{ 75{
76 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); 76 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
77 if (fctx) { 77 if (fctx) {
78 nouveau_fence_context_new(&fctx->base); 78 nouveau_fence_context_new(chan, &fctx->base);
79 fctx->base.emit = nv04_fence_emit; 79 fctx->base.emit = nv04_fence_emit;
80 fctx->base.sync = nv04_fence_sync; 80 fctx->base.sync = nv04_fence_sync;
81 fctx->base.read = nv04_fence_read; 81 fctx->base.read = nv04_fence_read;
@@ -105,5 +105,7 @@ nv04_fence_create(struct nouveau_drm *drm)
105 priv->base.dtor = nv04_fence_destroy; 105 priv->base.dtor = nv04_fence_destroy;
106 priv->base.context_new = nv04_fence_context_new; 106 priv->base.context_new = nv04_fence_context_new;
107 priv->base.context_del = nv04_fence_context_del; 107 priv->base.context_del = nv04_fence_context_del;
108 priv->base.contexts = 15;
109 priv->base.context_base = fence_context_alloc(priv->base.contexts);
108 return 0; 110 return 0;
109} 111}
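
This is the pattern every nouveau fence backend adopts in this series: fences now embed a generic struct fence (hence fence->base.seqno above), and each backend reserves a block of global fence context IDs sized to its channel count. In isolation:

/* fence_context_alloc() (linux/fence.h) reserves `num` consecutive
 * global context IDs and returns the first; channel N then stamps
 * its fences with context_base + N. */
priv->base.contexts = 15;	/* nv04: one per software channel */
priv->base.context_base = fence_context_alloc(priv->base.contexts);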
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4faaf0acf5d7..5e1ea1cdce75 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -33,7 +33,7 @@ nv10_fence_emit(struct nouveau_fence *fence)
33 int ret = RING_SPACE(chan, 2); 33 int ret = RING_SPACE(chan, 2);
34 if (ret == 0) { 34 if (ret == 0) {
35 BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1); 35 BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
36 OUT_RING (chan, fence->sequence); 36 OUT_RING (chan, fence->base.seqno);
37 FIRE_RING (chan); 37 FIRE_RING (chan);
38 } 38 }
39 return ret; 39 return ret;
@@ -63,7 +63,7 @@ nv10_fence_context_del(struct nouveau_channel *chan)
63 nvif_object_fini(&fctx->head[i]); 63 nvif_object_fini(&fctx->head[i]);
64 nvif_object_fini(&fctx->sema); 64 nvif_object_fini(&fctx->sema);
65 chan->fence = NULL; 65 chan->fence = NULL;
66 kfree(fctx); 66 nouveau_fence_context_free(&fctx->base);
67} 67}
68 68
69int 69int
@@ -75,7 +75,7 @@ nv10_fence_context_new(struct nouveau_channel *chan)
75 if (!fctx) 75 if (!fctx)
76 return -ENOMEM; 76 return -ENOMEM;
77 77
78 nouveau_fence_context_new(&fctx->base); 78 nouveau_fence_context_new(chan, &fctx->base);
79 fctx->base.emit = nv10_fence_emit; 79 fctx->base.emit = nv10_fence_emit;
80 fctx->base.read = nv10_fence_read; 80 fctx->base.read = nv10_fence_read;
81 fctx->base.sync = nv10_fence_sync; 81 fctx->base.sync = nv10_fence_sync;
@@ -106,6 +106,8 @@ nv10_fence_create(struct nouveau_drm *drm)
106 priv->base.dtor = nv10_fence_destroy; 106 priv->base.dtor = nv10_fence_destroy;
107 priv->base.context_new = nv10_fence_context_new; 107 priv->base.context_new = nv10_fence_context_new;
108 priv->base.context_del = nv10_fence_context_del; 108 priv->base.context_del = nv10_fence_context_del;
109 priv->base.contexts = 31;
110 priv->base.context_base = fence_context_alloc(priv->base.contexts);
109 spin_lock_init(&priv->lock); 111 spin_lock_init(&priv->lock);
110 return 0; 112 return 0;
111} 113}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index ca907479f92f..40b461c7d5c5 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -84,7 +84,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
84 if (!fctx) 84 if (!fctx)
85 return -ENOMEM; 85 return -ENOMEM;
86 86
87 nouveau_fence_context_new(&fctx->base); 87 nouveau_fence_context_new(chan, &fctx->base);
88 fctx->base.emit = nv10_fence_emit; 88 fctx->base.emit = nv10_fence_emit;
89 fctx->base.read = nv10_fence_read; 89 fctx->base.read = nv10_fence_read;
90 fctx->base.sync = nv17_fence_sync; 90 fctx->base.sync = nv17_fence_sync;
@@ -124,10 +124,12 @@ nv17_fence_create(struct nouveau_drm *drm)
124 priv->base.resume = nv17_fence_resume; 124 priv->base.resume = nv17_fence_resume;
125 priv->base.context_new = nv17_fence_context_new; 125 priv->base.context_new = nv17_fence_context_new;
126 priv->base.context_del = nv10_fence_context_del; 126 priv->base.context_del = nv10_fence_context_del;
127 priv->base.contexts = 31;
128 priv->base.context_base = fence_context_alloc(priv->base.contexts);
127 spin_lock_init(&priv->lock); 129 spin_lock_init(&priv->lock);
128 130
129 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 131 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
130 0, 0x0000, NULL, &priv->bo); 132 0, 0x0000, NULL, NULL, &priv->bo);
131 if (!ret) { 133 if (!ret) {
132 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 134 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
133 if (!ret) { 135 if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 03949eaa629f..ae873d1a8d46 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1066,7 +1066,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1066 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; 1066 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
1067 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; 1067 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
1068 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; 1068 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
1069 u32 vblan2e = 0, vblan2s = 1; 1069 u32 vblan2e = 0, vblan2s = 1, vblankus = 0;
1070 u32 *push; 1070 u32 *push;
1071 int ret; 1071 int ret;
1072 1072
@@ -1083,6 +1083,11 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1083 vblanke = vsynce + vbackp; 1083 vblanke = vsynce + vbackp;
1084 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; 1084 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
1085 vblanks = vactive - vfrontp - 1; 1085 vblanks = vactive - vfrontp - 1;
1086 /* XXX: Safe underestimate, even "0" works */
1087 vblankus = (vactive - mode->vdisplay - 2) * hactive;
1088 vblankus *= 1000;
1089 vblankus /= mode->clock;
1090
1086 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 1091 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1087 vblan2e = vactive + vsynce + vbackp; 1092 vblan2e = vactive + vsynce + vbackp;
1088 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); 1093 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
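
The new vblankus value is the blanking period in microseconds: blanking lines times line width gives pixels, and dividing by mode->clock (stored in kHz, hence the extra * 1000) converts to microseconds. A worked check against a hypothetical CEA 1080p60 mode, assuming vactive and hactive hold the total line and pixel counts at this point:

/*
 * vtotal = 1125, vdisplay = 1080, htotal = 2200, clock = 148500 kHz
 * vblankus = (1125 - 1080 - 2) * 2200 * 1000 / 148500 ~= 637 us
 * The real blanking interval is 45 lines ~= 666 us, so the two-line
 * margin keeps this the "safe underestimate" the comment promises.
 */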
@@ -1099,14 +1104,14 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1099 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); 1104 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
1100 evo_data(push, 0x00800000 | mode->clock); 1105 evo_data(push, 0x00800000 | mode->clock);
1101 evo_data(push, (ilace == 2) ? 2 : 0); 1106 evo_data(push, (ilace == 2) ? 2 : 0);
1102 evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6); 1107 evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 8);
1103 evo_data(push, 0x00000000); 1108 evo_data(push, 0x00000000);
1104 evo_data(push, (vactive << 16) | hactive); 1109 evo_data(push, (vactive << 16) | hactive);
1105 evo_data(push, ( vsynce << 16) | hsynce); 1110 evo_data(push, ( vsynce << 16) | hsynce);
1106 evo_data(push, (vblanke << 16) | hblanke); 1111 evo_data(push, (vblanke << 16) | hblanke);
1107 evo_data(push, (vblanks << 16) | hblanks); 1112 evo_data(push, (vblanks << 16) | hblanks);
1108 evo_data(push, (vblan2e << 16) | vblan2s); 1113 evo_data(push, (vblan2e << 16) | vblan2s);
1109 evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1); 1114 evo_data(push, vblankus);
1110 evo_data(push, 0x00000000); 1115 evo_data(push, 0x00000000);
1111 evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); 1116 evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
1112 evo_data(push, 0x00000311); 1117 evo_data(push, 0x00000311);
@@ -1378,7 +1383,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
1378 drm_mode_crtc_set_gamma_size(crtc, 256); 1383 drm_mode_crtc_set_gamma_size(crtc, 256);
1379 1384
1380 ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, 1385 ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
1381 0, 0x0000, NULL, &head->base.lut.nvbo); 1386 0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
1382 if (!ret) { 1387 if (!ret) {
1383 ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM); 1388 ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
1384 if (!ret) { 1389 if (!ret) {
@@ -1401,7 +1406,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
1401 goto out; 1406 goto out;
1402 1407
1403 ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, 1408 ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
1404 0, 0x0000, NULL, &head->base.cursor.nvbo); 1409 0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
1405 if (!ret) { 1410 if (!ret) {
1406 ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM); 1411 ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
1407 if (!ret) { 1412 if (!ret) {
@@ -1651,17 +1656,21 @@ static void
1651nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) 1656nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1652{ 1657{
1653 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1658 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1659 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1654 struct nouveau_connector *nv_connector; 1660 struct nouveau_connector *nv_connector;
1655 struct nv50_disp *disp = nv50_disp(encoder->dev); 1661 struct nv50_disp *disp = nv50_disp(encoder->dev);
1656 struct { 1662 struct __packed {
1657 struct nv50_disp_mthd_v1 base; 1663 struct {
1658 struct nv50_disp_sor_hda_eld_v0 eld; 1664 struct nv50_disp_mthd_v1 mthd;
1665 struct nv50_disp_sor_hda_eld_v0 eld;
1666 } base;
1659 u8 data[sizeof(nv_connector->base.eld)]; 1667 u8 data[sizeof(nv_connector->base.eld)];
1660 } args = { 1668 } args = {
1661 .base.version = 1, 1669 .base.mthd.version = 1,
1662 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD, 1670 .base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
1663 .base.hasht = nv_encoder->dcb->hasht, 1671 .base.mthd.hasht = nv_encoder->dcb->hasht,
1664 .base.hashm = nv_encoder->dcb->hashm, 1672 .base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
1673 (0x0100 << nv_crtc->index),
1665 }; 1674 };
1666 1675
1667 nv_connector = nouveau_encoder_connector_get(nv_encoder); 1676 nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -1671,11 +1680,11 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1671 drm_edid_to_eld(&nv_connector->base, nv_connector->edid); 1680 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1672 memcpy(args.data, nv_connector->base.eld, sizeof(args.data)); 1681 memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
1673 1682
1674 nvif_mthd(disp->disp, 0, &args, sizeof(args)); 1683 nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
1675} 1684}
1676 1685
1677static void 1686static void
1678nv50_audio_disconnect(struct drm_encoder *encoder) 1687nv50_audio_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
1679{ 1688{
1680 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1689 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1681 struct nv50_disp *disp = nv50_disp(encoder->dev); 1690 struct nv50_disp *disp = nv50_disp(encoder->dev);
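
Two changes land in the ELD upload: hashm now folds in (0x0100 << nv_crtc->index) so the method targets one head instead of all of them, and the payload is trimmed to the data actually present. The trim works because byte 2 of an ELD is Baseline_ELD_Len, a count of 4-byte words, so header plus args.data[2] * 4 covers exactly the baseline block. As a hypothetical helper:

/* Sketch: payload size for an ELD upload. eld[2] is Baseline_ELD_Len
 * in 4-byte units per the HDA spec; hdr is the method header size. */
static size_t eld_payload_size(const u8 *eld, size_t hdr)
{
	return hdr + eld[2] * 4;
}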
@@ -1686,7 +1695,8 @@ nv50_audio_disconnect(struct drm_encoder *encoder)
1686 .base.version = 1, 1695 .base.version = 1,
1687 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD, 1696 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
1688 .base.hasht = nv_encoder->dcb->hasht, 1697 .base.hasht = nv_encoder->dcb->hasht,
1689 .base.hashm = nv_encoder->dcb->hashm, 1698 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
1699 (0x0100 << nv_crtc->index),
1690 }; 1700 };
1691 1701
1692 nvif_mthd(disp->disp, 0, &args, sizeof(args)); 1702 nvif_mthd(disp->disp, 0, &args, sizeof(args));
@@ -1745,8 +1755,6 @@ nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
1745 (0x0100 << nv_crtc->index), 1755 (0x0100 << nv_crtc->index),
1746 }; 1756 };
1747 1757
1748 nv50_audio_disconnect(encoder);
1749
1750 nvif_mthd(disp->disp, 0, &args, sizeof(args)); 1758 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1751} 1759}
1752 1760
@@ -1855,6 +1863,7 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
1855 if (nv_crtc) { 1863 if (nv_crtc) {
1856 nv50_crtc_prepare(&nv_crtc->base); 1864 nv50_crtc_prepare(&nv_crtc->base);
1857 nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0); 1865 nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
1866 nv50_audio_disconnect(encoder, nv_crtc);
1858 nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc); 1867 nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
1859 } 1868 }
1860} 1869}
@@ -1954,6 +1963,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1954 proto = 0x8; 1963 proto = 0x8;
1955 else 1964 else
1956 proto = 0x9; 1965 proto = 0x9;
1966 nv50_audio_mode_set(encoder, mode);
1957 break; 1967 break;
1958 default: 1968 default:
1959 BUG_ON(1); 1969 BUG_ON(1);
@@ -2458,7 +2468,7 @@ nv50_display_create(struct drm_device *dev)
2458 2468
2459 /* small shared memory area we use for notifiers and semaphores */ 2469 /* small shared memory area we use for notifiers and semaphores */
2460 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 2470 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
2461 0, 0x0000, NULL, &disp->sync); 2471 0, 0x0000, NULL, NULL, &disp->sync);
2462 if (!ret) { 2472 if (!ret) {
2463 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); 2473 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
2464 if (!ret) { 2474 if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 195cf51a7c31..22d242b37962 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -46,7 +46,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
46 if (!fctx) 46 if (!fctx)
47 return -ENOMEM; 47 return -ENOMEM;
48 48
49 nouveau_fence_context_new(&fctx->base); 49 nouveau_fence_context_new(chan, &fctx->base);
50 fctx->base.emit = nv10_fence_emit; 50 fctx->base.emit = nv10_fence_emit;
51 fctx->base.read = nv10_fence_read; 51 fctx->base.read = nv10_fence_read;
52 fctx->base.sync = nv17_fence_sync; 52 fctx->base.sync = nv17_fence_sync;
@@ -95,10 +95,12 @@ nv50_fence_create(struct nouveau_drm *drm)
95 priv->base.resume = nv17_fence_resume; 95 priv->base.resume = nv17_fence_resume;
96 priv->base.context_new = nv50_fence_context_new; 96 priv->base.context_new = nv50_fence_context_new;
97 priv->base.context_del = nv10_fence_context_del; 97 priv->base.context_del = nv10_fence_context_del;
98 priv->base.contexts = 127;
99 priv->base.context_base = fence_context_alloc(priv->base.contexts);
98 spin_lock_init(&priv->lock); 100 spin_lock_init(&priv->lock);
99 101
100 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 102 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
101 0, 0x0000, NULL, &priv->bo); 103 0, 0x0000, NULL, NULL, &priv->bo);
102 if (!ret) { 104 if (!ret) {
103 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 105 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
104 if (!ret) { 106 if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 933a779c93ab..d6c6c87c3f07 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -82,7 +82,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
82 else 82 else
83 addr += fctx->vma.offset; 83 addr += fctx->vma.offset;
84 84
85 return fctx->base.emit32(chan, addr, fence->sequence); 85 return fctx->base.emit32(chan, addr, fence->base.seqno);
86} 86}
87 87
88static int 88static int
@@ -97,7 +97,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
97 else 97 else
98 addr += fctx->vma.offset; 98 addr += fctx->vma.offset;
99 99
100 return fctx->base.sync32(chan, addr, fence->sequence); 100 return fctx->base.sync32(chan, addr, fence->base.seqno);
101} 101}
102 102
103static u32 103static u32
@@ -120,11 +120,12 @@ nv84_fence_context_del(struct nouveau_channel *chan)
120 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); 120 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
121 } 121 }
122 122
123 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
123 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); 124 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
124 nouveau_bo_vma_del(priv->bo, &fctx->vma); 125 nouveau_bo_vma_del(priv->bo, &fctx->vma);
125 nouveau_fence_context_del(&fctx->base); 126 nouveau_fence_context_del(&fctx->base);
126 chan->fence = NULL; 127 chan->fence = NULL;
127 kfree(fctx); 128 nouveau_fence_context_free(&fctx->base);
128} 129}
129 130
130int 131int
@@ -139,12 +140,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
139 if (!fctx) 140 if (!fctx)
140 return -ENOMEM; 141 return -ENOMEM;
141 142
142 nouveau_fence_context_new(&fctx->base); 143 nouveau_fence_context_new(chan, &fctx->base);
143 fctx->base.emit = nv84_fence_emit; 144 fctx->base.emit = nv84_fence_emit;
144 fctx->base.sync = nv84_fence_sync; 145 fctx->base.sync = nv84_fence_sync;
145 fctx->base.read = nv84_fence_read; 146 fctx->base.read = nv84_fence_read;
146 fctx->base.emit32 = nv84_fence_emit32; 147 fctx->base.emit32 = nv84_fence_emit32;
147 fctx->base.sync32 = nv84_fence_sync32; 148 fctx->base.sync32 = nv84_fence_sync32;
149 fctx->base.sequence = nv84_fence_read(chan);
148 150
149 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); 151 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
150 if (ret == 0) { 152 if (ret == 0) {
@@ -158,8 +160,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
158 ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]); 160 ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
159 } 161 }
160 162
161 nouveau_bo_wr32(priv->bo, chan->chid * 16/4, 0x00000000);
162
163 if (ret) 163 if (ret)
164 nv84_fence_context_del(chan); 164 nv84_fence_context_del(chan);
165 return ret; 165 return ret;
@@ -168,13 +168,12 @@ nv84_fence_context_new(struct nouveau_channel *chan)
168static bool 168static bool
169nv84_fence_suspend(struct nouveau_drm *drm) 169nv84_fence_suspend(struct nouveau_drm *drm)
170{ 170{
171 struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
172 struct nv84_fence_priv *priv = drm->fence; 171 struct nv84_fence_priv *priv = drm->fence;
173 int i; 172 int i;
174 173
175 priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32)); 174 priv->suspend = vmalloc(priv->base.contexts * sizeof(u32));
176 if (priv->suspend) { 175 if (priv->suspend) {
177 for (i = 0; i <= pfifo->max; i++) 176 for (i = 0; i < priv->base.contexts; i++)
178 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4); 177 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
179 } 178 }
180 179
@@ -184,12 +183,11 @@ nv84_fence_suspend(struct nouveau_drm *drm)
184static void 183static void
185nv84_fence_resume(struct nouveau_drm *drm) 184nv84_fence_resume(struct nouveau_drm *drm)
186{ 185{
187 struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
188 struct nv84_fence_priv *priv = drm->fence; 186 struct nv84_fence_priv *priv = drm->fence;
189 int i; 187 int i;
190 188
191 if (priv->suspend) { 189 if (priv->suspend) {
192 for (i = 0; i <= pfifo->max; i++) 190 for (i = 0; i < priv->base.contexts; i++)
193 nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]); 191 nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
194 vfree(priv->suspend); 192 vfree(priv->suspend);
195 priv->suspend = NULL; 193 priv->suspend = NULL;
@@ -229,11 +227,12 @@ nv84_fence_create(struct nouveau_drm *drm)
229 priv->base.context_new = nv84_fence_context_new; 227 priv->base.context_new = nv84_fence_context_new;
230 priv->base.context_del = nv84_fence_context_del; 228 priv->base.context_del = nv84_fence_context_del;
231 229
232 init_waitqueue_head(&priv->base.waiting); 230 priv->base.contexts = pfifo->max + 1;
231 priv->base.context_base = fence_context_alloc(priv->base.contexts);
233 priv->base.uevent = true; 232 priv->base.uevent = true;
234 233
235 ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0, 234 ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
236 TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo); 235 TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
237 if (ret == 0) { 236 if (ret == 0) {
238 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 237 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
239 if (ret == 0) { 238 if (ret == 0) {
@@ -246,8 +245,8 @@ nv84_fence_create(struct nouveau_drm *drm)
246 } 245 }
247 246
248 if (ret == 0) 247 if (ret == 0)
249 ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0, 248 ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
250 TTM_PL_FLAG_TT, 0, 0, NULL, 249 TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
251 &priv->bo_gart); 250 &priv->bo_gart);
252 if (ret == 0) { 251 if (ret == 0) {
253 ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT); 252 ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
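
Beyond switching the suspend/resume loops from pfifo->max to priv->base.contexts, note the seqno handoff: context_del now writes the last sequence back into the channel's slot, and a fresh context primes itself with a read, so fence numbering survives channel recreation. The slots sit 16 bytes apart in one BO, giving channel N the word index N * 16 / 4. Roughly:

/* Sketch of the save/restore pair (slot layout as described above). */
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
/* ...and on the next context_new for this channel: */
fctx->base.sequence = nv84_fence_read(chan);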
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
index 573491f84792..e5a27df0672b 100644
--- a/drivers/gpu/drm/nouveau/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -479,6 +479,8 @@ struct nv50_disp_core_channel_dma_v0 {
479 __u32 pushbuf; 479 __u32 pushbuf;
480}; 480};
481 481
482#define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
483
482/* cursor immediate */ 484/* cursor immediate */
483struct nv50_disp_cursor_v0 { 485struct nv50_disp_cursor_v0 {
484 __u8 version; 486 __u8 version;
@@ -486,6 +488,8 @@ struct nv50_disp_cursor_v0 {
486 __u8 pad02[6]; 488 __u8 pad02[6];
487}; 489};
488 490
491#define NV50_DISP_CURSOR_V0_NTFY_UEVENT 0x00
492
489/* base */ 493/* base */
490struct nv50_disp_base_channel_dma_v0 { 494struct nv50_disp_base_channel_dma_v0 {
491 __u8 version; 495 __u8 version;
@@ -494,6 +498,8 @@ struct nv50_disp_base_channel_dma_v0 {
494 __u32 pushbuf; 498 __u32 pushbuf;
495}; 499};
496 500
501#define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
502
497/* overlay */ 503/* overlay */
498struct nv50_disp_overlay_channel_dma_v0 { 504struct nv50_disp_overlay_channel_dma_v0 {
499 __u8 version; 505 __u8 version;
@@ -502,6 +508,8 @@ struct nv50_disp_overlay_channel_dma_v0 {
502 __u32 pushbuf; 508 __u32 pushbuf;
503}; 509};
504 510
511#define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
512
505/* overlay immediate */ 513/* overlay immediate */
506struct nv50_disp_overlay_v0 { 514struct nv50_disp_overlay_v0 {
507 __u8 version; 515 __u8 version;
@@ -509,6 +517,7 @@ struct nv50_disp_overlay_v0 {
509 __u8 pad02[6]; 517 __u8 pad02[6];
510}; 518};
511 519
520#define NV50_DISP_OVERLAY_V0_NTFY_UEVENT 0x00
512 521
513/******************************************************************************* 522/*******************************************************************************
514 * fermi 523 * fermi
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 002b9721e85a..862ba03c236c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -629,6 +629,7 @@ static struct drm_driver omap_drm_driver = {
629 .lastclose = dev_lastclose, 629 .lastclose = dev_lastclose,
630 .preclose = dev_preclose, 630 .preclose = dev_preclose,
631 .postclose = dev_postclose, 631 .postclose = dev_postclose,
632 .set_busid = drm_platform_set_busid,
632 .get_vblank_counter = drm_vblank_count, 633 .get_vblank_counter = drm_vblank_count,
633 .enable_vblank = omap_irq_enable_vblank, 634 .enable_vblank = omap_irq_enable_vblank,
634 .disable_vblank = omap_irq_disable_vblank, 635 .disable_vblank = omap_irq_disable_vblank,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 84d73a61b34b..60e47b33c801 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -26,6 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/omap_drm.h> 28#include <drm/omap_drm.h>
29#include <drm/drm_gem.h>
29#include <linux/platform_data/omap_drm.h> 30#include <linux/platform_data/omap_drm.h>
30 31
31 32
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 4ce1db0a68ff..23de22f8c820 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -352,6 +352,30 @@ static const struct panel_desc auo_b101aw03 = {
352 }, 352 },
353}; 353};
354 354
355static const struct drm_display_mode auo_b101xtn01_mode = {
356 .clock = 72000,
357 .hdisplay = 1366,
358 .hsync_start = 1366 + 20,
359 .hsync_end = 1366 + 20 + 70,
360 .htotal = 1366 + 20 + 70,
361 .vdisplay = 768,
362 .vsync_start = 768 + 14,
363 .vsync_end = 768 + 14 + 42,
364 .vtotal = 768 + 14 + 42,
365 .vrefresh = 60,
366 .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
367};
368
369static const struct panel_desc auo_b101xtn01 = {
370 .modes = &auo_b101xtn01_mode,
371 .num_modes = 1,
372 .bpc = 6,
373 .size = {
374 .width = 223,
375 .height = 125,
376 },
377};
378
355static const struct drm_display_mode auo_b133xtn01_mode = { 379static const struct drm_display_mode auo_b133xtn01_mode = {
356 .clock = 69500, 380 .clock = 69500,
357 .hdisplay = 1366, 381 .hdisplay = 1366,
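
A quick consistency check on the new panel entry:

/*
 * htotal = 1366 + 20 + 70 = 1456; vtotal = 768 + 14 + 42 = 824
 * 1456 * 824 * 60 Hz = 71,984,640 Hz ~= 72.0 MHz -> .clock = 72000 kHz
 * Note htotal == hsync_end: the mode is specified with no horizontal
 * back porch, unusual but consistent with the values above.
 */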
@@ -616,6 +640,9 @@ static const struct of_device_id platform_of_match[] = {
616 .compatible = "auo,b101aw03", 640 .compatible = "auo,b101aw03",
617 .data = &auo_b101aw03, 641 .data = &auo_b101aw03,
618 }, { 642 }, {
643 .compatible = "auo,b101xtn01",
644 .data = &auo_b101xtn01,
645 }, {
619 .compatible = "auo,b133htn01", 646 .compatible = "auo,b133htn01",
620 .data = &auo_b133htn01, 647 .data = &auo_b133htn01,
621 }, { 648 }, {
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
index ea046ba691d2..bacc4aff1201 100644
--- a/drivers/gpu/drm/qxl/Makefile
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -4,6 +4,6 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o 7qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o
8 8
9obj-$(CONFIG_DRM_QXL)+= qxl.o 9obj-$(CONFIG_DRM_QXL)+= qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index eb89653a7a17..97823644d347 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -620,17 +620,10 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
620 if (ret == -EBUSY) 620 if (ret == -EBUSY)
621 return -EBUSY; 621 return -EBUSY;
622 622
623 if (surf->fence.num_active_releases > 0 && stall == false) {
624 qxl_bo_unreserve(surf);
625 return -EBUSY;
626 }
627
628 if (stall) 623 if (stall)
629 mutex_unlock(&qdev->surf_evict_mutex); 624 mutex_unlock(&qdev->surf_evict_mutex);
630 625
631 spin_lock(&surf->tbo.bdev->fence_lock);
632 ret = ttm_bo_wait(&surf->tbo, true, true, !stall); 626 ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
633 spin_unlock(&surf->tbo.bdev->fence_lock);
634 627
635 if (stall) 628 if (stall)
636 mutex_lock(&qdev->surf_evict_mutex); 629 mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index c3c2bbdc6674..6911b8c44492 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -58,9 +58,17 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
58 struct qxl_bo *bo; 58 struct qxl_bo *bo;
59 59
60 list_for_each_entry(bo, &qdev->gem.objects, list) { 60 list_for_each_entry(bo, &qdev->gem.objects, list) {
61 seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n", 61 struct reservation_object_list *fobj;
62 (unsigned long)bo->gem_base.size, bo->pin_count, 62 int rel;
63 bo->tbo.sync_obj, bo->fence.num_active_releases); 63
64 rcu_read_lock();
65 fobj = rcu_dereference(bo->tbo.resv->fence);
66 rel = fobj ? fobj->shared_count : 0;
67 rcu_read_unlock();
68
69 seq_printf(m, "size %ld, pc %d, num releases %d\n",
70 (unsigned long)bo->gem_base.size,
71 bo->pin_count, rel);
64 } 72 }
65 return 0; 73 return 0;
66} 74}
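
With the private qxl_fence structure gone, the debugfs dump counts pending releases straight off the reservation object. A plain RCU read section is all this needs: the shared-fence array is RCU-published, and a momentarily stale count is fine for a debugfs snapshot. The pattern in isolation:

/* Snapshot the shared-fence count without taking the ww_mutex;
 * good enough for best-effort reporting. */
struct reservation_object_list *fobj;
unsigned int rel;

rcu_read_lock();
fobj = rcu_dereference(bo->tbo.resv->fence);
rel = fobj ? fobj->shared_count : 0;
rcu_read_unlock();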
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index b8ced08b6291..af9e78546688 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -187,6 +187,54 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
187 kfree(qxl_crtc); 187 kfree(qxl_crtc);
188} 188}
189 189
190static int qxl_crtc_page_flip(struct drm_crtc *crtc,
191 struct drm_framebuffer *fb,
192 struct drm_pending_vblank_event *event,
193 uint32_t page_flip_flags)
194{
195 struct drm_device *dev = crtc->dev;
196 struct qxl_device *qdev = dev->dev_private;
197 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
198 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
199 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
200 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
201 struct qxl_bo *bo = gem_to_qxl_bo(qfb_src->obj);
202 unsigned long flags;
203 struct drm_clip_rect norect = {
204 .x1 = 0,
205 .y1 = 0,
206 .x2 = fb->width,
207 .y2 = fb->height
208 };
209 int inc = 1;
210 int one_clip_rect = 1;
211 int ret = 0;
212
213 crtc->primary->fb = fb;
214 bo_old->is_primary = false;
215 bo->is_primary = true;
216
217 ret = qxl_bo_reserve(bo, false);
218 if (ret)
219 return ret;
220
221 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
222 &norect, one_clip_rect, inc);
223
224 drm_vblank_get(dev, qcrtc->index);
225
226 if (event) {
227 spin_lock_irqsave(&dev->event_lock, flags);
228 drm_send_vblank_event(dev, qcrtc->index, event);
229 spin_unlock_irqrestore(&dev->event_lock, flags);
230 }
231 drm_vblank_put(dev, qcrtc->index);
232
233 qxl_bo_unreserve(bo);
234
235 return 0;
236}
237
190static int 238static int
191qxl_hide_cursor(struct qxl_device *qdev) 239qxl_hide_cursor(struct qxl_device *qdev)
192{ 240{
@@ -374,6 +422,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
374 .cursor_move = qxl_crtc_cursor_move, 422 .cursor_move = qxl_crtc_cursor_move,
375 .set_config = drm_crtc_helper_set_config, 423 .set_config = drm_crtc_helper_set_config,
376 .destroy = qxl_crtc_destroy, 424 .destroy = qxl_crtc_destroy,
425 .page_flip = qxl_crtc_page_flip,
377}; 426};
378 427
379static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) 428static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index a3fd92029a14..1d9b80c91a15 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -84,6 +84,7 @@ static const struct file_operations qxl_fops = {
84 .release = drm_release, 84 .release = drm_release,
85 .unlocked_ioctl = drm_ioctl, 85 .unlocked_ioctl = drm_ioctl,
86 .poll = drm_poll, 86 .poll = drm_poll,
87 .read = drm_read,
87 .mmap = qxl_mmap, 88 .mmap = qxl_mmap,
88}; 89};
89 90
@@ -195,6 +196,20 @@ static int qxl_pm_restore(struct device *dev)
195 return qxl_drm_resume(drm_dev, false); 196 return qxl_drm_resume(drm_dev, false);
196} 197}
197 198
199static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, int crtc)
200{
201 return dev->vblank[crtc].count.counter;
202}
203
204static int qxl_noop_enable_vblank(struct drm_device *dev, int crtc)
205{
206 return 0;
207}
208
209static void qxl_noop_disable_vblank(struct drm_device *dev, int crtc)
210{
211}
212
198static const struct dev_pm_ops qxl_pm_ops = { 213static const struct dev_pm_ops qxl_pm_ops = {
199 .suspend = qxl_pm_suspend, 214 .suspend = qxl_pm_suspend,
200 .resume = qxl_pm_resume, 215 .resume = qxl_pm_resume,
@@ -212,10 +227,15 @@ static struct pci_driver qxl_pci_driver = {
212}; 227};
213 228
214static struct drm_driver qxl_driver = { 229static struct drm_driver qxl_driver = {
215 .driver_features = DRIVER_GEM | DRIVER_MODESET | 230 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
216 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 231 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
217 .load = qxl_driver_load, 232 .load = qxl_driver_load,
218 .unload = qxl_driver_unload, 233 .unload = qxl_driver_unload,
234 .get_vblank_counter = qxl_noop_get_vblank_counter,
235 .enable_vblank = qxl_noop_enable_vblank,
236 .disable_vblank = qxl_noop_disable_vblank,
237
238 .set_busid = drm_pci_set_busid,
219 239
220 .dumb_create = qxl_mode_dumb_create, 240 .dumb_create = qxl_mode_dumb_create,
221 .dumb_map_offset = qxl_mode_dumb_mmap, 241 .dumb_map_offset = qxl_mode_dumb_mmap,
@@ -224,6 +244,17 @@ static struct drm_driver qxl_driver = {
224 .debugfs_init = qxl_debugfs_init, 244 .debugfs_init = qxl_debugfs_init,
225 .debugfs_cleanup = qxl_debugfs_takedown, 245 .debugfs_cleanup = qxl_debugfs_takedown,
226#endif 246#endif
247 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
248 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
249 .gem_prime_export = drm_gem_prime_export,
250 .gem_prime_import = drm_gem_prime_import,
251 .gem_prime_pin = qxl_gem_prime_pin,
252 .gem_prime_unpin = qxl_gem_prime_unpin,
253 .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
254 .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
255 .gem_prime_vmap = qxl_gem_prime_vmap,
256 .gem_prime_vunmap = qxl_gem_prime_vunmap,
257 .gem_prime_mmap = qxl_gem_prime_mmap,
227 .gem_free_object = qxl_gem_object_free, 258 .gem_free_object = qxl_gem_object_free,
228 .gem_open_object = qxl_gem_object_open, 259 .gem_open_object = qxl_gem_object_open,
229 .gem_close_object = qxl_gem_object_close, 260 .gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 36ed40ba773f..7c6cafe21f5f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,6 +31,7 @@
31 * Definitions taken from spice-protocol, plus kernel driver specific bits. 31 * Definitions taken from spice-protocol, plus kernel driver specific bits.
32 */ 32 */
33 33
34#include <linux/fence.h>
34#include <linux/workqueue.h> 35#include <linux/workqueue.h>
35#include <linux/firmware.h> 36#include <linux/firmware.h>
36#include <linux/platform_device.h> 37#include <linux/platform_device.h>
@@ -42,6 +43,8 @@
42#include <ttm/ttm_placement.h> 43#include <ttm/ttm_placement.h>
43#include <ttm/ttm_module.h> 44#include <ttm/ttm_module.h>
44 45
46#include <drm/drm_gem.h>
47
45/* just for ttm_validate_buffer */ 48/* just for ttm_validate_buffer */
46#include <ttm/ttm_execbuf_util.h> 49#include <ttm/ttm_execbuf_util.h>
47 50
@@ -95,31 +98,24 @@ enum {
95 QXL_INTERRUPT_IO_CMD |\ 98 QXL_INTERRUPT_IO_CMD |\
96 QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) 99 QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
97 100
98struct qxl_fence {
99 struct qxl_device *qdev;
100 uint32_t num_active_releases;
101 uint32_t *release_ids;
102 struct radix_tree_root tree;
103};
104
105struct qxl_bo { 101struct qxl_bo {
106 /* Protected by gem.mutex */ 102 /* Protected by gem.mutex */
107 struct list_head list; 103 struct list_head list;
108 /* Protected by tbo.reserved */ 104 /* Protected by tbo.reserved */
109 u32 placements[3]; 105 struct ttm_place placements[3];
110 struct ttm_placement placement; 106 struct ttm_placement placement;
111 struct ttm_buffer_object tbo; 107 struct ttm_buffer_object tbo;
112 struct ttm_bo_kmap_obj kmap; 108 struct ttm_bo_kmap_obj kmap;
113 unsigned pin_count; 109 unsigned pin_count;
114 void *kptr; 110 void *kptr;
115 int type; 111 int type;
112
116 /* Constant after initialization */ 113 /* Constant after initialization */
117 struct drm_gem_object gem_base; 114 struct drm_gem_object gem_base;
118 bool is_primary; /* is this now a primary surface */ 115 bool is_primary; /* is this now a primary surface */
119 bool hw_surf_alloc; 116 bool hw_surf_alloc;
120 struct qxl_surface surf; 117 struct qxl_surface surf;
121 uint32_t surface_id; 118 uint32_t surface_id;
122 struct qxl_fence fence; /* per bo fence - list of releases */
123 struct qxl_release *surf_create; 119 struct qxl_release *surf_create;
124}; 120};
125#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) 121#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
@@ -191,6 +187,8 @@ enum {
191 * spice-protocol/qxl_dev.h */ 187 * spice-protocol/qxl_dev.h */
192#define QXL_MAX_RES 96 188#define QXL_MAX_RES 96
193struct qxl_release { 189struct qxl_release {
190 struct fence base;
191
194 int id; 192 int id;
195 int type; 193 int type;
196 uint32_t release_offset; 194 uint32_t release_offset;
@@ -284,7 +282,9 @@ struct qxl_device {
284 uint8_t slot_gen_bits; 282 uint8_t slot_gen_bits;
285 uint64_t va_slot_mask; 283 uint64_t va_slot_mask;
286 284
285 spinlock_t release_lock;
287 struct idr release_idr; 286 struct idr release_idr;
287 uint32_t release_seqno;
288 spinlock_t release_idr_lock; 288 spinlock_t release_idr_lock;
289 struct mutex async_io_mutex; 289 struct mutex async_io_mutex;
290 unsigned int last_sent_io_cmd; 290 unsigned int last_sent_io_cmd;
@@ -532,6 +532,18 @@ int qxl_garbage_collect(struct qxl_device *qdev);
532int qxl_debugfs_init(struct drm_minor *minor); 532int qxl_debugfs_init(struct drm_minor *minor);
533void qxl_debugfs_takedown(struct drm_minor *minor); 533void qxl_debugfs_takedown(struct drm_minor *minor);
534 534
535/* qxl_prime.c */
536int qxl_gem_prime_pin(struct drm_gem_object *obj);
537void qxl_gem_prime_unpin(struct drm_gem_object *obj);
538struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj);
539struct drm_gem_object *qxl_gem_prime_import_sg_table(
540 struct drm_device *dev, struct dma_buf_attachment *attach,
541 struct sg_table *sgt);
542void *qxl_gem_prime_vmap(struct drm_gem_object *obj);
543void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
544int qxl_gem_prime_mmap(struct drm_gem_object *obj,
545 struct vm_area_struct *vma);
546
535/* qxl_irq.c */ 547/* qxl_irq.c */
536int qxl_irq_init(struct qxl_device *qdev); 548int qxl_irq_init(struct qxl_device *qdev);
537irqreturn_t qxl_irq_handler(int irq, void *arg); 549irqreturn_t qxl_irq_handler(int irq, void *arg);
@@ -561,10 +573,4 @@ qxl_surface_lookup(struct drm_device *dev, int surface_id);
561void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); 573void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
562int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); 574int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
563 575
564/* qxl_fence.c */
565void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
566int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
567int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
568void qxl_fence_fini(struct qxl_fence *qfence);
569
570#endif 576#endif
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index df567888bb1e..3d7c1d00a424 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -625,7 +625,8 @@ static int qxl_fb_find_or_create_single(
625 struct drm_fb_helper *helper, 625 struct drm_fb_helper *helper,
626 struct drm_fb_helper_surface_size *sizes) 626 struct drm_fb_helper_surface_size *sizes)
627{ 627{
628 struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper; 628 struct qxl_fbdev *qfbdev =
629 container_of(helper, struct qxl_fbdev, helper);
629 int new_fb = 0; 630 int new_fb = 0;
630 int ret; 631 int ret;
631 632
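
The container_of() fix is worth imitating: the old cast only worked because helper happened to be the first member of struct qxl_fbdev. container_of() derives the enclosing pointer from the member's offset, so it stays correct under any layout, as in this hypothetical reordering:

struct demo_fbdev {
	int flags;			/* hypothetical new first member */
	struct drm_fb_helper helper;
};

/* Still correct no matter where `helper` sits in the struct: */
struct demo_fbdev *d = container_of(helper, struct demo_fbdev, helper);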
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
deleted file mode 100644
index ae59e91cfb9a..000000000000
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#include "qxl_drv.h"
28
29/* QXL fencing-
30
31 When we submit operations to the GPU we pass a release reference to the GPU
32 with them, the release reference is then added to the release ring when
33 the GPU is finished with that particular operation and has removed it from
34 its tree.
35
36 So we can have multiple outstanding non-linear fences per object.
37
38 From a TTM POV we only care if the object has any outstanding releases on
39 it.
40
41 we wait until all outstanding releases are processed.
42
43 sync object is just a list of release ids that represent that fence on
44 that buffer.
45
46 we just add new releases onto the sync object attached to the object.
47
48 This currently uses a radix tree to store the list of release ids.
49
50 For some reason every so often qxl hw fails to release, things go wrong.
51*/
52/* must be called with the fence lock held */
53void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
54{
55 radix_tree_insert(&qfence->tree, rel_id, qfence);
56 qfence->num_active_releases++;
57}
58
59int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
60{
61 void *ret;
62 int retval = 0;
63 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
64
65 spin_lock(&bo->tbo.bdev->fence_lock);
66
67 ret = radix_tree_delete(&qfence->tree, rel_id);
68 if (ret == qfence)
69 qfence->num_active_releases--;
70 else {
71 DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
72 retval = -ENOENT;
73 }
74 spin_unlock(&bo->tbo.bdev->fence_lock);
75 return retval;
76}
77
78
79int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
80{
81 qfence->qdev = qdev;
82 qfence->num_active_releases = 0;
83 INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
84 return 0;
85}
86
87void qxl_fence_fini(struct qxl_fence *qfence)
88{
89 kfree(qfence->release_ids);
90 qfence->num_active_releases = 0;
91}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index fd88eb4a3f79..b2977a181935 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -223,6 +223,7 @@ static int qxl_device_init(struct qxl_device *qdev,
223 223
224 idr_init(&qdev->release_idr); 224 idr_init(&qdev->release_idr);
225 spin_lock_init(&qdev->release_idr_lock); 225 spin_lock_init(&qdev->release_idr_lock);
226 spin_lock_init(&qdev->release_lock);
226 227
227 idr_init(&qdev->surf_id_idr); 228 idr_init(&qdev->surf_id_idr);
228 spin_lock_init(&qdev->surf_id_idr_lock); 229 spin_lock_init(&qdev->surf_id_idr_lock);
@@ -297,6 +298,9 @@ int qxl_driver_unload(struct drm_device *dev)
297 298
298 if (qdev == NULL) 299 if (qdev == NULL)
299 return 0; 300 return 0;
301
302 drm_vblank_cleanup(dev);
303
300 qxl_modeset_fini(qdev); 304 qxl_modeset_fini(qdev);
301 qxl_device_fini(qdev); 305 qxl_device_fini(qdev);
302 306
@@ -324,15 +328,20 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
324 if (r) 328 if (r)
325 goto out; 329 goto out;
326 330
331 r = drm_vblank_init(dev, 1);
332 if (r)
333 goto unload;
334
327 r = qxl_modeset_init(qdev); 335 r = qxl_modeset_init(qdev);
328 if (r) { 336 if (r)
329 qxl_driver_unload(dev); 337 goto unload;
330 goto out;
331 }
332 338
333 drm_kms_helper_poll_init(qdev->ddev); 339 drm_kms_helper_poll_init(qdev->ddev);
334 340
335 return 0; 341 return 0;
342unload:
343 qxl_driver_unload(dev);
344
336out: 345out:
337 kfree(qdev); 346 kfree(qdev);
338 return r; 347 return r;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b95f144f0b49..cdeaf08fdc74 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -36,7 +36,6 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
37 37
38 qxl_surface_evict(qdev, bo, false); 38 qxl_surface_evict(qdev, bo, false);
39 qxl_fence_fini(&bo->fence);
40 mutex_lock(&qdev->gem.mutex); 39 mutex_lock(&qdev->gem.mutex);
41 list_del_init(&bo->list); 40 list_del_init(&bo->list);
42 mutex_unlock(&qdev->gem.mutex); 41 mutex_unlock(&qdev->gem.mutex);
@@ -55,21 +54,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
55{ 54{
56 u32 c = 0; 55 u32 c = 0;
57 u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; 56 u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
57 unsigned i;
58 58
59 qbo->placement.fpfn = 0;
60 qbo->placement.lpfn = 0;
61 qbo->placement.placement = qbo->placements; 59 qbo->placement.placement = qbo->placements;
62 qbo->placement.busy_placement = qbo->placements; 60 qbo->placement.busy_placement = qbo->placements;
63 if (domain == QXL_GEM_DOMAIN_VRAM) 61 if (domain == QXL_GEM_DOMAIN_VRAM)
64 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; 62 qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
65 if (domain == QXL_GEM_DOMAIN_SURFACE) 63 if (domain == QXL_GEM_DOMAIN_SURFACE)
66 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; 64 qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
67 if (domain == QXL_GEM_DOMAIN_CPU) 65 if (domain == QXL_GEM_DOMAIN_CPU)
68 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; 66 qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
69 if (!c) 67 if (!c)
70 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 68 qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
71 qbo->placement.num_placement = c; 69 qbo->placement.num_placement = c;
72 qbo->placement.num_busy_placement = c; 70 qbo->placement.num_busy_placement = c;
71 for (i = 0; i < c; ++i) {
72 qbo->placements[i].fpfn = 0;
73 qbo->placements[i].lpfn = 0;
74 }
73} 75}
74 76
75 77
@@ -99,7 +101,6 @@ int qxl_bo_create(struct qxl_device *qdev,
99 bo->type = domain; 101 bo->type = domain;
100 bo->pin_count = pinned ? 1 : 0; 102 bo->pin_count = pinned ? 1 : 0;
101 bo->surface_id = 0; 103 bo->surface_id = 0;
102 qxl_fence_init(qdev, &bo->fence);
103 INIT_LIST_HEAD(&bo->list); 104 INIT_LIST_HEAD(&bo->list);
104 105
105 if (surf) 106 if (surf)
@@ -109,7 +110,7 @@ int qxl_bo_create(struct qxl_device *qdev,
109 110
110 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 111 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
111 &bo->placement, 0, !kernel, NULL, size, 112 &bo->placement, 0, !kernel, NULL, size,
112 NULL, &qxl_ttm_bo_destroy); 113 NULL, NULL, &qxl_ttm_bo_destroy);
113 if (unlikely(r != 0)) { 114 if (unlikely(r != 0)) {
114 if (r != -ERESTARTSYS) 115 if (r != -ERESTARTSYS)
115 dev_err(qdev->dev, 116 dev_err(qdev->dev,
@@ -259,7 +260,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
259 if (bo->pin_count) 260 if (bo->pin_count)
260 return 0; 261 return 0;
261 for (i = 0; i < bo->placement.num_placement; i++) 262 for (i = 0; i < bo->placement.num_placement; i++)
262 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 263 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
263 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 264 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
264 if (unlikely(r != 0)) 265 if (unlikely(r != 0))
265 dev_err(qdev->dev, "%p validate failed for unpin\n", bo); 266 dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 83a423293afd..37af1bc0dd00 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -76,12 +76,10 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
76 } 76 }
77 return r; 77 return r;
78 } 78 }
79 spin_lock(&bo->tbo.bdev->fence_lock);
80 if (mem_type) 79 if (mem_type)
81 *mem_type = bo->tbo.mem.mem_type; 80 *mem_type = bo->tbo.mem.mem_type;
82 if (bo->tbo.sync_obj) 81
83 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 82 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
84 spin_unlock(&bo->tbo.bdev->fence_lock);
85 ttm_bo_unreserve(&bo->tbo); 83 ttm_bo_unreserve(&bo->tbo);
86 return r; 84 return r;
87} 85}
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
new file mode 100644
index 000000000000..3d031b50a8fd
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -0,0 +1,72 @@
1/*
2 * Copyright 2014 Canonical
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Andreas Pokorny
23 */
24
25#include "qxl_drv.h"
26
27/* Empty Implementations as there should not be any other driver for a virtual
28 * device that might share buffers with qxl */
29
30int qxl_gem_prime_pin(struct drm_gem_object *obj)
31{
32 WARN_ONCE(1, "not implemented");
33 return -ENOSYS;
34}
35
36void qxl_gem_prime_unpin(struct drm_gem_object *obj)
37{
38 WARN_ONCE(1, "not implemented");
39}
40
41
42struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
43{
44 WARN_ONCE(1, "not implemented");
45 return ERR_PTR(-ENOSYS);
46}
47
48struct drm_gem_object *qxl_gem_prime_import_sg_table(
49 struct drm_device *dev, struct dma_buf_attachment *attach,
50 struct sg_table *table)
51{
52 WARN_ONCE(1, "not implemented");
53 return ERR_PTR(-ENOSYS);
54}
55
56void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
57{
58 WARN_ONCE(1, "not implemented");
59 return ERR_PTR(-ENOSYS);
60}
61
62void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
63{
64 WARN_ONCE(1, "not implemented");
65}
66
67int qxl_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area)
69{
70 WARN_ONCE(1, "not implemented");
71 return -ENOSYS;
72}
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 14e776f1d14e..446e71ca36cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,6 +21,7 @@
21 */ 21 */
22#include "qxl_drv.h" 22#include "qxl_drv.h"
23#include "qxl_object.h" 23#include "qxl_object.h"
24#include <trace/events/fence.h>
24 25
25/* 26/*
26 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate 27 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -39,6 +40,88 @@
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 40static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 41static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41 42
43static const char *qxl_get_driver_name(struct fence *fence)
44{
45 return "qxl";
46}
47
48static const char *qxl_get_timeline_name(struct fence *fence)
49{
50 return "release";
51}
52
53static bool qxl_nop_signaling(struct fence *fence)
54{
55 /* fences are always automatically signaled, so just pretend we did this.. */
56 return true;
57}
58
59static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
60{
61 struct qxl_device *qdev;
62 struct qxl_release *release;
63 int count = 0, sc = 0;
64 bool have_drawable_releases;
65 unsigned long cur, end = jiffies + timeout;
66
67 qdev = container_of(fence->lock, struct qxl_device, release_lock);
68 release = container_of(fence, struct qxl_release, base);
69 have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
70
71retry:
72 sc++;
73
74 if (fence_is_signaled(fence))
75 goto signaled;
76
77 qxl_io_notify_oom(qdev);
78
79 for (count = 0; count < 11; count++) {
80 if (!qxl_queue_garbage_collect(qdev, true))
81 break;
82
83 if (fence_is_signaled(fence))
84 goto signaled;
85 }
86
87 if (fence_is_signaled(fence))
88 goto signaled;
89
90 if (have_drawable_releases || sc < 4) {
91 if (sc > 2)
92 /* back off */
93 usleep_range(500, 1000);
94
95 if (time_after(jiffies, end))
96 return 0;
97
98 if (have_drawable_releases && sc > 300) {
99 FENCE_WARN(fence, "failed to wait on release %d "
100 "after spincount %d\n",
101 fence->context & ~0xf0000000, sc);
102 goto signaled;
103 }
104 goto retry;
105 }
106 /*
107 * yeah, original sync_obj_wait gave up after 3 spins when
108 * have_drawable_releases is not set.
109 */
110
111signaled:
112 cur = jiffies;
113 if (time_after(cur, end))
114 return 0;
115 return end - cur;
116}
117
118static const struct fence_ops qxl_fence_ops = {
119 .get_driver_name = qxl_get_driver_name,
120 .get_timeline_name = qxl_get_timeline_name,
121 .enable_signaling = qxl_nop_signaling,
122 .wait = qxl_fence_wait,
123};
124
42static uint64_t 125static uint64_t
43qxl_release_alloc(struct qxl_device *qdev, int type, 126qxl_release_alloc(struct qxl_device *qdev, int type,
44 struct qxl_release **ret) 127 struct qxl_release **ret)
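
qxl_fence_wait() implements the struct fence_ops wait contract: return 0 when the timeout elapses, otherwise the jiffies remaining. The notify-oom/garbage-collect polling mirrors what the old sync_obj wait did. Callers now go through the generic fence API, e.g. (timeout value hypothetical):

long left = fence_wait_timeout(&release->base, true,
			       msecs_to_jiffies(100));
if (left == 0)
	return -EBUSY;	/* sketch: treat timeout as busy */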
@@ -46,13 +129,13 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
46 struct qxl_release *release; 129 struct qxl_release *release;
47 int handle; 130 int handle;
48 size_t size = sizeof(*release); 131 size_t size = sizeof(*release);
49 int idr_ret;
50 132
51 release = kmalloc(size, GFP_KERNEL); 133 release = kmalloc(size, GFP_KERNEL);
52 if (!release) { 134 if (!release) {
53 DRM_ERROR("Out of memory\n"); 135 DRM_ERROR("Out of memory\n");
54 return 0; 136 return 0;
55 } 137 }
138 release->base.ops = NULL;
56 release->type = type; 139 release->type = type;
57 release->release_offset = 0; 140 release->release_offset = 0;
58 release->surface_release_id = 0; 141 release->surface_release_id = 0;
@@ -60,44 +143,61 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
60 143
61 idr_preload(GFP_KERNEL); 144 idr_preload(GFP_KERNEL);
62 spin_lock(&qdev->release_idr_lock); 145 spin_lock(&qdev->release_idr_lock);
63 idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); 146 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
147 release->base.seqno = ++qdev->release_seqno;
64 spin_unlock(&qdev->release_idr_lock); 148 spin_unlock(&qdev->release_idr_lock);
65 idr_preload_end(); 149 idr_preload_end();
66 handle = idr_ret; 150 if (handle < 0) {
67 if (idr_ret < 0) 151 kfree(release);
68 goto release_fail; 152 *ret = NULL;
153 return handle;
154 }
69 *ret = release; 155 *ret = release;
70	QXL_INFO(qdev, "allocated release %d\n", handle); 156	QXL_INFO(qdev, "allocated release %d\n", handle);
71 release->id = handle; 157 release->id = handle;
72release_fail:
73
74 return handle; 158 return handle;
75} 159}
76 160
161static void
162qxl_release_free_list(struct qxl_release *release)
163{
164 while (!list_empty(&release->bos)) {
165 struct qxl_bo_list *entry;
166 struct qxl_bo *bo;
167
168 entry = container_of(release->bos.next,
169 struct qxl_bo_list, tv.head);
170 bo = to_qxl_bo(entry->tv.bo);
171 qxl_bo_unref(&bo);
172 list_del(&entry->tv.head);
173 kfree(entry);
174 }
175}
176
77void 177void
78qxl_release_free(struct qxl_device *qdev, 178qxl_release_free(struct qxl_device *qdev,
79 struct qxl_release *release) 179 struct qxl_release *release)
80{ 180{
81 struct qxl_bo_list *entry, *tmp;
82 QXL_INFO(qdev, "release %d, type %d\n", release->id, 181 QXL_INFO(qdev, "release %d, type %d\n", release->id,
83 release->type); 182 release->type);
84 183
85 if (release->surface_release_id) 184 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id); 185 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87 186
88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
90 QXL_INFO(qdev, "release %llx\n",
91 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
92 - DRM_FILE_OFFSET);
93 qxl_fence_remove_release(&bo->fence, release->id);
94 qxl_bo_unref(&bo);
95 kfree(entry);
96 }
97 spin_lock(&qdev->release_idr_lock); 187 spin_lock(&qdev->release_idr_lock);
98 idr_remove(&qdev->release_idr, release->id); 188 idr_remove(&qdev->release_idr, release->id);
99 spin_unlock(&qdev->release_idr_lock); 189 spin_unlock(&qdev->release_idr_lock);
100 kfree(release); 190
191 if (release->base.ops) {
192 WARN_ON(list_empty(&release->bos));
193 qxl_release_free_list(release);
194
195 fence_signal(&release->base);
196 fence_put(&release->base);
197 } else {
198 qxl_release_free_list(release);
199 kfree(release);
200 }
101} 201}
102 202
103static int qxl_release_bo_alloc(struct qxl_device *qdev, 203static int qxl_release_bo_alloc(struct qxl_device *qdev,
@@ -126,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
126 226
127 qxl_bo_ref(bo); 227 qxl_bo_ref(bo);
128 entry->tv.bo = &bo->tbo; 228 entry->tv.bo = &bo->tbo;
229 entry->tv.shared = false;
129 list_add_tail(&entry->tv.head, &release->bos); 230 list_add_tail(&entry->tv.head, &release->bos);
130 return 0; 231 return 0;
131} 232}
@@ -142,6 +243,10 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
142 return ret; 243 return ret;
143 } 244 }
144 245
246 ret = reservation_object_reserve_shared(bo->tbo.resv);
247 if (ret)
248 return ret;
249
145 /* allocate a surface for reserved + validated buffers */ 250 /* allocate a surface for reserved + validated buffers */
146 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); 251 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
147 if (ret) 252 if (ret)
@@ -159,7 +264,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
159 if (list_is_singular(&release->bos)) 264 if (list_is_singular(&release->bos))
160 return 0; 265 return 0;
161 266
162 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); 267 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
163 if (ret) 268 if (ret)
164 return ret; 269 return ret;
165 270
@@ -199,6 +304,8 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
199 304
200 /* stash the release after the create command */ 305 /* stash the release after the create command */
201 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 306 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
307 if (idr_ret < 0)
308 return idr_ret;
202 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); 309 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
203 310
204 (*release)->release_offset = create_rel->release_offset + 64; 311 (*release)->release_offset = create_rel->release_offset + 64;
@@ -239,6 +346,11 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
239 } 346 }
240 347
241 idr_ret = qxl_release_alloc(qdev, type, release); 348 idr_ret = qxl_release_alloc(qdev, type, release);
349 if (idr_ret < 0) {
350 if (rbo)
351 *rbo = NULL;
352 return idr_ret;
353 }
242 354
243 mutex_lock(&qdev->release_mutex); 355 mutex_lock(&qdev->release_mutex);
244 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { 356 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
@@ -319,40 +431,44 @@ void qxl_release_unmap(struct qxl_device *qdev,
319 431
320void qxl_release_fence_buffer_objects(struct qxl_release *release) 432void qxl_release_fence_buffer_objects(struct qxl_release *release)
321{ 433{
322 struct ttm_validate_buffer *entry;
323 struct ttm_buffer_object *bo; 434 struct ttm_buffer_object *bo;
324 struct ttm_bo_global *glob; 435 struct ttm_bo_global *glob;
325 struct ttm_bo_device *bdev; 436 struct ttm_bo_device *bdev;
326 struct ttm_bo_driver *driver; 437 struct ttm_bo_driver *driver;
327 struct qxl_bo *qbo; 438 struct qxl_bo *qbo;
439 struct ttm_validate_buffer *entry;
440 struct qxl_device *qdev;
328 441
329	/* if only one object on the release it's the release itself 442	/* if only one object on the release it's the release itself
330	   since these objects are pinned, no need to reserve */ 443	   since these objects are pinned, no need to reserve */
331 if (list_is_singular(&release->bos)) 444 if (list_is_singular(&release->bos) || list_empty(&release->bos))
332 return; 445 return;
333 446
334 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; 447 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
335 bdev = bo->bdev; 448 bdev = bo->bdev;
449 qdev = container_of(bdev, struct qxl_device, mman.bdev);
450
451 /*
452 * Since we never really allocated a context and we don't want to conflict,
453 * set the highest bits. This will break if we really allow exporting of dma-bufs.
454 */
455 fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
456 release->id | 0xf0000000, release->base.seqno);
457 trace_fence_emit(&release->base);
458
336 driver = bdev->driver; 459 driver = bdev->driver;
337 glob = bo->glob; 460 glob = bo->glob;
338 461
339 spin_lock(&glob->lru_lock); 462 spin_lock(&glob->lru_lock);
340 spin_lock(&bdev->fence_lock);
341 463
342 list_for_each_entry(entry, &release->bos, head) { 464 list_for_each_entry(entry, &release->bos, head) {
343 bo = entry->bo; 465 bo = entry->bo;
344 qbo = to_qxl_bo(bo); 466 qbo = to_qxl_bo(bo);
345 467
346 if (!entry->bo->sync_obj) 468 reservation_object_add_shared_fence(bo->resv, &release->base);
347 entry->bo->sync_obj = &qbo->fence;
348
349 qxl_fence_add_release_locked(&qbo->fence, release->id);
350
351 ttm_bo_add_to_lru(bo); 469 ttm_bo_add_to_lru(bo);
352 __ttm_bo_unreserve(bo); 470 __ttm_bo_unreserve(bo);
353 entry->reserved = false;
354 } 471 }
355 spin_unlock(&bdev->fence_lock);
356 spin_unlock(&glob->lru_lock); 472 spin_unlock(&glob->lru_lock);
357 ww_acquire_fini(&release->ticket); 473 ww_acquire_fini(&release->ticket);
358} 474}
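
The qxl_release.c hunk above retires the driver-private qxl_fence in favour of the generic struct fence from <linux/fence.h>: every release now embeds a fence, qxl_release_fence_buffer_objects() initializes it with a fake context (release->id with the high bits set, as the comment in the hunk notes) and publishes it as a shared fence on each buffer's reservation object, and qxl_release_free() signals and drops it. The skeleton of that pattern, reduced to the generic-fence calls the hunk uses; the ops, lock, context and seqno here are illustrative stand-ins, not qxl's real values:

/* Minimal sketch of a driver object embedding a struct fence (3.18-era API). */
#include <linux/fence.h>
#include <linux/spinlock.h>

struct my_release {
	struct fence base;	/* must be signaled and put exactly once */
	/* driver-private state follows */
};

static const char *my_driver_name(struct fence *f) { return "mydrv"; }
static const char *my_timeline_name(struct fence *f) { return "release"; }

/* Returning true means signaling is enabled, fine for CPU-signaled fences. */
static bool my_enable_signaling(struct fence *f) { return true; }

static const struct fence_ops my_fence_ops = {
	.get_driver_name	= my_driver_name,
	.get_timeline_name	= my_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= fence_default_wait,	/* qxl installs its own wait above */
};

static DEFINE_SPINLOCK(my_fence_lock);

static void my_release_publish(struct my_release *rel, unsigned context, unsigned seqno)
{
	fence_init(&rel->base, &my_fence_ops, &my_fence_lock, context, seqno);
	/* ... reservation_object_add_shared_fence() on each buffer ... */
}

static void my_release_complete(struct my_release *rel)
{
	fence_signal(&rel->base);	/* work finished */
	fence_put(&rel->base);		/* drop the reference fence_init created */
}
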
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 71a1baeac14e..0cbc4c987164 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -127,7 +127,7 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
127 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { 127 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
128 pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n", 128 pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
129 __func__, vma->vm_pgoff); 129 __func__, vma->vm_pgoff);
130 return drm_mmap(filp, vma); 130 return -EINVAL;
131 } 131 }
132 132
133 file_priv = filp->private_data; 133 file_priv = filp->private_data;
@@ -188,11 +188,13 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
188 struct ttm_placement *placement) 188 struct ttm_placement *placement)
189{ 189{
190 struct qxl_bo *qbo; 190 struct qxl_bo *qbo;
191 static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 191 static struct ttm_place placements = {
192 .fpfn = 0,
193 .lpfn = 0,
194 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
195 };
192 196
193 if (!qxl_ttm_bo_is_qxl_bo(bo)) { 197 if (!qxl_ttm_bo_is_qxl_bo(bo)) {
194 placement->fpfn = 0;
195 placement->lpfn = 0;
196 placement->placement = &placements; 198 placement->placement = &placements;
197 placement->busy_placement = &placements; 199 placement->busy_placement = &placements;
198 placement->num_placement = 1; 200 placement->num_placement = 1;
@@ -355,92 +357,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
355 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 357 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
356} 358}
357 359
358
359static int qxl_sync_obj_wait(void *sync_obj,
360 bool lazy, bool interruptible)
361{
362 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
363 int count = 0, sc = 0;
364 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
365
366 if (qfence->num_active_releases == 0)
367 return 0;
368
369retry:
370 if (sc == 0) {
371 if (bo->type == QXL_GEM_DOMAIN_SURFACE)
372 qxl_update_surface(qfence->qdev, bo);
373 } else if (sc >= 1) {
374 qxl_io_notify_oom(qfence->qdev);
375 }
376
377 sc++;
378
379 for (count = 0; count < 10; count++) {
380 bool ret;
381 ret = qxl_queue_garbage_collect(qfence->qdev, true);
382 if (ret == false)
383 break;
384
385 if (qfence->num_active_releases == 0)
386 return 0;
387 }
388
389 if (qfence->num_active_releases) {
390 bool have_drawable_releases = false;
391 void **slot;
392 struct radix_tree_iter iter;
393 int release_id;
394
395 radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
396 struct qxl_release *release;
397
398 release_id = iter.index;
399 release = qxl_release_from_id_locked(qfence->qdev, release_id);
400 if (release == NULL)
401 continue;
402
403 if (release->type == QXL_RELEASE_DRAWABLE)
404 have_drawable_releases = true;
405 }
406
407 qxl_queue_garbage_collect(qfence->qdev, true);
408
409 if (have_drawable_releases || sc < 4) {
410 if (sc > 2)
411 /* back off */
412 usleep_range(500, 1000);
413 if (have_drawable_releases && sc > 300) {
414 WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
415 return -EBUSY;
416 }
417 goto retry;
418 }
419 }
420 return 0;
421}
422
423static int qxl_sync_obj_flush(void *sync_obj)
424{
425 return 0;
426}
427
428static void qxl_sync_obj_unref(void **sync_obj)
429{
430 *sync_obj = NULL;
431}
432
433static void *qxl_sync_obj_ref(void *sync_obj)
434{
435 return sync_obj;
436}
437
438static bool qxl_sync_obj_signaled(void *sync_obj)
439{
440 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
441 return (qfence->num_active_releases == 0);
442}
443
444static void qxl_bo_move_notify(struct ttm_buffer_object *bo, 360static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
445 struct ttm_mem_reg *new_mem) 361 struct ttm_mem_reg *new_mem)
446{ 362{
@@ -467,16 +383,9 @@ static struct ttm_bo_driver qxl_bo_driver = {
467 .verify_access = &qxl_verify_access, 383 .verify_access = &qxl_verify_access,
468 .io_mem_reserve = &qxl_ttm_io_mem_reserve, 384 .io_mem_reserve = &qxl_ttm_io_mem_reserve,
469 .io_mem_free = &qxl_ttm_io_mem_free, 385 .io_mem_free = &qxl_ttm_io_mem_free,
470 .sync_obj_signaled = &qxl_sync_obj_signaled,
471 .sync_obj_wait = &qxl_sync_obj_wait,
472 .sync_obj_flush = &qxl_sync_obj_flush,
473 .sync_obj_unref = &qxl_sync_obj_unref,
474 .sync_obj_ref = &qxl_sync_obj_ref,
475 .move_notify = &qxl_bo_move_notify, 386 .move_notify = &qxl_bo_move_notify,
476}; 387};
477 388
478
479
480int qxl_ttm_init(struct qxl_device *qdev) 389int qxl_ttm_init(struct qxl_device *qdev)
481{ 390{
482 int r; 391 int r;
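
Two independent changes land in the qxl_ttm.c hunk above: the legacy sync_obj_* callbacks disappear from ttm_bo_driver (reservation-object fences now carry that information), and eviction placements move from a bare flags word plus fpfn/lpfn fields on ttm_placement to an array of struct ttm_place. A sketch of the new placement shape, assuming the 3.18-era TTM headers:

/* Sketch: filling a ttm_placement after the ttm_place conversion. */
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place sys_place = {
	.fpfn	= 0,	/* 0/0 means no page-range restriction */
	.lpfn	= 0,
	.flags	= TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
};

static void fill_evict_placement(struct ttm_placement *placement)
{
	/* fpfn/lpfn now live per-place, not on ttm_placement itself */
	placement->placement		= &sys_place;
	placement->num_placement	= 1;
	placement->busy_placement	= &sys_place;
	placement->num_busy_placement	= 1;
}
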
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 59459fe4e8c5..2c45ac9c1dc3 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -452,7 +452,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
452 dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | 452 dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
453 (dev_priv->span_offset >> 5)); 453 (dev_priv->span_offset >> 5));
454 454
455 dev_priv->sarea = drm_getsarea(dev); 455 dev_priv->sarea = drm_legacy_getsarea(dev);
456 if (!dev_priv->sarea) { 456 if (!dev_priv->sarea) {
457 DRM_ERROR("could not find sarea!\n"); 457 DRM_ERROR("could not find sarea!\n");
458 dev->dev_private = (void *)dev_priv; 458 dev->dev_private = (void *)dev_priv;
@@ -460,21 +460,21 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
460 return -EINVAL; 460 return -EINVAL;
461 } 461 }
462 462
463 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 463 dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
464 if (!dev_priv->mmio) { 464 if (!dev_priv->mmio) {
465 DRM_ERROR("could not find mmio region!\n"); 465 DRM_ERROR("could not find mmio region!\n");
466 dev->dev_private = (void *)dev_priv; 466 dev->dev_private = (void *)dev_priv;
467 r128_do_cleanup_cce(dev); 467 r128_do_cleanup_cce(dev);
468 return -EINVAL; 468 return -EINVAL;
469 } 469 }
470 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); 470 dev_priv->cce_ring = drm_legacy_findmap(dev, init->ring_offset);
471 if (!dev_priv->cce_ring) { 471 if (!dev_priv->cce_ring) {
472 DRM_ERROR("could not find cce ring region!\n"); 472 DRM_ERROR("could not find cce ring region!\n");
473 dev->dev_private = (void *)dev_priv; 473 dev->dev_private = (void *)dev_priv;
474 r128_do_cleanup_cce(dev); 474 r128_do_cleanup_cce(dev);
475 return -EINVAL; 475 return -EINVAL;
476 } 476 }
477 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 477 dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
478 if (!dev_priv->ring_rptr) { 478 if (!dev_priv->ring_rptr) {
479 DRM_ERROR("could not find ring read pointer!\n"); 479 DRM_ERROR("could not find ring read pointer!\n");
480 dev->dev_private = (void *)dev_priv; 480 dev->dev_private = (void *)dev_priv;
@@ -482,7 +482,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
482 return -EINVAL; 482 return -EINVAL;
483 } 483 }
484 dev->agp_buffer_token = init->buffers_offset; 484 dev->agp_buffer_token = init->buffers_offset;
485 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 485 dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
486 if (!dev->agp_buffer_map) { 486 if (!dev->agp_buffer_map) {
487 DRM_ERROR("could not find dma buffer region!\n"); 487 DRM_ERROR("could not find dma buffer region!\n");
488 dev->dev_private = (void *)dev_priv; 488 dev->dev_private = (void *)dev_priv;
@@ -492,7 +492,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
492 492
493 if (!dev_priv->is_pci) { 493 if (!dev_priv->is_pci) {
494 dev_priv->agp_textures = 494 dev_priv->agp_textures =
495 drm_core_findmap(dev, init->agp_textures_offset); 495 drm_legacy_findmap(dev, init->agp_textures_offset);
496 if (!dev_priv->agp_textures) { 496 if (!dev_priv->agp_textures) {
497 DRM_ERROR("could not find agp texture region!\n"); 497 DRM_ERROR("could not find agp texture region!\n");
498 dev->dev_private = (void *)dev_priv; 498 dev->dev_private = (void *)dev_priv;
@@ -507,9 +507,9 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
507 507
508#if __OS_HAS_AGP 508#if __OS_HAS_AGP
509 if (!dev_priv->is_pci) { 509 if (!dev_priv->is_pci) {
510 drm_core_ioremap_wc(dev_priv->cce_ring, dev); 510 drm_legacy_ioremap_wc(dev_priv->cce_ring, dev);
511 drm_core_ioremap_wc(dev_priv->ring_rptr, dev); 511 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
512 drm_core_ioremap_wc(dev->agp_buffer_map, dev); 512 drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
513 if (!dev_priv->cce_ring->handle || 513 if (!dev_priv->cce_ring->handle ||
514 !dev_priv->ring_rptr->handle || 514 !dev_priv->ring_rptr->handle ||
515 !dev->agp_buffer_map->handle) { 515 !dev->agp_buffer_map->handle) {
@@ -603,11 +603,11 @@ int r128_do_cleanup_cce(struct drm_device *dev)
603#if __OS_HAS_AGP 603#if __OS_HAS_AGP
604 if (!dev_priv->is_pci) { 604 if (!dev_priv->is_pci) {
605 if (dev_priv->cce_ring != NULL) 605 if (dev_priv->cce_ring != NULL)
606 drm_core_ioremapfree(dev_priv->cce_ring, dev); 606 drm_legacy_ioremapfree(dev_priv->cce_ring, dev);
607 if (dev_priv->ring_rptr != NULL) 607 if (dev_priv->ring_rptr != NULL)
608 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 608 drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
609 if (dev->agp_buffer_map != NULL) { 609 if (dev->agp_buffer_map != NULL) {
610 drm_core_ioremapfree(dev->agp_buffer_map, dev); 610 drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
611 dev->agp_buffer_map = NULL; 611 dev->agp_buffer_map = NULL;
612 } 612 }
613 } else 613 } else
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 5bd307cd8da1..c57b4de63caf 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -46,7 +46,7 @@ static const struct file_operations r128_driver_fops = {
46 .open = drm_open, 46 .open = drm_open,
47 .release = drm_release, 47 .release = drm_release,
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_legacy_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
52 .compat_ioctl = r128_compat_ioctl, 52 .compat_ioctl = r128_compat_ioctl,
@@ -62,6 +62,7 @@ static struct drm_driver driver = {
62 .load = r128_driver_load, 62 .load = r128_driver_load,
63 .preclose = r128_driver_preclose, 63 .preclose = r128_driver_preclose,
64 .lastclose = r128_driver_lastclose, 64 .lastclose = r128_driver_lastclose,
65 .set_busid = drm_pci_set_busid,
65 .get_vblank_counter = r128_get_vblank_counter, 66 .get_vblank_counter = r128_get_vblank_counter,
66 .enable_vblank = r128_enable_vblank, 67 .enable_vblank = r128_enable_vblank,
67 .disable_vblank = r128_disable_vblank, 68 .disable_vblank = r128_disable_vblank,
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 5bf3f5ff805d..723e5d6f10a4 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -35,6 +35,9 @@
35#ifndef __R128_DRV_H__ 35#ifndef __R128_DRV_H__
36#define __R128_DRV_H__ 36#define __R128_DRV_H__
37 37
38#include <drm/ati_pcigart.h>
39#include <drm/drm_legacy.h>
40
38/* General customization: 41/* General customization:
39 */ 42 */
40#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 43#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index f77b7135ee4c..d01b87991422 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -60,7 +60,7 @@ radeon-y := radeon_drv.o
60 60
61# add UMS driver 61# add UMS driver
62radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \ 62radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
63 radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o 63 radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o
64 64
65# add KMS driver 65# add KMS driver
66radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ 66radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
@@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ 74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
75 radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \ 75 radeon_pm.o atombios_dp.o r600_hdmi.o dce3_1_afmt.o \
76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o 83 ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o
84 84
85# add async DMA block 85# add async DMA block
86radeon-y += \ 86radeon-y += \
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index ac14b67621d3..95d5d4ab3335 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -232,8 +232,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
232 232
233/***** general DP utility functions *****/ 233/***** general DP utility functions *****/
234 234
235#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 235#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
236#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 236#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
237 237
238static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], 238static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
239 int lane_count, 239 int lane_count,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a7f2ddf09a9d..b8cd7975f797 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -291,29 +291,6 @@ static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
291bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, 291bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
292 struct drm_display_mode *mode); 292 struct drm_display_mode *mode);
293 293
294
295static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
296{
297 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
298 switch (radeon_encoder->encoder_id) {
299 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
300 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
301 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
302 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
303 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
304 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
305 case ENCODER_OBJECT_ID_INTERNAL_DDI:
306 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
307 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
308 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
309 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
310 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
311 return true;
312 default:
313 return false;
314 }
315}
316
317static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 294static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
318 const struct drm_display_mode *mode, 295 const struct drm_display_mode *mode,
319 struct drm_display_mode *adjusted_mode) 296 struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index f81d7ca134db..300d971187c4 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1170,23 +1170,6 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1170 { 25000, 30000, RADEON_SCLK_UP } 1170 { 25000, 30000, RADEON_SCLK_UP }
1171}; 1171};
1172 1172
1173void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1174 u32 *max_clock)
1175{
1176 u32 i, clock = 0;
1177
1178 if ((table == NULL) || (table->count == 0)) {
1179 *max_clock = clock;
1180 return;
1181 }
1182
1183 for (i = 0; i < table->count; i++) {
1184 if (clock < table->entries[i].clk)
1185 clock = table->entries[i].clk;
1186 }
1187 *max_clock = clock;
1188}
1189
1190void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1173void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1191 u32 clock, u16 max_voltage, u16 *voltage) 1174 u32 clock, u16 max_voltage, u16 *voltage)
1192{ 1175{
@@ -2099,7 +2082,6 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2099 bool disable_mclk_switching; 2082 bool disable_mclk_switching;
2100 u32 mclk, sclk; 2083 u32 mclk, sclk;
2101 u16 vddc, vddci; 2084 u16 vddc, vddci;
2102 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2103 2085
2104 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2086 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2105 btc_dpm_vblank_too_short(rdev)) 2087 btc_dpm_vblank_too_short(rdev))
@@ -2141,39 +2123,6 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2141 ps->low.vddci = max_limits->vddci; 2123 ps->low.vddci = max_limits->vddci;
2142 } 2124 }
2143 2125
2144 /* limit clocks to max supported clocks based on voltage dependency tables */
2145 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2146 &max_sclk_vddc);
2147 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2148 &max_mclk_vddci);
2149 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2150 &max_mclk_vddc);
2151
2152 if (max_sclk_vddc) {
2153 if (ps->low.sclk > max_sclk_vddc)
2154 ps->low.sclk = max_sclk_vddc;
2155 if (ps->medium.sclk > max_sclk_vddc)
2156 ps->medium.sclk = max_sclk_vddc;
2157 if (ps->high.sclk > max_sclk_vddc)
2158 ps->high.sclk = max_sclk_vddc;
2159 }
2160 if (max_mclk_vddci) {
2161 if (ps->low.mclk > max_mclk_vddci)
2162 ps->low.mclk = max_mclk_vddci;
2163 if (ps->medium.mclk > max_mclk_vddci)
2164 ps->medium.mclk = max_mclk_vddci;
2165 if (ps->high.mclk > max_mclk_vddci)
2166 ps->high.mclk = max_mclk_vddci;
2167 }
2168 if (max_mclk_vddc) {
2169 if (ps->low.mclk > max_mclk_vddc)
2170 ps->low.mclk = max_mclk_vddc;
2171 if (ps->medium.mclk > max_mclk_vddc)
2172 ps->medium.mclk = max_mclk_vddc;
2173 if (ps->high.mclk > max_mclk_vddc)
2174 ps->high.mclk = max_mclk_vddc;
2175 }
2176
2177 /* XXX validate the min clocks required for display */ 2126 /* XXX validate the min clocks required for display */
2178 2127
2179 if (disable_mclk_switching) { 2128 if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 3b6f12b7760b..1a15e0e41950 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,8 +46,6 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
51void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 49void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
52 u16 max_vddc, u16 max_vddci, 50 u16 max_vddc, u16 max_vddci,
53 u16 *vddc, u16 *vddci); 51 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d416bb2ff48d..f5c8c0445a94 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -162,8 +162,6 @@ static const struct ci_pt_config_reg didt_config_ci[] =
162}; 162};
163 163
164extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 164extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
165extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
166 u32 *max_clock);
167extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 165extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
168 u32 arb_freq_src, u32 arb_freq_dest); 166 u32 arb_freq_src, u32 arb_freq_dest);
169extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 167extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -748,7 +746,6 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
748 struct radeon_clock_and_voltage_limits *max_limits; 746 struct radeon_clock_and_voltage_limits *max_limits;
749 bool disable_mclk_switching; 747 bool disable_mclk_switching;
750 u32 sclk, mclk; 748 u32 sclk, mclk;
751 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
752 int i; 749 int i;
753 750
754 if (rps->vce_active) { 751 if (rps->vce_active) {
@@ -784,29 +781,6 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
784 } 781 }
785 } 782 }
786 783
787 /* limit clocks to max supported clocks based on voltage dependency tables */
788 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
789 &max_sclk_vddc);
790 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
791 &max_mclk_vddci);
792 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
793 &max_mclk_vddc);
794
795 for (i = 0; i < ps->performance_level_count; i++) {
796 if (max_sclk_vddc) {
797 if (ps->performance_levels[i].sclk > max_sclk_vddc)
798 ps->performance_levels[i].sclk = max_sclk_vddc;
799 }
800 if (max_mclk_vddci) {
801 if (ps->performance_levels[i].mclk > max_mclk_vddci)
802 ps->performance_levels[i].mclk = max_mclk_vddci;
803 }
804 if (max_mclk_vddc) {
805 if (ps->performance_levels[i].mclk > max_mclk_vddc)
806 ps->performance_levels[i].mclk = max_mclk_vddc;
807 }
808 }
809
810 /* XXX validate the min clocks required for display */ 784 /* XXX validate the min clocks required for display */
811 785
812 if (disable_mclk_switching) { 786 if (disable_mclk_switching) {
@@ -5293,9 +5267,13 @@ int ci_dpm_init(struct radeon_device *rdev)
5293void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 5267void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5294 struct seq_file *m) 5268 struct seq_file *m)
5295{ 5269{
5270 struct ci_power_info *pi = ci_get_pi(rdev);
5271 struct radeon_ps *rps = &pi->current_rps;
5296 u32 sclk = ci_get_average_sclk_freq(rdev); 5272 u32 sclk = ci_get_average_sclk_freq(rdev);
5297 u32 mclk = ci_get_average_mclk_freq(rdev); 5273 u32 mclk = ci_get_average_mclk_freq(rdev);
5298 5274
5275 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
5276 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
5299 seq_printf(m, "power level avg sclk: %u mclk: %u\n", 5277 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5300 sclk, mclk); 5278 sclk, mclk);
5301} 5279}
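
The kv_dpm.c and ci_dpm.c hunks extend the existing dpm debugfs dump with UVD/VCE state. Going by the seq_printf format strings added above, the debugfs file would now begin along these lines (values illustrative):

uvd disabled
vce disabled
power level avg sclk: 80000 mclk: 150000
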
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 3d546c606b43..377afa504d2b 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3959 * @src_offset: src GPU address 3959 * @src_offset: src GPU address
3960 * @dst_offset: dst GPU address 3960 * @dst_offset: dst GPU address
3961 * @num_gpu_pages: number of GPU pages to xfer 3961 * @num_gpu_pages: number of GPU pages to xfer
3962 * @fence: radeon fence object 3962 * @resv: reservation object to sync to
3963 * 3963 *
3964 * Copy GPU paging using the CP DMA engine (CIK+). 3964 * Copy GPU paging using the CP DMA engine (CIK+).
3965 * Used by the radeon ttm implementation to move pages if 3965 * Used by the radeon ttm implementation to move pages if
3966 * registered as the asic copy callback. 3966 * registered as the asic copy callback.
3967 */ 3967 */
3968int cik_copy_cpdma(struct radeon_device *rdev, 3968struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
3969 uint64_t src_offset, uint64_t dst_offset, 3969 uint64_t src_offset, uint64_t dst_offset,
3970 unsigned num_gpu_pages, 3970 unsigned num_gpu_pages,
3971 struct radeon_fence **fence) 3971 struct reservation_object *resv)
3972{ 3972{
3973 struct radeon_semaphore *sem = NULL; 3973 struct radeon_semaphore *sem = NULL;
3974 struct radeon_fence *fence;
3974 int ring_index = rdev->asic->copy.blit_ring_index; 3975 int ring_index = rdev->asic->copy.blit_ring_index;
3975 struct radeon_ring *ring = &rdev->ring[ring_index]; 3976 struct radeon_ring *ring = &rdev->ring[ring_index];
3976 u32 size_in_bytes, cur_size_in_bytes, control; 3977 u32 size_in_bytes, cur_size_in_bytes, control;
@@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
3980 r = radeon_semaphore_create(rdev, &sem); 3981 r = radeon_semaphore_create(rdev, &sem);
3981 if (r) { 3982 if (r) {
3982 DRM_ERROR("radeon: moving bo (%d).\n", r); 3983 DRM_ERROR("radeon: moving bo (%d).\n", r);
3983 return r; 3984 return ERR_PTR(r);
3984 } 3985 }
3985 3986
3986 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 3987 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev,
3989 if (r) { 3990 if (r) {
3990 DRM_ERROR("radeon: moving bo (%d).\n", r); 3991 DRM_ERROR("radeon: moving bo (%d).\n", r);
3991 radeon_semaphore_free(rdev, &sem, NULL); 3992 radeon_semaphore_free(rdev, &sem, NULL);
3992 return r; 3993 return ERR_PTR(r);
3993 } 3994 }
3994 3995
3995 radeon_semaphore_sync_to(sem, *fence); 3996 radeon_semaphore_sync_resv(rdev, sem, resv, false);
3996 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 3997 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
3997 3998
3998 for (i = 0; i < num_loops; i++) { 3999 for (i = 0; i < num_loops; i++) {
@@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev,
4014 dst_offset += cur_size_in_bytes; 4015 dst_offset += cur_size_in_bytes;
4015 } 4016 }
4016 4017
4017 r = radeon_fence_emit(rdev, fence, ring->idx); 4018 r = radeon_fence_emit(rdev, &fence, ring->idx);
4018 if (r) { 4019 if (r) {
4019 radeon_ring_unlock_undo(rdev, ring); 4020 radeon_ring_unlock_undo(rdev, ring);
4020 radeon_semaphore_free(rdev, &sem, NULL); 4021 radeon_semaphore_free(rdev, &sem, NULL);
4021 return r; 4022 return ERR_PTR(r);
4022 } 4023 }
4023 4024
4024 radeon_ring_unlock_commit(rdev, ring, false); 4025 radeon_ring_unlock_commit(rdev, ring, false);
4025 radeon_semaphore_free(rdev, &sem, *fence); 4026 radeon_semaphore_free(rdev, &sem, fence);
4026 4027
4027 return r; 4028 return fence;
4028} 4029}
4029 4030
4030/* 4031/*
@@ -4234,7 +4235,7 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
4234 WREG32(CP_PFP_UCODE_ADDR, 0); 4235 WREG32(CP_PFP_UCODE_ADDR, 0);
4235 for (i = 0; i < fw_size; i++) 4236 for (i = 0; i < fw_size; i++)
4236 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); 4237 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
4237 WREG32(CP_PFP_UCODE_ADDR, 0); 4238 WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
4238 4239
4239 /* CE */ 4240 /* CE */
4240 fw_data = (const __le32 *) 4241 fw_data = (const __le32 *)
@@ -4243,7 +4244,7 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
4243 WREG32(CP_CE_UCODE_ADDR, 0); 4244 WREG32(CP_CE_UCODE_ADDR, 0);
4244 for (i = 0; i < fw_size; i++) 4245 for (i = 0; i < fw_size; i++)
4245 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); 4246 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
4246 WREG32(CP_CE_UCODE_ADDR, 0); 4247 WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
4247 4248
4248 /* ME */ 4249 /* ME */
4249 fw_data = (const __be32 *) 4250 fw_data = (const __be32 *)
@@ -4252,7 +4253,8 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
4252 WREG32(CP_ME_RAM_WADDR, 0); 4253 WREG32(CP_ME_RAM_WADDR, 0);
4253 for (i = 0; i < fw_size; i++) 4254 for (i = 0; i < fw_size; i++)
4254 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); 4255 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
4255 WREG32(CP_ME_RAM_WADDR, 0); 4256 WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
4257 WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
4256 } else { 4258 } else {
4257 const __be32 *fw_data; 4259 const __be32 *fw_data;
4258 4260
@@ -4278,10 +4280,6 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
4278 WREG32(CP_ME_RAM_WADDR, 0); 4280 WREG32(CP_ME_RAM_WADDR, 0);
4279 } 4281 }
4280 4282
4281 WREG32(CP_PFP_UCODE_ADDR, 0);
4282 WREG32(CP_CE_UCODE_ADDR, 0);
4283 WREG32(CP_ME_RAM_WADDR, 0);
4284 WREG32(CP_ME_RAM_RADDR, 0);
4285 return 0; 4283 return 0;
4286} 4284}
4287 4285
@@ -4563,7 +4561,7 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
4563 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4561 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4564 for (i = 0; i < fw_size; i++) 4562 for (i = 0; i < fw_size; i++)
4565 WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); 4563 WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
4566 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4564 WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
4567 4565
4568 /* MEC2 */ 4566 /* MEC2 */
4569 if (rdev->family == CHIP_KAVERI) { 4567 if (rdev->family == CHIP_KAVERI) {
@@ -4577,7 +4575,7 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
4577 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4575 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4578 for (i = 0; i < fw_size; i++) 4576 for (i = 0; i < fw_size; i++)
4579 WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); 4577 WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
4580 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4578 WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
4581 } 4579 }
4582 } else { 4580 } else {
4583 const __be32 *fw_data; 4581 const __be32 *fw_data;
@@ -4689,7 +4687,7 @@ static int cik_mec_init(struct radeon_device *rdev)
4689 r = radeon_bo_create(rdev, 4687 r = radeon_bo_create(rdev,
4690 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, 4688 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
4691 PAGE_SIZE, true, 4689 PAGE_SIZE, true,
4692 RADEON_GEM_DOMAIN_GTT, 0, NULL, 4690 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
4693 &rdev->mec.hpd_eop_obj); 4691 &rdev->mec.hpd_eop_obj);
4694 if (r) { 4692 if (r) {
4695 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); 4693 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4860,7 +4858,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
4860 sizeof(struct bonaire_mqd), 4858 sizeof(struct bonaire_mqd),
4861 PAGE_SIZE, true, 4859 PAGE_SIZE, true,
4862 RADEON_GEM_DOMAIN_GTT, 0, NULL, 4860 RADEON_GEM_DOMAIN_GTT, 0, NULL,
4863 &rdev->ring[idx].mqd_obj); 4861 NULL, &rdev->ring[idx].mqd_obj);
4864 if (r) { 4862 if (r) {
4865 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); 4863 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
4866 return r; 4864 return r;
@@ -6226,7 +6224,7 @@ static int cik_rlc_resume(struct radeon_device *rdev)
6226 WREG32(RLC_GPM_UCODE_ADDR, 0); 6224 WREG32(RLC_GPM_UCODE_ADDR, 0);
6227 for (i = 0; i < size; i++) 6225 for (i = 0; i < size; i++)
6228 WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 6226 WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
6229 WREG32(RLC_GPM_UCODE_ADDR, 0); 6227 WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
6230 } else { 6228 } else {
6231 const __be32 *fw_data; 6229 const __be32 *fw_data;
6232 6230
@@ -8255,8 +8253,10 @@ restart_ih:
8255 } 8253 }
8256 if (queue_hotplug) 8254 if (queue_hotplug)
8257 schedule_work(&rdev->hotplug_work); 8255 schedule_work(&rdev->hotplug_work);
8258 if (queue_reset) 8256 if (queue_reset) {
8259 schedule_work(&rdev->reset_work); 8257 rdev->needs_reset = true;
8258 wake_up_all(&rdev->fence_queue);
8259 }
8260 if (queue_thermal) 8260 if (queue_thermal)
8261 schedule_work(&rdev->pm.dpm.thermal.work); 8261 schedule_work(&rdev->pm.dpm.thermal.work);
8262 rdev->ih.rptr = rptr; 8262 rdev->ih.rptr = rptr;
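
The cik.c hunk above (and the matching cik_sdma.c and evergreen_dma.c hunks below) converts the asic copy callbacks from int functions that fill in a struct radeon_fence ** to functions that return the fence directly, reporting failures through ERR_PTR() and syncing to a reservation object instead of a prior fence. Callers therefore switch from checking a return code to IS_ERR(); a sketch of the new convention, with an illustrative wrapper around the real cik_copy_dma():

/* Sketch: consuming the ERR_PTR-returning copy callback. */
#include <linux/err.h>

static int copy_pages_and_wait(struct radeon_device *rdev,
			       uint64_t src, uint64_t dst,
			       unsigned num_gpu_pages,
			       struct reservation_object *resv)
{
	struct radeon_fence *fence;
	int r;

	fence = cik_copy_dma(rdev, src, dst, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* ring or semaphore setup failed */

	r = radeon_fence_wait(fence, false);	/* or hand the fence to TTM */
	radeon_fence_unref(&fence);
	return r;
}
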
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index c4ffa54b1e3d..c77dad1a4576 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -530,18 +530,19 @@ void cik_sdma_fini(struct radeon_device *rdev)
530 * @src_offset: src GPU address 530 * @src_offset: src GPU address
531 * @dst_offset: dst GPU address 531 * @dst_offset: dst GPU address
532 * @num_gpu_pages: number of GPU pages to xfer 532 * @num_gpu_pages: number of GPU pages to xfer
533 * @fence: radeon fence object 533 * @resv: reservation object to sync to
534 * 534 *
535 * Copy GPU paging using the DMA engine (CIK). 535 * Copy GPU paging using the DMA engine (CIK).
536 * Used by the radeon ttm implementation to move pages if 536 * Used by the radeon ttm implementation to move pages if
537 * registered as the asic copy callback. 537 * registered as the asic copy callback.
538 */ 538 */
539int cik_copy_dma(struct radeon_device *rdev, 539struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
540 uint64_t src_offset, uint64_t dst_offset, 540 uint64_t src_offset, uint64_t dst_offset,
541 unsigned num_gpu_pages, 541 unsigned num_gpu_pages,
542 struct radeon_fence **fence) 542 struct reservation_object *resv)
543{ 543{
544 struct radeon_semaphore *sem = NULL; 544 struct radeon_semaphore *sem = NULL;
545 struct radeon_fence *fence;
545 int ring_index = rdev->asic->copy.dma_ring_index; 546 int ring_index = rdev->asic->copy.dma_ring_index;
546 struct radeon_ring *ring = &rdev->ring[ring_index]; 547 struct radeon_ring *ring = &rdev->ring[ring_index];
547 u32 size_in_bytes, cur_size_in_bytes; 548 u32 size_in_bytes, cur_size_in_bytes;
@@ -551,7 +552,7 @@ int cik_copy_dma(struct radeon_device *rdev,
551 r = radeon_semaphore_create(rdev, &sem); 552 r = radeon_semaphore_create(rdev, &sem);
552 if (r) { 553 if (r) {
553 DRM_ERROR("radeon: moving bo (%d).\n", r); 554 DRM_ERROR("radeon: moving bo (%d).\n", r);
554 return r; 555 return ERR_PTR(r);
555 } 556 }
556 557
557 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 558 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -560,10 +561,10 @@ int cik_copy_dma(struct radeon_device *rdev,
560 if (r) { 561 if (r) {
561 DRM_ERROR("radeon: moving bo (%d).\n", r); 562 DRM_ERROR("radeon: moving bo (%d).\n", r);
562 radeon_semaphore_free(rdev, &sem, NULL); 563 radeon_semaphore_free(rdev, &sem, NULL);
563 return r; 564 return ERR_PTR(r);
564 } 565 }
565 566
566 radeon_semaphore_sync_to(sem, *fence); 567 radeon_semaphore_sync_resv(rdev, sem, resv, false);
567 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 568 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
568 569
569 for (i = 0; i < num_loops; i++) { 570 for (i = 0; i < num_loops; i++) {
@@ -582,17 +583,17 @@ int cik_copy_dma(struct radeon_device *rdev,
582 dst_offset += cur_size_in_bytes; 583 dst_offset += cur_size_in_bytes;
583 } 584 }
584 585
585 r = radeon_fence_emit(rdev, fence, ring->idx); 586 r = radeon_fence_emit(rdev, &fence, ring->idx);
586 if (r) { 587 if (r) {
587 radeon_ring_unlock_undo(rdev, ring); 588 radeon_ring_unlock_undo(rdev, ring);
588 radeon_semaphore_free(rdev, &sem, NULL); 589 radeon_semaphore_free(rdev, &sem, NULL);
589 return r; 590 return ERR_PTR(r);
590 } 591 }
591 592
592 radeon_ring_unlock_commit(rdev, ring, false); 593 radeon_ring_unlock_commit(rdev, ring, false);
593 radeon_semaphore_free(rdev, &sem, *fence); 594 radeon_semaphore_free(rdev, &sem, fence);
594 595
595 return r; 596 return fence;
596} 597}
597 598
598/** 599/**
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 51800e340a57..950af153f30e 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -165,7 +165,7 @@ void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *m
165 165
166 /* disable audio prior to setting up hw */ 166 /* disable audio prior to setting up hw */
167 dig->afmt->pin = r600_audio_get_pin(rdev); 167 dig->afmt->pin = r600_audio_get_pin(rdev);
168 r600_audio_enable(rdev, dig->afmt->pin, false); 168 r600_audio_enable(rdev, dig->afmt->pin, 0);
169 169
170 r600_audio_set_dto(encoder, mode->clock); 170 r600_audio_set_dto(encoder, mode->clock);
171 171
@@ -240,5 +240,5 @@ void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *m
240 r600_hdmi_audio_workaround(encoder); 240 r600_hdmi_audio_workaround(encoder);
241 241
242	/* enable audio after setting up hw */ 242	/* enable audio after setting up hw */
243 r600_audio_enable(rdev, dig->afmt->pin, true); 243 r600_audio_enable(rdev, dig->afmt->pin, 0xf);
244} 244}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index ab29f953a767..c0bbf68dbc27 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -284,13 +284,13 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
284 284
285void dce6_audio_enable(struct radeon_device *rdev, 285void dce6_audio_enable(struct radeon_device *rdev,
286 struct r600_audio_pin *pin, 286 struct r600_audio_pin *pin,
287 bool enable) 287 u8 enable_mask)
288{ 288{
289 if (!pin) 289 if (!pin)
290 return; 290 return;
291 291
292 WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, 292 WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
293 enable ? AUDIO_ENABLED : 0); 293 enable_mask ? AUDIO_ENABLED : 0);
294} 294}
295 295
296static const u32 pin_offsets[7] = 296static const u32 pin_offsets[7] =
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/radeon/drm_buffer.c
index 86a4a4a60afc..f4e0f3a3d7b1 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/radeon/drm_buffer.c
@@ -33,7 +33,7 @@
33 */ 33 */
34 34
35#include <linux/export.h> 35#include <linux/export.h>
36#include <drm/drm_buffer.h> 36#include "drm_buffer.h"
37 37
38/** 38/**
39 * Allocate the drm buffer object. 39 * Allocate the drm buffer object.
@@ -86,7 +86,6 @@ error_out:
86 kfree(*buf); 86 kfree(*buf);
87 return -ENOMEM; 87 return -ENOMEM;
88} 88}
89EXPORT_SYMBOL(drm_buffer_alloc);
90 89
91/** 90/**
92 * Copy the user data to the begin of the buffer and reset the processing 91 * Copy the user data to the begin of the buffer and reset the processing
@@ -123,7 +122,6 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf,
123 buf->iterator = 0; 122 buf->iterator = 0;
124 return 0; 123 return 0;
125} 124}
126EXPORT_SYMBOL(drm_buffer_copy_from_user);
127 125
128/** 126/**
129 * Free the drm buffer object 127 * Free the drm buffer object
@@ -141,7 +139,6 @@ void drm_buffer_free(struct drm_buffer *buf)
141 kfree(buf); 139 kfree(buf);
142 } 140 }
143} 141}
144EXPORT_SYMBOL(drm_buffer_free);
145 142
146/** 143/**
147 * Read an object from buffer that may be split to multiple parts. If object 144 * Read an object from buffer that may be split to multiple parts. If object
@@ -178,4 +175,3 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
178 drm_buffer_advance(buf, objsize); 175 drm_buffer_advance(buf, objsize);
179 return obj; 176 return obj;
180} 177}
181EXPORT_SYMBOL(drm_buffer_read_object);
diff --git a/include/drm/drm_buffer.h b/drivers/gpu/drm/radeon/drm_buffer.h
index c80d3a340b94..c80d3a340b94 100644
--- a/include/drm/drm_buffer.h
+++ b/drivers/gpu/drm/radeon/drm_buffer.h
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e50807c29f69..a31f1ca40c6a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <drm/drmP.h> 26#include <drm/drmP.h>
28#include "radeon.h" 27#include "radeon.h"
@@ -4023,7 +4022,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
4023 if (rdev->rlc.save_restore_obj == NULL) { 4022 if (rdev->rlc.save_restore_obj == NULL) {
4024 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 4023 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4025 RADEON_GEM_DOMAIN_VRAM, 0, NULL, 4024 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4026 &rdev->rlc.save_restore_obj); 4025 NULL, &rdev->rlc.save_restore_obj);
4027 if (r) { 4026 if (r) {
4028 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); 4027 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
4029 return r; 4028 return r;
@@ -4102,7 +4101,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
4102 if (rdev->rlc.clear_state_obj == NULL) { 4101 if (rdev->rlc.clear_state_obj == NULL) {
4103 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 4102 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4104 RADEON_GEM_DOMAIN_VRAM, 0, NULL, 4103 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4105 &rdev->rlc.clear_state_obj); 4104 NULL, &rdev->rlc.clear_state_obj);
4106 if (r) { 4105 if (r) {
4107 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 4106 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
4108 sumo_rlc_fini(rdev); 4107 sumo_rlc_fini(rdev);
@@ -4179,7 +4178,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
4179 r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, 4178 r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
4180 PAGE_SIZE, true, 4179 PAGE_SIZE, true,
4181 RADEON_GEM_DOMAIN_VRAM, 0, NULL, 4180 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4182 &rdev->rlc.cp_table_obj); 4181 NULL, &rdev->rlc.cp_table_obj);
4183 if (r) { 4182 if (r) {
4184 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); 4183 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
4185 sumo_rlc_fini(rdev); 4184 sumo_rlc_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index afaba388c36d..66bcfadeedd1 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -104,12 +104,14 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
104 * Used by the radeon ttm implementation to move pages if 104 * Used by the radeon ttm implementation to move pages if
105 * registered as the asic copy callback. 105 * registered as the asic copy callback.
106 */ 106 */
107int evergreen_copy_dma(struct radeon_device *rdev, 107struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
108 uint64_t src_offset, uint64_t dst_offset, 108 uint64_t src_offset,
109 unsigned num_gpu_pages, 109 uint64_t dst_offset,
110 struct radeon_fence **fence) 110 unsigned num_gpu_pages,
111 struct reservation_object *resv)
111{ 112{
112 struct radeon_semaphore *sem = NULL; 113 struct radeon_semaphore *sem = NULL;
114 struct radeon_fence *fence;
113 int ring_index = rdev->asic->copy.dma_ring_index; 115 int ring_index = rdev->asic->copy.dma_ring_index;
114 struct radeon_ring *ring = &rdev->ring[ring_index]; 116 struct radeon_ring *ring = &rdev->ring[ring_index];
115 u32 size_in_dw, cur_size_in_dw; 117 u32 size_in_dw, cur_size_in_dw;
@@ -119,7 +121,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
119 r = radeon_semaphore_create(rdev, &sem); 121 r = radeon_semaphore_create(rdev, &sem);
120 if (r) { 122 if (r) {
121 DRM_ERROR("radeon: moving bo (%d).\n", r); 123 DRM_ERROR("radeon: moving bo (%d).\n", r);
122 return r; 124 return ERR_PTR(r);
123 } 125 }
124 126
125 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 127 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -128,10 +130,10 @@ int evergreen_copy_dma(struct radeon_device *rdev,
128 if (r) { 130 if (r) {
129 DRM_ERROR("radeon: moving bo (%d).\n", r); 131 DRM_ERROR("radeon: moving bo (%d).\n", r);
130 radeon_semaphore_free(rdev, &sem, NULL); 132 radeon_semaphore_free(rdev, &sem, NULL);
131 return r; 133 return ERR_PTR(r);
132 } 134 }
133 135
134 radeon_semaphore_sync_to(sem, *fence); 136 radeon_semaphore_sync_resv(rdev, sem, resv, false);
135 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 137 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
136 138
137 for (i = 0; i < num_loops; i++) { 139 for (i = 0; i < num_loops; i++) {
@@ -148,17 +150,17 @@ int evergreen_copy_dma(struct radeon_device *rdev,
148 dst_offset += cur_size_in_dw * 4; 150 dst_offset += cur_size_in_dw * 4;
149 } 151 }
150 152
151 r = radeon_fence_emit(rdev, fence, ring->idx); 153 r = radeon_fence_emit(rdev, &fence, ring->idx);
152 if (r) { 154 if (r) {
153 radeon_ring_unlock_undo(rdev, ring); 155 radeon_ring_unlock_undo(rdev, ring);
154 radeon_semaphore_free(rdev, &sem, NULL); 156 radeon_semaphore_free(rdev, &sem, NULL);
155 return r; 157 return ERR_PTR(r);
156 } 158 }
157 159
158 radeon_ring_unlock_commit(rdev, ring, false); 160 radeon_ring_unlock_commit(rdev, ring, false);
159 radeon_semaphore_free(rdev, &sem, *fence); 161 radeon_semaphore_free(rdev, &sem, fence);
160 162
161 return r; 163 return fence;
162} 164}
163 165
164/** 166/**
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 278c7a139d74..2514d659b1ba 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -38,6 +38,37 @@ extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
38extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, 38extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
39 struct drm_display_mode *mode); 39 struct drm_display_mode *mode);
40 40
41/* enable the audio stream */
42static void dce4_audio_enable(struct radeon_device *rdev,
43 struct r600_audio_pin *pin,
44 u8 enable_mask)
45{
46 u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
47
48 if (!pin)
49 return;
50
51 if (enable_mask) {
52 tmp |= AUDIO_ENABLED;
53 if (enable_mask & 1)
54 tmp |= PIN0_AUDIO_ENABLED;
55 if (enable_mask & 2)
56 tmp |= PIN1_AUDIO_ENABLED;
57 if (enable_mask & 4)
58 tmp |= PIN2_AUDIO_ENABLED;
59 if (enable_mask & 8)
60 tmp |= PIN3_AUDIO_ENABLED;
61 } else {
62 tmp &= ~(AUDIO_ENABLED |
63 PIN0_AUDIO_ENABLED |
64 PIN1_AUDIO_ENABLED |
65 PIN2_AUDIO_ENABLED |
66 PIN3_AUDIO_ENABLED);
67 }
68
69 WREG32(AZ_HOT_PLUG_CONTROL, tmp);
70}
71
41/* 72/*
42 * update the N and CTS parameters for a given pixel clock rate 73 * update the N and CTS parameters for a given pixel clock rate
43 */ 74 */
@@ -318,10 +349,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
318 /* disable audio prior to setting up hw */ 349 /* disable audio prior to setting up hw */
319 if (ASIC_IS_DCE6(rdev)) { 350 if (ASIC_IS_DCE6(rdev)) {
320 dig->afmt->pin = dce6_audio_get_pin(rdev); 351 dig->afmt->pin = dce6_audio_get_pin(rdev);
321 dce6_audio_enable(rdev, dig->afmt->pin, false); 352 dce6_audio_enable(rdev, dig->afmt->pin, 0);
322 } else { 353 } else {
323 dig->afmt->pin = r600_audio_get_pin(rdev); 354 dig->afmt->pin = r600_audio_get_pin(rdev);
324 r600_audio_enable(rdev, dig->afmt->pin, false); 355 dce4_audio_enable(rdev, dig->afmt->pin, 0);
325 } 356 }
326 357
327 evergreen_audio_set_dto(encoder, mode->clock); 358 evergreen_audio_set_dto(encoder, mode->clock);
@@ -463,13 +494,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
463 494
464 /* enable audio after setting up hw */ 495 /* enable audio after setting up hw */
465 if (ASIC_IS_DCE6(rdev)) 496 if (ASIC_IS_DCE6(rdev))
466 dce6_audio_enable(rdev, dig->afmt->pin, true); 497 dce6_audio_enable(rdev, dig->afmt->pin, 1);
467 else 498 else
468 r600_audio_enable(rdev, dig->afmt->pin, true); 499 dce4_audio_enable(rdev, dig->afmt->pin, 0xf);
469} 500}
470 501
471void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) 502void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
472{ 503{
504 struct drm_device *dev = encoder->dev;
505 struct radeon_device *rdev = dev->dev_private;
473 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 506 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
474 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 507 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
475 508
@@ -482,6 +515,14 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
482 if (!enable && !dig->afmt->enabled) 515 if (!enable && !dig->afmt->enabled)
483 return; 516 return;
484 517
518 if (!enable && dig->afmt->pin) {
519 if (ASIC_IS_DCE6(rdev))
520 dce6_audio_enable(rdev, dig->afmt->pin, 0);
521 else
522 dce4_audio_enable(rdev, dig->afmt->pin, 0);
523 dig->afmt->pin = NULL;
524 }
525
485 dig->afmt->enabled = enable; 526 dig->afmt->enabled = enable;
486 527
487 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", 528 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
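The new dce4_audio_enable() above takes a per-pin bitmask rather than a bool: bits 0-3 map to PIN0..PIN3_AUDIO_ENABLED, any non-zero mask also sets AUDIO_ENABLED, and 0 clears everything. Illustrative calls (values only, not from this patch):

    dce4_audio_enable(rdev, pin, 0x1);  /* pin 0 only */
    dce4_audio_enable(rdev, pin, 0xf);  /* all four pins */
    dce4_audio_enable(rdev, pin, 0);    /* full disable, clears AUDIO_ENABLED */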
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 67cb472d188c..1dd976f447fa 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2787,6 +2787,8 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2787 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2787 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2788 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT; 2788 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2789 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp); 2789 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2790 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2791 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
2790 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2792 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2791 current_index, sclk, vddc); 2793 current_index, sclk, vddc);
2792 } 2794 }
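With the two added seq_printf() lines, the kv dpm debugfs file reports UVD/VCE power-gating state ahead of the current performance level. An illustrative dump (values invented) would read:

    uvd disabled
    vce disabled
    power level 2    sclk: 60000 vddc: 1000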
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 01fc4888e6fe..715b181c6243 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -789,7 +789,6 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
789 bool disable_mclk_switching; 789 bool disable_mclk_switching;
790 u32 mclk; 790 u32 mclk;
791 u16 vddci; 791 u16 vddci;
792 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
793 int i; 792 int i;
794 793
795 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 794 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -816,29 +815,6 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
816 } 815 }
817 } 816 }
818 817
819 /* limit clocks to max supported clocks based on voltage dependency tables */
820 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
821 &max_sclk_vddc);
822 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
823 &max_mclk_vddci);
824 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
825 &max_mclk_vddc);
826
827 for (i = 0; i < ps->performance_level_count; i++) {
828 if (max_sclk_vddc) {
829 if (ps->performance_levels[i].sclk > max_sclk_vddc)
830 ps->performance_levels[i].sclk = max_sclk_vddc;
831 }
832 if (max_mclk_vddci) {
833 if (ps->performance_levels[i].mclk > max_mclk_vddci)
834 ps->performance_levels[i].mclk = max_mclk_vddci;
835 }
836 if (max_mclk_vddc) {
837 if (ps->performance_levels[i].mclk > max_mclk_vddc)
838 ps->performance_levels[i].mclk = max_mclk_vddc;
839 }
840 }
841
842 /* XXX validate the min clocks required for display */ 818 /* XXX validate the min clocks required for display */
843 819
844 /* adjust low state */ 820 /* adjust low state */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b0098e792e62..10f8be0ee173 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -869,13 +869,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
869 return false; 869 return false;
870} 870}
871 871
872int r100_copy_blit(struct radeon_device *rdev, 872struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
873 uint64_t src_offset, 873 uint64_t src_offset,
874 uint64_t dst_offset, 874 uint64_t dst_offset,
875 unsigned num_gpu_pages, 875 unsigned num_gpu_pages,
876 struct radeon_fence **fence) 876 struct reservation_object *resv)
877{ 877{
878 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 878 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
879 struct radeon_fence *fence;
879 uint32_t cur_pages; 880 uint32_t cur_pages;
880 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; 881 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
881 uint32_t pitch; 882 uint32_t pitch;
@@ -896,7 +897,7 @@ int r100_copy_blit(struct radeon_device *rdev,
896 r = radeon_ring_lock(rdev, ring, ndw); 897 r = radeon_ring_lock(rdev, ring, ndw);
897 if (r) { 898 if (r) {
898 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 899 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
899 return -EINVAL; 900 return ERR_PTR(-EINVAL);
900 } 901 }
901 while (num_gpu_pages > 0) { 902 while (num_gpu_pages > 0) {
902 cur_pages = num_gpu_pages; 903 cur_pages = num_gpu_pages;
@@ -936,11 +937,13 @@ int r100_copy_blit(struct radeon_device *rdev,
936 RADEON_WAIT_2D_IDLECLEAN | 937 RADEON_WAIT_2D_IDLECLEAN |
937 RADEON_WAIT_HOST_IDLECLEAN | 938 RADEON_WAIT_HOST_IDLECLEAN |
938 RADEON_WAIT_DMA_GUI_IDLE); 939 RADEON_WAIT_DMA_GUI_IDLE);
939 if (fence) { 940 r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
940 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 941 if (r) {
942 radeon_ring_unlock_undo(rdev, ring);
943 return ERR_PTR(r);
941 } 944 }
942 radeon_ring_unlock_commit(rdev, ring, false); 945 radeon_ring_unlock_commit(rdev, ring, false);
943 return r; 946 return fence;
944} 947}
945 948
946static int r100_cp_wait_for_idle(struct radeon_device *rdev) 949static int r100_cp_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 67780374a652..732d4938aab7 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -80,13 +80,14 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
80 return vtx_size; 80 return vtx_size;
81} 81}
82 82
83int r200_copy_dma(struct radeon_device *rdev, 83struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
84 uint64_t src_offset, 84 uint64_t src_offset,
85 uint64_t dst_offset, 85 uint64_t dst_offset,
86 unsigned num_gpu_pages, 86 unsigned num_gpu_pages,
87 struct radeon_fence **fence) 87 struct reservation_object *resv)
88{ 88{
89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
90 struct radeon_fence *fence;
90 uint32_t size; 91 uint32_t size;
91 uint32_t cur_size; 92 uint32_t cur_size;
92 int i, num_loops; 93 int i, num_loops;
@@ -98,7 +99,7 @@ int r200_copy_dma(struct radeon_device *rdev,
98 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); 99 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
99 if (r) { 100 if (r) {
100 DRM_ERROR("radeon: moving bo (%d).\n", r); 101 DRM_ERROR("radeon: moving bo (%d).\n", r);
101 return r; 102 return ERR_PTR(r);
102 } 103 }
103 /* Must wait for 2D idle & clean before DMA or hangs might happen */ 104 /* Must wait for 2D idle & clean before DMA or hangs might happen */
104 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 105 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
@@ -118,11 +119,13 @@ int r200_copy_dma(struct radeon_device *rdev,
118 } 119 }
119 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 120 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
120 radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); 121 radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
121 if (fence) { 122 r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
122 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 123 if (r) {
124 radeon_ring_unlock_undo(rdev, ring);
125 return ERR_PTR(r);
123 } 126 }
124 radeon_ring_unlock_commit(rdev, ring, false); 127 radeon_ring_unlock_commit(rdev, ring, false);
125 return r; 128 return fence;
126} 129}
127 130
128 131
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 84b1d5367a11..9418e388b045 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -34,10 +34,10 @@
34 */ 34 */
35 35
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include <drm/drm_buffer.h>
38#include <drm/radeon_drm.h> 37#include <drm/radeon_drm.h>
39#include "radeon_drv.h" 38#include "radeon_drv.h"
40#include "r300_reg.h" 39#include "r300_reg.h"
40#include "drm_buffer.h"
41 41
42#include <asm/unaligned.h> 42#include <asm/unaligned.h>
43 43
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea5c9af722ef..56b02927cd3d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -122,6 +122,94 @@ u32 r600_get_xclk(struct radeon_device *rdev)
122 122
123int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) 123int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
124{ 124{
125 unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
126 int r;
127
128 /* bypass vclk and dclk with bclk */
129 WREG32_P(CG_UPLL_FUNC_CNTL_2,
130 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
131 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
132
133 /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
134 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
135 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
136
137 if (rdev->family >= CHIP_RS780)
138 WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
139 ~UPLL_BYPASS_CNTL);
140
141 if (!vclk || !dclk) {
142 /* keep the Bypass mode, put PLL to sleep */
143 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
144 return 0;
145 }
146
147 if (rdev->clock.spll.reference_freq == 10000)
148 ref_div = 34;
149 else
150 ref_div = 4;
151
152 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
153 ref_div + 1, 0xFFF, 2, 30, ~0,
154 &fb_div, &vclk_div, &dclk_div);
155 if (r)
156 return r;
157
158 if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
159 fb_div >>= 1;
160 else
161 fb_div |= 1;
162
163 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
164 if (r)
165 return r;
166
167 /* assert PLL_RESET */
168 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
169
170 /* For RS780 we have to choose ref clk */
171 if (rdev->family >= CHIP_RS780)
172 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
173 ~UPLL_REFCLK_SRC_SEL_MASK);
174
175 /* set the required fb, ref and post divider values */
176 WREG32_P(CG_UPLL_FUNC_CNTL,
177 UPLL_FB_DIV(fb_div) |
178 UPLL_REF_DIV(ref_div),
179 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
180 WREG32_P(CG_UPLL_FUNC_CNTL_2,
181 UPLL_SW_HILEN(vclk_div >> 1) |
182 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
183 UPLL_SW_HILEN2(dclk_div >> 1) |
184 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
185 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
186 ~UPLL_SW_MASK);
187
188 /* give the PLL some time to settle */
189 mdelay(15);
190
191 /* deassert PLL_RESET */
192 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
193
194 mdelay(15);
195
196 /* deassert BYPASS EN */
197 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
198
199 if (rdev->family >= CHIP_RS780)
200 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
201
202 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
203 if (r)
204 return r;
205
206 /* switch VCLK and DCLK selection */
207 WREG32_P(CG_UPLL_FUNC_CNTL_2,
208 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
209 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
210
211 mdelay(100);
212
125 return 0; 213 return 0;
126} 214}
127 215
@@ -992,6 +1080,8 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
992 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 1080 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
993 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 1081 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
994 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 1082 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1083 WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
1084 WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
995 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1085 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
996 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1086 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
997 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1087 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1042,6 +1132,8 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev)
1042 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 1132 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1043 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 1133 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1044 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 1134 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1135 WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
1136 WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
1045 radeon_gart_table_vram_unpin(rdev); 1137 radeon_gart_table_vram_unpin(rdev);
1046} 1138}
1047 1139
@@ -1338,7 +1430,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
1338 if (rdev->vram_scratch.robj == NULL) { 1430 if (rdev->vram_scratch.robj == NULL) {
1339 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, 1431 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1340 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 1432 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1341 0, NULL, &rdev->vram_scratch.robj); 1433 0, NULL, NULL, &rdev->vram_scratch.robj);
1342 if (r) { 1434 if (r) {
1343 return r; 1435 return r;
1344 } 1436 }
@@ -2792,12 +2884,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2792 * Used by the radeon ttm implementation to move pages if 2884 * Used by the radeon ttm implementation to move pages if
2793 * registered as the asic copy callback. 2885 * registered as the asic copy callback.
2794 */ 2886 */
2795int r600_copy_cpdma(struct radeon_device *rdev, 2887struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2796 uint64_t src_offset, uint64_t dst_offset, 2888 uint64_t src_offset, uint64_t dst_offset,
2797 unsigned num_gpu_pages, 2889 unsigned num_gpu_pages,
2798 struct radeon_fence **fence) 2890 struct reservation_object *resv)
2799{ 2891{
2800 struct radeon_semaphore *sem = NULL; 2892 struct radeon_semaphore *sem = NULL;
2893 struct radeon_fence *fence;
2801 int ring_index = rdev->asic->copy.blit_ring_index; 2894 int ring_index = rdev->asic->copy.blit_ring_index;
2802 struct radeon_ring *ring = &rdev->ring[ring_index]; 2895 struct radeon_ring *ring = &rdev->ring[ring_index];
2803 u32 size_in_bytes, cur_size_in_bytes, tmp; 2896 u32 size_in_bytes, cur_size_in_bytes, tmp;
@@ -2807,7 +2900,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2807 r = radeon_semaphore_create(rdev, &sem); 2900 r = radeon_semaphore_create(rdev, &sem);
2808 if (r) { 2901 if (r) {
2809 DRM_ERROR("radeon: moving bo (%d).\n", r); 2902 DRM_ERROR("radeon: moving bo (%d).\n", r);
2810 return r; 2903 return ERR_PTR(r);
2811 } 2904 }
2812 2905
2813 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 2906 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -2816,10 +2909,10 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2816 if (r) { 2909 if (r) {
2817 DRM_ERROR("radeon: moving bo (%d).\n", r); 2910 DRM_ERROR("radeon: moving bo (%d).\n", r);
2818 radeon_semaphore_free(rdev, &sem, NULL); 2911 radeon_semaphore_free(rdev, &sem, NULL);
2819 return r; 2912 return ERR_PTR(r);
2820 } 2913 }
2821 2914
2822 radeon_semaphore_sync_to(sem, *fence); 2915 radeon_semaphore_sync_resv(rdev, sem, resv, false);
2823 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 2916 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
2824 2917
2825 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2918 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -2846,17 +2939,17 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2846 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2939 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2847 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); 2940 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2848 2941
2849 r = radeon_fence_emit(rdev, fence, ring->idx); 2942 r = radeon_fence_emit(rdev, &fence, ring->idx);
2850 if (r) { 2943 if (r) {
2851 radeon_ring_unlock_undo(rdev, ring); 2944 radeon_ring_unlock_undo(rdev, ring);
2852 radeon_semaphore_free(rdev, &sem, NULL); 2945 radeon_semaphore_free(rdev, &sem, NULL);
2853 return r; 2946 return ERR_PTR(r);
2854 } 2947 }
2855 2948
2856 radeon_ring_unlock_commit(rdev, ring, false); 2949 radeon_ring_unlock_commit(rdev, ring, false);
2857 radeon_semaphore_free(rdev, &sem, *fence); 2950 radeon_semaphore_free(rdev, &sem, fence);
2858 2951
2859 return r; 2952 return fence;
2860} 2953}
2861 2954
2862int r600_set_surface_reg(struct radeon_device *rdev, int reg, 2955int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -2907,6 +3000,18 @@ static int r600_startup(struct radeon_device *rdev)
2907 return r; 3000 return r;
2908 } 3001 }
2909 3002
3003 if (rdev->has_uvd) {
3004 r = uvd_v1_0_resume(rdev);
3005 if (!r) {
3006 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
3007 if (r) {
3008 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
3009 }
3010 }
3011 if (r)
3012 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
3013 }
3014
2910 /* Enable IRQ */ 3015 /* Enable IRQ */
2911 if (!rdev->irq.installed) { 3016 if (!rdev->irq.installed) {
2912 r = radeon_irq_kms_init(rdev); 3017 r = radeon_irq_kms_init(rdev);
@@ -2935,6 +3040,18 @@ static int r600_startup(struct radeon_device *rdev)
2935 if (r) 3040 if (r)
2936 return r; 3041 return r;
2937 3042
3043 if (rdev->has_uvd) {
3044 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
3045 if (ring->ring_size) {
3046 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
3047 RADEON_CP_PACKET2);
3048 if (!r)
3049 r = uvd_v1_0_init(rdev);
3050 if (r)
3051 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
3052 }
3053 }
3054
2938 r = radeon_ib_pool_init(rdev); 3055 r = radeon_ib_pool_init(rdev);
2939 if (r) { 3056 if (r) {
2940 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 3057 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2994,6 +3111,10 @@ int r600_suspend(struct radeon_device *rdev)
2994 radeon_pm_suspend(rdev); 3111 radeon_pm_suspend(rdev);
2995 r600_audio_fini(rdev); 3112 r600_audio_fini(rdev);
2996 r600_cp_stop(rdev); 3113 r600_cp_stop(rdev);
3114 if (rdev->has_uvd) {
3115 uvd_v1_0_fini(rdev);
3116 radeon_uvd_suspend(rdev);
3117 }
2997 r600_irq_suspend(rdev); 3118 r600_irq_suspend(rdev);
2998 radeon_wb_disable(rdev); 3119 radeon_wb_disable(rdev);
2999 r600_pcie_gart_disable(rdev); 3120 r600_pcie_gart_disable(rdev);
@@ -3073,6 +3194,14 @@ int r600_init(struct radeon_device *rdev)
3073 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3194 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3074 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3195 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3075 3196
3197 if (rdev->has_uvd) {
3198 r = radeon_uvd_init(rdev);
3199 if (!r) {
3200 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
3201 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
3202 }
3203 }
3204
3076 rdev->ih.ring_obj = NULL; 3205 rdev->ih.ring_obj = NULL;
3077 r600_ih_ring_init(rdev, 64 * 1024); 3206 r600_ih_ring_init(rdev, 64 * 1024);
3078 3207
@@ -3102,6 +3231,10 @@ void r600_fini(struct radeon_device *rdev)
3102 r600_audio_fini(rdev); 3231 r600_audio_fini(rdev);
3103 r600_cp_fini(rdev); 3232 r600_cp_fini(rdev);
3104 r600_irq_fini(rdev); 3233 r600_irq_fini(rdev);
3234 if (rdev->has_uvd) {
3235 uvd_v1_0_fini(rdev);
3236 radeon_uvd_fini(rdev);
3237 }
3105 radeon_wb_fini(rdev); 3238 radeon_wb_fini(rdev);
3106 radeon_ib_pool_fini(rdev); 3239 radeon_ib_pool_fini(rdev);
3107 radeon_irq_kms_fini(rdev); 3240 radeon_irq_kms_fini(rdev);
@@ -3235,7 +3368,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
3235 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3368 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3236 PAGE_SIZE, true, 3369 PAGE_SIZE, true,
3237 RADEON_GEM_DOMAIN_GTT, 0, 3370 RADEON_GEM_DOMAIN_GTT, 0,
3238 NULL, &rdev->ih.ring_obj); 3371 NULL, NULL, &rdev->ih.ring_obj);
3239 if (r) { 3372 if (r) {
3240 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3373 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3241 return r; 3374 return r;
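UVD bring-up on r6xx follows the same optional-engine pattern used on newer asics, always guarded by rdev->has_uvd. A sketch of the lifecycle pairing these hunks add (not verbatim):

    /*
     * r600_init()    -> radeon_uvd_init() + r600_ring_init(UVD ring, 4096)
     * r600_startup() -> uvd_v1_0_resume(), fence ring start,
     *                   radeon_ring_init() + uvd_v1_0_init()
     * r600_suspend() -> uvd_v1_0_fini() + radeon_uvd_suspend()
     * r600_fini()    -> uvd_v1_0_fini() + radeon_uvd_fini()
     *
     * Any failure just zeroes ring_size so the UVD ring stays disabled
     * without failing overall GPU bring-up.
     */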
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
deleted file mode 100644
index bffac10c4296..000000000000
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Christian König.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Christian König
25 */
26#include <drm/drmP.h>
27#include "radeon.h"
28#include "radeon_reg.h"
29#include "radeon_asic.h"
30#include "atom.h"
31
32/*
33 * check if enc_priv stores radeon_encoder_atom_dig
34 */
35static bool radeon_dig_encoder(struct drm_encoder *encoder)
36{
37 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
38 switch (radeon_encoder->encoder_id) {
39 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
40 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
41 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
42 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
43 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
44 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
45 case ENCODER_OBJECT_ID_INTERNAL_DDI:
46 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
47 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
48 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
49 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
50 return true;
51 }
52 return false;
53}
54
55/*
56 * check if the chipset is supported
57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{
60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
61}
62
63struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
64{
65 struct r600_audio_pin status;
66 uint32_t value;
67
68 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
69
70 /* number of channels */
71 status.channels = (value & 0x7) + 1;
72
73 /* bits per sample */
74 switch ((value & 0xF0) >> 4) {
75 case 0x0:
76 status.bits_per_sample = 8;
77 break;
78 case 0x1:
79 status.bits_per_sample = 16;
80 break;
81 case 0x2:
82 status.bits_per_sample = 20;
83 break;
84 case 0x3:
85 status.bits_per_sample = 24;
86 break;
87 case 0x4:
88 status.bits_per_sample = 32;
89 break;
90 default:
91 dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
92 (int)value);
93 status.bits_per_sample = 16;
94 }
95
96 /* current sampling rate in Hz */
97 if (value & 0x4000)
98 status.rate = 44100;
99 else
100 status.rate = 48000;
101 status.rate *= ((value >> 11) & 0x7) + 1;
102 status.rate /= ((value >> 8) & 0x7) + 1;
103
104 value = RREG32(R600_AUDIO_STATUS_BITS);
105
106 /* iec 60958 status bits */
107 status.status_bits = value & 0xff;
108
109 /* iec 60958 category code */
110 status.category_code = (value >> 8) & 0xff;
111
112 return status;
113}
114
115/*
116 * update all hdmi interfaces with current audio parameters
117 */
118void r600_audio_update_hdmi(struct work_struct *work)
119{
120 struct radeon_device *rdev = container_of(work, struct radeon_device,
121 audio_work);
122 struct drm_device *dev = rdev->ddev;
123 struct r600_audio_pin audio_status = r600_audio_status(rdev);
124 struct drm_encoder *encoder;
125 bool changed = false;
126
127 if (rdev->audio.pin[0].channels != audio_status.channels ||
128 rdev->audio.pin[0].rate != audio_status.rate ||
129 rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
130 rdev->audio.pin[0].status_bits != audio_status.status_bits ||
131 rdev->audio.pin[0].category_code != audio_status.category_code) {
132 rdev->audio.pin[0] = audio_status;
133 changed = true;
134 }
135
136 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
137 if (!radeon_dig_encoder(encoder))
138 continue;
139 if (changed || r600_hdmi_buffer_status_changed(encoder))
140 r600_hdmi_update_audio_settings(encoder);
141 }
142}
143
144/* enable the audio stream */
145void r600_audio_enable(struct radeon_device *rdev,
146 struct r600_audio_pin *pin,
147 bool enable)
148{
149 u32 value = 0;
150
151 if (!pin)
152 return;
153
154 if (ASIC_IS_DCE4(rdev)) {
155 if (enable) {
156 value |= 0x81000000; /* Required to enable audio */
157 value |= 0x0e1000f0; /* fglrx sets that too */
158 }
159 WREG32(EVERGREEN_AUDIO_ENABLE, value);
160 } else {
161 WREG32_P(R600_AUDIO_ENABLE,
162 enable ? 0x81000000 : 0x0, ~0x81000000);
163 }
164}
165
166/*
167 * initialize the audio vars
168 */
169int r600_audio_init(struct radeon_device *rdev)
170{
171 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
172 return 0;
173
174 rdev->audio.enabled = true;
175
176 rdev->audio.num_pins = 1;
177 rdev->audio.pin[0].channels = -1;
178 rdev->audio.pin[0].rate = -1;
179 rdev->audio.pin[0].bits_per_sample = -1;
180 rdev->audio.pin[0].status_bits = 0;
181 rdev->audio.pin[0].category_code = 0;
182 rdev->audio.pin[0].id = 0;
183 /* disable audio. it will be set up later */
184 r600_audio_enable(rdev, &rdev->audio.pin[0], false);
185
186 return 0;
187}
188
189/*
190 * release the audio timer
191 * TODO: How to do this correctly on SMP systems?
192 */
193void r600_audio_fini(struct radeon_device *rdev)
194{
195 if (!rdev->audio.enabled)
196 return;
197
198 r600_audio_enable(rdev, &rdev->audio.pin[0], false);
199
200 rdev->audio.enabled = false;
201}
202
203struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
204{
205 /* only one pin on 6xx-NI */
206 return &rdev->audio.pin[0];
207}
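The whole of r600_audio.c above is deleted; its helpers reappear nearly verbatim in r600_hdmi.c further down, with r600_audio_enable() switching to the DCE-style pin mask. For reference, the sample-rate decode in r600_audio_status() composes a base rate with a multiplier and divisor. A worked example (register value invented):

    /*
     * value = 0x4800: bit 14 set        -> base rate 44100 Hz
     *         (value >> 11) & 0x7 == 1  -> rate *= 1 + 1
     *         (value >> 8)  & 0x7 == 0  -> rate /= 0 + 1
     * result: 44100 * 2 / 1 = 88200 Hz
     */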
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 8c9b7e26533c..09e3f39925fa 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1949,15 +1949,15 @@ int r600_do_cleanup_cp(struct drm_device *dev)
1949#if __OS_HAS_AGP 1949#if __OS_HAS_AGP
1950 if (dev_priv->flags & RADEON_IS_AGP) { 1950 if (dev_priv->flags & RADEON_IS_AGP) {
1951 if (dev_priv->cp_ring != NULL) { 1951 if (dev_priv->cp_ring != NULL) {
1952 drm_core_ioremapfree(dev_priv->cp_ring, dev); 1952 drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
1953 dev_priv->cp_ring = NULL; 1953 dev_priv->cp_ring = NULL;
1954 } 1954 }
1955 if (dev_priv->ring_rptr != NULL) { 1955 if (dev_priv->ring_rptr != NULL) {
1956 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1956 drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
1957 dev_priv->ring_rptr = NULL; 1957 dev_priv->ring_rptr = NULL;
1958 } 1958 }
1959 if (dev->agp_buffer_map != NULL) { 1959 if (dev->agp_buffer_map != NULL) {
1960 drm_core_ioremapfree(dev->agp_buffer_map, dev); 1960 drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
1961 dev->agp_buffer_map = NULL; 1961 dev->agp_buffer_map = NULL;
1962 } 1962 }
1963 } else 1963 } else
@@ -1968,7 +1968,7 @@ int r600_do_cleanup_cp(struct drm_device *dev)
1968 r600_page_table_cleanup(dev, &dev_priv->gart_info); 1968 r600_page_table_cleanup(dev, &dev_priv->gart_info);
1969 1969
1970 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { 1970 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
1971 drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); 1971 drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev);
1972 dev_priv->gart_info.addr = NULL; 1972 dev_priv->gart_info.addr = NULL;
1973 } 1973 }
1974 } 1974 }
@@ -2052,27 +2052,27 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2052 dev_priv->buffers_offset = init->buffers_offset; 2052 dev_priv->buffers_offset = init->buffers_offset;
2053 dev_priv->gart_textures_offset = init->gart_textures_offset; 2053 dev_priv->gart_textures_offset = init->gart_textures_offset;
2054 2054
2055 master_priv->sarea = drm_getsarea(dev); 2055 master_priv->sarea = drm_legacy_getsarea(dev);
2056 if (!master_priv->sarea) { 2056 if (!master_priv->sarea) {
2057 DRM_ERROR("could not find sarea!\n"); 2057 DRM_ERROR("could not find sarea!\n");
2058 r600_do_cleanup_cp(dev); 2058 r600_do_cleanup_cp(dev);
2059 return -EINVAL; 2059 return -EINVAL;
2060 } 2060 }
2061 2061
2062 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 2062 dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset);
2063 if (!dev_priv->cp_ring) { 2063 if (!dev_priv->cp_ring) {
2064 DRM_ERROR("could not find cp ring region!\n"); 2064 DRM_ERROR("could not find cp ring region!\n");
2065 r600_do_cleanup_cp(dev); 2065 r600_do_cleanup_cp(dev);
2066 return -EINVAL; 2066 return -EINVAL;
2067 } 2067 }
2068 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 2068 dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
2069 if (!dev_priv->ring_rptr) { 2069 if (!dev_priv->ring_rptr) {
2070 DRM_ERROR("could not find ring read pointer!\n"); 2070 DRM_ERROR("could not find ring read pointer!\n");
2071 r600_do_cleanup_cp(dev); 2071 r600_do_cleanup_cp(dev);
2072 return -EINVAL; 2072 return -EINVAL;
2073 } 2073 }
2074 dev->agp_buffer_token = init->buffers_offset; 2074 dev->agp_buffer_token = init->buffers_offset;
2075 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 2075 dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
2076 if (!dev->agp_buffer_map) { 2076 if (!dev->agp_buffer_map) {
2077 DRM_ERROR("could not find dma buffer region!\n"); 2077 DRM_ERROR("could not find dma buffer region!\n");
2078 r600_do_cleanup_cp(dev); 2078 r600_do_cleanup_cp(dev);
@@ -2081,7 +2081,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2081 2081
2082 if (init->gart_textures_offset) { 2082 if (init->gart_textures_offset) {
2083 dev_priv->gart_textures = 2083 dev_priv->gart_textures =
2084 drm_core_findmap(dev, init->gart_textures_offset); 2084 drm_legacy_findmap(dev, init->gart_textures_offset);
2085 if (!dev_priv->gart_textures) { 2085 if (!dev_priv->gart_textures) {
2086 DRM_ERROR("could not find GART texture region!\n"); 2086 DRM_ERROR("could not find GART texture region!\n");
2087 r600_do_cleanup_cp(dev); 2087 r600_do_cleanup_cp(dev);
@@ -2092,9 +2092,9 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2092#if __OS_HAS_AGP 2092#if __OS_HAS_AGP
2093 /* XXX */ 2093 /* XXX */
2094 if (dev_priv->flags & RADEON_IS_AGP) { 2094 if (dev_priv->flags & RADEON_IS_AGP) {
2095 drm_core_ioremap_wc(dev_priv->cp_ring, dev); 2095 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
2096 drm_core_ioremap_wc(dev_priv->ring_rptr, dev); 2096 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
2097 drm_core_ioremap_wc(dev->agp_buffer_map, dev); 2097 drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
2098 if (!dev_priv->cp_ring->handle || 2098 if (!dev_priv->cp_ring->handle ||
2099 !dev_priv->ring_rptr->handle || 2099 !dev_priv->ring_rptr->handle ||
2100 !dev->agp_buffer_map->handle) { 2100 !dev->agp_buffer_map->handle) {
@@ -2235,7 +2235,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
2235 dev_priv->gart_info.mapping.size = 2235 dev_priv->gart_info.mapping.size =
2236 dev_priv->gart_info.table_size; 2236 dev_priv->gart_info.table_size;
2237 2237
2238 drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); 2238 drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev);
2239 if (!dev_priv->gart_info.mapping.handle) { 2239 if (!dev_priv->gart_info.mapping.handle) {
2240 DRM_ERROR("ioremap failed.\n"); 2240 DRM_ERROR("ioremap failed.\n");
2241 r600_do_cleanup_cp(dev); 2241 r600_do_cleanup_cp(dev);
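This hunk is a mechanical rename, part of fencing the legacy (UMS) map helpers off behind a drm_legacy_ prefix:

    /*
     * drm_getsarea()         -> drm_legacy_getsarea()
     * drm_core_findmap()     -> drm_legacy_findmap()
     * drm_core_ioremap_wc()  -> drm_legacy_ioremap_wc()
     * drm_core_ioremapfree() -> drm_legacy_ioremapfree()
     */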
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index a908daa006d2..100189ec5fa8 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -427,18 +427,19 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
427 * @src_offset: src GPU address 427 * @src_offset: src GPU address
428 * @dst_offset: dst GPU address 428 * @dst_offset: dst GPU address
429 * @num_gpu_pages: number of GPU pages to xfer 429 * @num_gpu_pages: number of GPU pages to xfer
430 * @fence: radeon fence object 430 * @resv: reservation object to sync to
431 * 431 *
432 * Copy GPU paging using the DMA engine (r6xx). 432 * Copy GPU paging using the DMA engine (r6xx).
433 * Used by the radeon ttm implementation to move pages if 433 * Used by the radeon ttm implementation to move pages if
434 * registered as the asic copy callback. 434 * registered as the asic copy callback.
435 */ 435 */
436int r600_copy_dma(struct radeon_device *rdev, 436struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
437 uint64_t src_offset, uint64_t dst_offset, 437 uint64_t src_offset, uint64_t dst_offset,
438 unsigned num_gpu_pages, 438 unsigned num_gpu_pages,
439 struct radeon_fence **fence) 439 struct reservation_object *resv)
440{ 440{
441 struct radeon_semaphore *sem = NULL; 441 struct radeon_semaphore *sem = NULL;
442 struct radeon_fence *fence;
442 int ring_index = rdev->asic->copy.dma_ring_index; 443 int ring_index = rdev->asic->copy.dma_ring_index;
443 struct radeon_ring *ring = &rdev->ring[ring_index]; 444 struct radeon_ring *ring = &rdev->ring[ring_index];
444 u32 size_in_dw, cur_size_in_dw; 445 u32 size_in_dw, cur_size_in_dw;
@@ -448,7 +449,7 @@ int r600_copy_dma(struct radeon_device *rdev,
448 r = radeon_semaphore_create(rdev, &sem); 449 r = radeon_semaphore_create(rdev, &sem);
449 if (r) { 450 if (r) {
450 DRM_ERROR("radeon: moving bo (%d).\n", r); 451 DRM_ERROR("radeon: moving bo (%d).\n", r);
451 return r; 452 return ERR_PTR(r);
452 } 453 }
453 454
454 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 455 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -457,10 +458,10 @@ int r600_copy_dma(struct radeon_device *rdev,
457 if (r) { 458 if (r) {
458 DRM_ERROR("radeon: moving bo (%d).\n", r); 459 DRM_ERROR("radeon: moving bo (%d).\n", r);
459 radeon_semaphore_free(rdev, &sem, NULL); 460 radeon_semaphore_free(rdev, &sem, NULL);
460 return r; 461 return ERR_PTR(r);
461 } 462 }
462 463
463 radeon_semaphore_sync_to(sem, *fence); 464 radeon_semaphore_sync_resv(rdev, sem, resv, false);
464 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 465 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
465 466
466 for (i = 0; i < num_loops; i++) { 467 for (i = 0; i < num_loops; i++) {
@@ -477,15 +478,15 @@ int r600_copy_dma(struct radeon_device *rdev,
477 dst_offset += cur_size_in_dw * 4; 478 dst_offset += cur_size_in_dw * 4;
478 } 479 }
479 480
480 r = radeon_fence_emit(rdev, fence, ring->idx); 481 r = radeon_fence_emit(rdev, &fence, ring->idx);
481 if (r) { 482 if (r) {
482 radeon_ring_unlock_undo(rdev, ring); 483 radeon_ring_unlock_undo(rdev, ring);
483 radeon_semaphore_free(rdev, &sem, NULL); 484 radeon_semaphore_free(rdev, &sem, NULL);
484 return r; 485 return ERR_PTR(r);
485 } 486 }
486 487
487 radeon_ring_unlock_commit(rdev, ring, false); 488 radeon_ring_unlock_commit(rdev, ring, false);
488 radeon_semaphore_free(rdev, &sem, *fence); 489 radeon_semaphore_free(rdev, &sem, fence);
489 490
490 return r; 491 return fence;
491} 492}
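radeon_semaphore_sync_resv() replaces radeon_semaphore_sync_to() throughout these copy paths: rather than syncing to one fence handed in by the caller, it serializes against the fences already stored in the buffer's reservation object. A sketch of the contract, assuming the prototype declared later in the radeon.h hunk:

    /*
     * Always sync to resv's exclusive fence. With shared == false (the
     * copy paths want exclusive access to the destination) also sync to
     * every shared fence. Fences from other drivers cannot go through
     * GPU semaphores and are expected to be waited on from the CPU.
     */
    radeon_semaphore_sync_resv(rdev, sem, resv, false);
    radeon_semaphore_sync_rings(rdev, sem, ring->idx);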
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 26ef8ced6f89..b90dc0eb08e6 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -72,6 +72,169 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
72 72
73 73
74/* 74/*
75 * check if the chipset is supported
76 */
77static int r600_audio_chipset_supported(struct radeon_device *rdev)
78{
79 return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
80}
81
82static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
83{
84 struct r600_audio_pin status;
85 uint32_t value;
86
87 value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
88
89 /* number of channels */
90 status.channels = (value & 0x7) + 1;
91
92 /* bits per sample */
93 switch ((value & 0xF0) >> 4) {
94 case 0x0:
95 status.bits_per_sample = 8;
96 break;
97 case 0x1:
98 status.bits_per_sample = 16;
99 break;
100 case 0x2:
101 status.bits_per_sample = 20;
102 break;
103 case 0x3:
104 status.bits_per_sample = 24;
105 break;
106 case 0x4:
107 status.bits_per_sample = 32;
108 break;
109 default:
110 dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
111 (int)value);
112 status.bits_per_sample = 16;
113 }
114
115 /* current sampling rate in Hz */
116 if (value & 0x4000)
117 status.rate = 44100;
118 else
119 status.rate = 48000;
120 status.rate *= ((value >> 11) & 0x7) + 1;
121 status.rate /= ((value >> 8) & 0x7) + 1;
122
123 value = RREG32(R600_AUDIO_STATUS_BITS);
124
125 /* iec 60958 status bits */
126 status.status_bits = value & 0xff;
127
128 /* iec 60958 category code */
129 status.category_code = (value >> 8) & 0xff;
130
131 return status;
132}
133
134/*
135 * update all hdmi interfaces with current audio parameters
136 */
137void r600_audio_update_hdmi(struct work_struct *work)
138{
139 struct radeon_device *rdev = container_of(work, struct radeon_device,
140 audio_work);
141 struct drm_device *dev = rdev->ddev;
142 struct r600_audio_pin audio_status = r600_audio_status(rdev);
143 struct drm_encoder *encoder;
144 bool changed = false;
145
146 if (rdev->audio.pin[0].channels != audio_status.channels ||
147 rdev->audio.pin[0].rate != audio_status.rate ||
148 rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
149 rdev->audio.pin[0].status_bits != audio_status.status_bits ||
150 rdev->audio.pin[0].category_code != audio_status.category_code) {
151 rdev->audio.pin[0] = audio_status;
152 changed = true;
153 }
154
155 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
156 if (!radeon_encoder_is_digital(encoder))
157 continue;
158 if (changed || r600_hdmi_buffer_status_changed(encoder))
159 r600_hdmi_update_audio_settings(encoder);
160 }
161}
162
163/* enable the audio stream */
164void r600_audio_enable(struct radeon_device *rdev,
165 struct r600_audio_pin *pin,
166 u8 enable_mask)
167{
168 u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
169
170 if (!pin)
171 return;
172
173 if (enable_mask) {
174 tmp |= AUDIO_ENABLED;
175 if (enable_mask & 1)
176 tmp |= PIN0_AUDIO_ENABLED;
177 if (enable_mask & 2)
178 tmp |= PIN1_AUDIO_ENABLED;
179 if (enable_mask & 4)
180 tmp |= PIN2_AUDIO_ENABLED;
181 if (enable_mask & 8)
182 tmp |= PIN3_AUDIO_ENABLED;
183 } else {
184 tmp &= ~(AUDIO_ENABLED |
185 PIN0_AUDIO_ENABLED |
186 PIN1_AUDIO_ENABLED |
187 PIN2_AUDIO_ENABLED |
188 PIN3_AUDIO_ENABLED);
189 }
190
191 WREG32(AZ_HOT_PLUG_CONTROL, tmp);
192}
193
194/*
195 * initialize the audio vars
196 */
197int r600_audio_init(struct radeon_device *rdev)
198{
199 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
200 return 0;
201
202 rdev->audio.enabled = true;
203
204 rdev->audio.num_pins = 1;
205 rdev->audio.pin[0].channels = -1;
206 rdev->audio.pin[0].rate = -1;
207 rdev->audio.pin[0].bits_per_sample = -1;
208 rdev->audio.pin[0].status_bits = 0;
209 rdev->audio.pin[0].category_code = 0;
210 rdev->audio.pin[0].id = 0;
211 /* disable audio. it will be set up later */
212 r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
213
214 return 0;
215}
216
217/*
218 * release the audio timer
219 * TODO: How to do this correctly on SMP systems?
220 */
221void r600_audio_fini(struct radeon_device *rdev)
222{
223 if (!rdev->audio.enabled)
224 return;
225
226 r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
227
228 rdev->audio.enabled = false;
229}
230
231struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
232{
233 /* only one pin on 6xx-NI */
234 return &rdev->audio.pin[0];
235}
236
237/*
75 * calculate CTS and N values if they are not found in the table 238 * calculate CTS and N values if they are not found in the table
76 */ 239 */
77static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq) 240static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
@@ -357,7 +520,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
357 520
358 /* disable audio prior to setting up hw */ 521 /* disable audio prior to setting up hw */
359 dig->afmt->pin = r600_audio_get_pin(rdev); 522 dig->afmt->pin = r600_audio_get_pin(rdev);
360 r600_audio_enable(rdev, dig->afmt->pin, false); 523 r600_audio_enable(rdev, dig->afmt->pin, 0xf);
361 524
362 r600_audio_set_dto(encoder, mode->clock); 525 r600_audio_set_dto(encoder, mode->clock);
363 526
@@ -443,7 +606,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
443 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); 606 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
444 607
445 /* enable audio after setting up hw */ 608 /* enable audio after setting up hw */
446 r600_audio_enable(rdev, dig->afmt->pin, true); 609 r600_audio_enable(rdev, dig->afmt->pin, 0xf);
447} 610}
448 611
449/** 612/**
@@ -528,6 +691,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
528 if (!enable && !dig->afmt->enabled) 691 if (!enable && !dig->afmt->enabled)
529 return; 692 return;
530 693
694 if (!enable && dig->afmt->pin) {
695 r600_audio_enable(rdev, dig->afmt->pin, 0);
696 dig->afmt->pin = NULL;
697 }
698
531 /* Older chipsets require setting HDMI and routing manually */ 699 /* Older chipsets require setting HDMI and routing manually */
532 if (!ASIC_IS_DCE3(rdev)) { 700 if (!ASIC_IS_DCE3(rdev)) {
533 if (enable) 701 if (enable)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 31e1052ad3e3..1e8495cca41e 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -323,11 +323,12 @@
323#define HDP_TILING_CONFIG 0x2F3C 323#define HDP_TILING_CONFIG 0x2F3C
324#define HDP_DEBUG1 0x2F34 324#define HDP_DEBUG1 0x2F34
325 325
326#define MC_CONFIG 0x2000
326#define MC_VM_AGP_TOP 0x2184 327#define MC_VM_AGP_TOP 0x2184
327#define MC_VM_AGP_BOT 0x2188 328#define MC_VM_AGP_BOT 0x2188
328#define MC_VM_AGP_BASE 0x218C 329#define MC_VM_AGP_BASE 0x218C
329#define MC_VM_FB_LOCATION 0x2180 330#define MC_VM_FB_LOCATION 0x2180
330#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C 331#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL 0x2124
331#define ENABLE_L1_TLB (1 << 0) 332#define ENABLE_L1_TLB (1 << 0)
332#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) 333#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
333#define ENABLE_L1_STRICT_ORDERING (1 << 2) 334#define ENABLE_L1_STRICT_ORDERING (1 << 2)
@@ -347,12 +348,14 @@
347#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15) 348#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
348#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000 349#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
349#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15 350#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
351#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
350#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0 352#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
351#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC 353#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
352#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204 354#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
353#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208 355#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
354#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C 356#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
355#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200 357#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
358#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL 0x212c
356#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4 359#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
357#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8 360#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
358#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210 361#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
@@ -366,6 +369,8 @@
366#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 369#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
367#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 370#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
368 371
372#define RS_DQ_RD_RET_CONF 0x2348
373
369#define PA_CL_ENHANCE 0x8A14 374#define PA_CL_ENHANCE 0x8A14
370#define CLIP_VTX_REORDER_ENA (1 << 0) 375#define CLIP_VTX_REORDER_ENA (1 << 0)
371#define NUM_CLIP_SEQ(x) ((x) << 1) 376#define NUM_CLIP_SEQ(x) ((x) << 1)
@@ -922,6 +927,23 @@
922# define TARGET_LINK_SPEED_MASK (0xf << 0) 927# define TARGET_LINK_SPEED_MASK (0xf << 0)
923# define SELECTABLE_DEEMPHASIS (1 << 6) 928# define SELECTABLE_DEEMPHASIS (1 << 6)
924 929
930/* Audio */
931#define AZ_HOT_PLUG_CONTROL 0x7300
932# define AZ_FORCE_CODEC_WAKE (1 << 0)
933# define JACK_DETECTION_ENABLE (1 << 4)
934# define UNSOLICITED_RESPONSE_ENABLE (1 << 8)
935# define CODEC_HOT_PLUG_ENABLE (1 << 12)
936# define AUDIO_ENABLED (1 << 31)
937/* DCE3 adds */
938# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
939# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
940# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
941# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
942# define PIN0_AUDIO_ENABLED (1 << 24)
943# define PIN1_AUDIO_ENABLED (1 << 25)
944# define PIN2_AUDIO_ENABLED (1 << 26)
945# define PIN3_AUDIO_ENABLED (1 << 27)
946
925/* Audio clocks DCE 2.0/3.0 */ 947/* Audio clocks DCE 2.0/3.0 */
926#define AUDIO_DTO 0x7340 948#define AUDIO_DTO 0x7340
927# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0) 949# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
@@ -1476,6 +1498,7 @@
1476#define UVD_CGC_GATE 0xf4a8 1498#define UVD_CGC_GATE 0xf4a8
1477#define UVD_LMI_CTRL2 0xf4f4 1499#define UVD_LMI_CTRL2 0xf4f4
1478#define UVD_MASTINT_EN 0xf500 1500#define UVD_MASTINT_EN 0xf500
1501#define UVD_FW_START 0xf51C
1479#define UVD_LMI_ADDR_EXT 0xf594 1502#define UVD_LMI_ADDR_EXT 0xf594
1480#define UVD_LMI_CTRL 0xf598 1503#define UVD_LMI_CTRL 0xf598
1481#define UVD_LMI_SWAP_CNTL 0xf5b4 1504#define UVD_LMI_SWAP_CNTL 0xf5b4
@@ -1488,6 +1511,13 @@
1488#define UVD_MPC_SET_MUX 0xf5f4 1511#define UVD_MPC_SET_MUX 0xf5f4
1489#define UVD_MPC_SET_ALU 0xf5f8 1512#define UVD_MPC_SET_ALU 0xf5f8
1490 1513
1514#define UVD_VCPU_CACHE_OFFSET0 0xf608
1515#define UVD_VCPU_CACHE_SIZE0 0xf60c
1516#define UVD_VCPU_CACHE_OFFSET1 0xf610
1517#define UVD_VCPU_CACHE_SIZE1 0xf614
1518#define UVD_VCPU_CACHE_OFFSET2 0xf618
1519#define UVD_VCPU_CACHE_SIZE2 0xf61c
1520
1491#define UVD_VCPU_CNTL 0xf660 1521#define UVD_VCPU_CNTL 0xf660
1492#define UVD_SOFT_RESET 0xf680 1522#define UVD_SOFT_RESET 0xf680
1493#define RBC_SOFT_RESET (1<<0) 1523#define RBC_SOFT_RESET (1<<0)
@@ -1517,9 +1547,35 @@
1517 1547
1518#define UVD_CONTEXT_ID 0xf6f4 1548#define UVD_CONTEXT_ID 0xf6f4
1519 1549
1550/* rs780 only */
1551#define GFX_MACRO_BYPASS_CNTL 0x30c0
1552#define SPLL_BYPASS_CNTL (1 << 0)
1553#define UPLL_BYPASS_CNTL (1 << 1)
1554
1555#define CG_UPLL_FUNC_CNTL 0x7e0
1556# define UPLL_RESET_MASK 0x00000001
1557# define UPLL_SLEEP_MASK 0x00000002
1558# define UPLL_BYPASS_EN_MASK 0x00000004
1520# define UPLL_CTLREQ_MASK 0x00000008 1559# define UPLL_CTLREQ_MASK 0x00000008
1560# define UPLL_FB_DIV(x) ((x) << 4)
1561# define UPLL_FB_DIV_MASK 0x0000FFF0
1562# define UPLL_REF_DIV(x) ((x) << 16)
1563# define UPLL_REF_DIV_MASK 0x003F0000
1564# define UPLL_REFCLK_SRC_SEL_MASK 0x20000000
1521# define UPLL_CTLACK_MASK 0x40000000 1565# define UPLL_CTLACK_MASK 0x40000000
1522# define UPLL_CTLACK2_MASK 0x80000000 1566# define UPLL_CTLACK2_MASK 0x80000000
1567#define CG_UPLL_FUNC_CNTL_2 0x7e4
1568# define UPLL_SW_HILEN(x) ((x) << 0)
1569# define UPLL_SW_LOLEN(x) ((x) << 4)
1570# define UPLL_SW_HILEN2(x) ((x) << 8)
1571# define UPLL_SW_LOLEN2(x) ((x) << 12)
1572# define UPLL_DIVEN_MASK 0x00010000
1573# define UPLL_DIVEN2_MASK 0x00020000
1574# define UPLL_SW_MASK 0x0003FFFF
1575# define VCLK_SRC_SEL(x) ((x) << 20)
1576# define VCLK_SRC_SEL_MASK 0x01F00000
1577# define DCLK_SRC_SEL(x) ((x) << 25)
1578# define DCLK_SRC_SEL_MASK 0x3E000000
1523 1579
1524/* 1580/*
1525 * PM4 1581 * PM4
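The UPLL_SW_HILEN/LOLEN pairs encode each divider as high/low phase lengths; r600_set_uvd_clocks() above gives an odd divider the extra cycle in the low phase. A worked example:

    /*
     * vclk_div = 5:
     *   UPLL_SW_HILEN(5 >> 1)             -> high phase 2 cycles
     *   UPLL_SW_LOLEN((5 >> 1) + (5 & 1)) -> low phase 3 cycles
     * total period = 5 input cycles
     */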
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3247bfd14410..f7c4b226a284 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -65,6 +65,8 @@
65#include <linux/list.h> 65#include <linux/list.h>
66#include <linux/kref.h> 66#include <linux/kref.h>
67#include <linux/interval_tree.h> 67#include <linux/interval_tree.h>
68#include <linux/hashtable.h>
69#include <linux/fence.h>
68 70
69#include <ttm/ttm_bo_api.h> 71#include <ttm/ttm_bo_api.h>
70#include <ttm/ttm_bo_driver.h> 72#include <ttm/ttm_bo_driver.h>
@@ -72,6 +74,8 @@
72#include <ttm/ttm_module.h> 74#include <ttm/ttm_module.h>
73#include <ttm/ttm_execbuf_util.h> 75#include <ttm/ttm_execbuf_util.h>
74 76
77#include <drm/drm_gem.h>
78
75#include "radeon_family.h" 79#include "radeon_family.h"
76#include "radeon_mode.h" 80#include "radeon_mode.h"
77#include "radeon_reg.h" 81#include "radeon_reg.h"
@@ -120,9 +124,6 @@ extern int radeon_backlight;
120#define RADEONFB_CONN_LIMIT 4 124#define RADEONFB_CONN_LIMIT 4
121#define RADEON_BIOS_NUM_SCRATCH 8 125#define RADEON_BIOS_NUM_SCRATCH 8
122 126
123/* fence seq are set to this number when signaled */
124#define RADEON_FENCE_SIGNALED_SEQ 0LL
125
126/* internal ring indices */ 127/* internal ring indices */
127/* r1xx+ has gfx CP ring */ 128/* r1xx+ has gfx CP ring */
128#define RADEON_RING_TYPE_GFX_INDEX 0 129#define RADEON_RING_TYPE_GFX_INDEX 0
@@ -350,28 +351,32 @@ extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
350 * Fences. 351 * Fences.
351 */ 352 */
352struct radeon_fence_driver { 353struct radeon_fence_driver {
354 struct radeon_device *rdev;
353 uint32_t scratch_reg; 355 uint32_t scratch_reg;
354 uint64_t gpu_addr; 356 uint64_t gpu_addr;
355 volatile uint32_t *cpu_addr; 357 volatile uint32_t *cpu_addr;
356 /* sync_seq is protected by ring emission lock */ 358 /* sync_seq is protected by ring emission lock */
357 uint64_t sync_seq[RADEON_NUM_RINGS]; 359 uint64_t sync_seq[RADEON_NUM_RINGS];
358 atomic64_t last_seq; 360 atomic64_t last_seq;
359 bool initialized; 361 bool initialized, delayed_irq;
362 struct delayed_work lockup_work;
360}; 363};
361 364
362struct radeon_fence { 365struct radeon_fence {
366 struct fence base;
367
363 struct radeon_device *rdev; 368 struct radeon_device *rdev;
364 struct kref kref;
365 /* protected by radeon_fence.lock */
366 uint64_t seq; 369 uint64_t seq;
367 /* RB, DMA, etc. */ 370 /* RB, DMA, etc. */
368 unsigned ring; 371 unsigned ring;
372
373 wait_queue_t fence_wake;
369}; 374};
370 375
371int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 376int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
372int radeon_fence_driver_init(struct radeon_device *rdev); 377int radeon_fence_driver_init(struct radeon_device *rdev);
373void radeon_fence_driver_fini(struct radeon_device *rdev); 378void radeon_fence_driver_fini(struct radeon_device *rdev);
374void radeon_fence_driver_force_completion(struct radeon_device *rdev); 379void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
375int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); 380int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
376void radeon_fence_process(struct radeon_device *rdev, int ring); 381void radeon_fence_process(struct radeon_device *rdev, int ring);
377bool radeon_fence_signaled(struct radeon_fence *fence); 382bool radeon_fence_signaled(struct radeon_fence *fence);
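struct radeon_fence now embeds a struct fence, which is what lets TTM and other drivers consume radeon fences through the common fence API. Getting from the base object back to the driver fence is the usual container_of conversion; a sketch, assuming the layout above (the real helper should also check f->ops before trusting the cast):

    static inline struct radeon_fence *to_radeon_fence(struct fence *f)
    {
            return container_of(f, struct radeon_fence, base);
    }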
@@ -469,7 +474,7 @@ struct radeon_bo {
469 struct list_head list; 474 struct list_head list;
470 /* Protected by tbo.reserved */ 475 /* Protected by tbo.reserved */
471 u32 initial_domain; 476 u32 initial_domain;
472 u32 placements[3]; 477 struct ttm_place placements[3];
473 struct ttm_placement placement; 478 struct ttm_placement placement;
474 struct ttm_buffer_object tbo; 479 struct ttm_buffer_object tbo;
475 struct ttm_bo_kmap_obj kmap; 480 struct ttm_bo_kmap_obj kmap;
@@ -489,6 +494,9 @@ struct radeon_bo {
489 494
490 struct ttm_bo_kmap_obj dma_buf_vmap; 495 struct ttm_bo_kmap_obj dma_buf_vmap;
491 pid_t pid; 496 pid_t pid;
497
498 struct radeon_mn *mn;
499 struct interval_tree_node mn_it;
492}; 500};
493#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 501#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
494 502
@@ -580,8 +588,12 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
580 struct radeon_semaphore *semaphore); 588 struct radeon_semaphore *semaphore);
581bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 589bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
582 struct radeon_semaphore *semaphore); 590 struct radeon_semaphore *semaphore);
583void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, 591void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
584 struct radeon_fence *fence); 592 struct radeon_fence *fence);
593int radeon_semaphore_sync_resv(struct radeon_device *rdev,
594 struct radeon_semaphore *semaphore,
595 struct reservation_object *resv,
596 bool shared);
585int radeon_semaphore_sync_rings(struct radeon_device *rdev, 597int radeon_semaphore_sync_rings(struct radeon_device *rdev,
586 struct radeon_semaphore *semaphore, 598 struct radeon_semaphore *semaphore,
587 int waiting_ring); 599 int waiting_ring);
@@ -702,7 +714,7 @@ struct radeon_flip_work {
702 uint64_t base; 714 uint64_t base;
703 struct drm_pending_vblank_event *event; 715 struct drm_pending_vblank_event *event;
704 struct radeon_bo *old_rbo; 716 struct radeon_bo *old_rbo;
705 struct radeon_fence *fence; 717 struct fence *fence;
706}; 718};
707 719
708struct r500_irq_stat_regs { 720struct r500_irq_stat_regs {
@@ -780,6 +792,7 @@ struct radeon_irq {
780int radeon_irq_kms_init(struct radeon_device *rdev); 792int radeon_irq_kms_init(struct radeon_device *rdev);
781void radeon_irq_kms_fini(struct radeon_device *rdev); 793void radeon_irq_kms_fini(struct radeon_device *rdev);
782void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); 794void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
795bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
783void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); 796void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
784void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); 797void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
785void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); 798void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
@@ -1642,7 +1655,8 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
1642 uint32_t handle, struct radeon_fence **fence); 1655 uint32_t handle, struct radeon_fence **fence);
1643int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, 1656int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
1644 uint32_t handle, struct radeon_fence **fence); 1657 uint32_t handle, struct radeon_fence **fence);
1645void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo); 1658void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
1659 uint32_t allowed_domains);
1646void radeon_uvd_free_handles(struct radeon_device *rdev, 1660void radeon_uvd_free_handles(struct radeon_device *rdev,
1647 struct drm_file *filp); 1661 struct drm_file *filp);
1648int radeon_uvd_cs_parse(struct radeon_cs_parser *parser); 1662int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
@@ -1731,6 +1745,11 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
1731 struct radeon_ring *cpB); 1745 struct radeon_ring *cpB);
1732void radeon_test_syncing(struct radeon_device *rdev); 1746void radeon_test_syncing(struct radeon_device *rdev);
1733 1747
1748/*
1749 * MMU Notifier
1750 */
1751int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
1752void radeon_mn_unregister(struct radeon_bo *bo);
1734 1753
1735/* 1754/*
1736 * Debugfs 1755 * Debugfs
@@ -1845,24 +1864,24 @@ struct radeon_asic {
1845 } display; 1864 } display;
1846 /* copy functions for bo handling */ 1865 /* copy functions for bo handling */
1847 struct { 1866 struct {
1848 int (*blit)(struct radeon_device *rdev, 1867 struct radeon_fence *(*blit)(struct radeon_device *rdev,
1849 uint64_t src_offset, 1868 uint64_t src_offset,
1850 uint64_t dst_offset, 1869 uint64_t dst_offset,
1851 unsigned num_gpu_pages, 1870 unsigned num_gpu_pages,
1852 struct radeon_fence **fence); 1871 struct reservation_object *resv);
1853 u32 blit_ring_index; 1872 u32 blit_ring_index;
1854 int (*dma)(struct radeon_device *rdev, 1873 struct radeon_fence *(*dma)(struct radeon_device *rdev,
1855 uint64_t src_offset, 1874 uint64_t src_offset,
1856 uint64_t dst_offset, 1875 uint64_t dst_offset,
1857 unsigned num_gpu_pages, 1876 unsigned num_gpu_pages,
1858 struct radeon_fence **fence); 1877 struct reservation_object *resv);
1859 u32 dma_ring_index; 1878 u32 dma_ring_index;
1860 /* method used for bo copy */ 1879 /* method used for bo copy */
1861 int (*copy)(struct radeon_device *rdev, 1880 struct radeon_fence *(*copy)(struct radeon_device *rdev,
1862 uint64_t src_offset, 1881 uint64_t src_offset,
1863 uint64_t dst_offset, 1882 uint64_t dst_offset,
1864 unsigned num_gpu_pages, 1883 unsigned num_gpu_pages,
1865 struct radeon_fence **fence); 1884 struct reservation_object *resv);
1866 /* ring used for bo copies */ 1885 /* ring used for bo copies */
1867 u32 copy_ring_index; 1886 u32 copy_ring_index;
1868 } copy; 1887 } copy;
@@ -2144,6 +2163,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
2144 struct drm_file *filp); 2163 struct drm_file *filp);
2145int radeon_gem_create_ioctl(struct drm_device *dev, void *data, 2164int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
2146 struct drm_file *filp); 2165 struct drm_file *filp);
2166int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
2167 struct drm_file *filp);
2147int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, 2168int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
2148 struct drm_file *file_priv); 2169 struct drm_file *file_priv);
2149int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, 2170int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -2300,6 +2321,7 @@ struct radeon_device {
2300 struct radeon_mman mman; 2321 struct radeon_mman mman;
2301 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2322 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
2302 wait_queue_head_t fence_queue; 2323 wait_queue_head_t fence_queue;
2324 unsigned fence_context;
2303 struct mutex ring_lock; 2325 struct mutex ring_lock;
2304 struct radeon_ring ring[RADEON_NUM_RINGS]; 2326 struct radeon_ring ring[RADEON_NUM_RINGS];
2305 bool ib_pool_ready; 2327 bool ib_pool_ready;
@@ -2318,7 +2340,7 @@ struct radeon_device {
2318 bool need_dma32; 2340 bool need_dma32;
2319 bool accel_working; 2341 bool accel_working;
2320 bool fastfb_working; /* IGP feature*/ 2342 bool fastfb_working; /* IGP feature*/
2321 bool needs_reset; 2343 bool needs_reset, in_reset;
2322 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 2344 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
2323 const struct firmware *me_fw; /* all family ME firmware */ 2345 const struct firmware *me_fw; /* all family ME firmware */
2324 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 2346 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
@@ -2339,7 +2361,6 @@ struct radeon_device {
2339 struct radeon_mec mec; 2361 struct radeon_mec mec;
2340 struct work_struct hotplug_work; 2362 struct work_struct hotplug_work;
2341 struct work_struct audio_work; 2363 struct work_struct audio_work;
2342 struct work_struct reset_work;
2343 int num_crtc; /* number of crtcs */ 2364 int num_crtc; /* number of crtcs */
2344 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 2365 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
2345 bool has_uvd; 2366 bool has_uvd;
@@ -2376,6 +2397,9 @@ struct radeon_device {
2376 /* tracking pinned memory */ 2397 /* tracking pinned memory */
2377 u64 vram_pin_size; 2398 u64 vram_pin_size;
2378 u64 gart_pin_size; 2399 u64 gart_pin_size;
2400
2401 struct mutex mn_lock;
2402 DECLARE_HASHTABLE(mn_hash, 7);
2379}; 2403};
2380 2404
2381bool radeon_is_px(struct drm_device *dev); 2405bool radeon_is_px(struct drm_device *dev);
@@ -2431,7 +2455,17 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
2431/* 2455/*
2432 * Cast helper 2456 * Cast helper
2433 */ 2457 */
2434#define to_radeon_fence(p) ((struct radeon_fence *)(p)) 2458extern const struct fence_ops radeon_fence_ops;
2459
2460static inline struct radeon_fence *to_radeon_fence(struct fence *f)
2461{
2462 struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
2463
2464 if (__f->base.ops == &radeon_fence_ops)
2465 return __f;
2466
2467 return NULL;
2468}
2435 2469
2436/* 2470/*
2437 * Registers read & write functions. 2471 * Registers read & write functions.
@@ -2751,18 +2785,25 @@ void radeon_atombios_fini(struct radeon_device *rdev);
2751/* 2785/*
2752 * RING helpers. 2786 * RING helpers.
2753 */ 2787 */
2754#if DRM_DEBUG_CODE == 0 2788
2789/**
2790 * radeon_ring_write - write a value to the ring
2791 *
2792 * @ring: radeon_ring structure holding ring information
2793 * @v: dword (dw) value to write
2794 *
2795 * Write a value to the requested ring buffer (all asics).
2796 */
2755static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) 2797static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
2756{ 2798{
2799 if (ring->count_dw <= 0)
2800 DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
2801
2757 ring->ring[ring->wptr++] = v; 2802 ring->ring[ring->wptr++] = v;
2758 ring->wptr &= ring->ptr_mask; 2803 ring->wptr &= ring->ptr_mask;
2759 ring->count_dw--; 2804 ring->count_dw--;
2760 ring->ring_free_dw--; 2805 ring->ring_free_dw--;
2761} 2806}
2762#else
2763/* With debugging this is just too big to inline */
2764void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2765#endif
2766 2807
2767/* 2808/*
2768 * ASICs macro. 2809 * ASICs macro.
@@ -2801,9 +2842,9 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2801#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m)) 2842#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
2802#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence)) 2843#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
2803#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 2844#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
2804#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 2845#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
2805#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) 2846#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
2806#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) 2847#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
2807#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index 2848#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
2808#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index 2849#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
2809#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index 2850#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
@@ -2877,6 +2918,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
2877extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 2918extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
2878extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 2919extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
2879extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 2920extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
2921extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2922 uint32_t flags);
2923extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
2924extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
2880extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); 2925extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
2881extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 2926extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
2882extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); 2927extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@ -2934,10 +2979,10 @@ struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
2934struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); 2979struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
2935void r600_audio_enable(struct radeon_device *rdev, 2980void r600_audio_enable(struct radeon_device *rdev,
2936 struct r600_audio_pin *pin, 2981 struct r600_audio_pin *pin,
2937 bool enable); 2982 u8 enable_mask);
2938void dce6_audio_enable(struct radeon_device *rdev, 2983void dce6_audio_enable(struct radeon_device *rdev,
2939 struct r600_audio_pin *pin, 2984 struct r600_audio_pin *pin,
2940 bool enable); 2985 u8 enable_mask);
2941 2986
2942/* 2987/*
2943 * R600 vram scratch functions 2988 * R600 vram scratch functions
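[Editor's note, not part of the patch: the copy.blit/copy.dma/copy.copy callbacks above now return the job's fence instead of filling in a radeon_fence pointer, and they take the buffer's reservation_object so the copy can be ordered against other users of the BO. A minimal caller-side sketch, assuming hypothetical src_gpu_addr/dst_gpu_addr values; the NULL resv and the IS_ERR() handling mirror the radeon_benchmark.c hunk further below:

	struct radeon_fence *fence;
	int r;

	/* resv may be NULL when no reservation needs to be respected */
	fence = radeon_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
				size / RADEON_GPU_PAGE_SIZE, NULL);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* submission failed, no fence emitted */

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
]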
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 2dd5847f9b98..850de57069be 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -963,6 +963,19 @@ static struct radeon_asic r600_asic = {
 	},
 };
 
+static struct radeon_asic_ring rv6xx_uvd_ring = {
+	.ib_execute = &uvd_v1_0_ib_execute,
+	.emit_fence = &uvd_v1_0_fence_emit,
+	.emit_semaphore = &uvd_v1_0_semaphore_emit,
+	.cs_parse = &radeon_uvd_cs_parse,
+	.ring_test = &uvd_v1_0_ring_test,
+	.ib_test = &uvd_v1_0_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &uvd_v1_0_get_rptr,
+	.get_wptr = &uvd_v1_0_get_wptr,
+	.set_wptr = &uvd_v1_0_set_wptr,
+};
+
 static struct radeon_asic rv6xx_asic = {
 	.init = &r600_init,
 	.fini = &r600_fini,
@@ -982,6 +995,7 @@ static struct radeon_asic rv6xx_asic = {
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
 		[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
 	},
 	.irq = {
 		.set = &r600_irq_set,
@@ -1072,6 +1086,7 @@ static struct radeon_asic rs780_asic = {
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
 		[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+		[R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
 	},
 	.irq = {
 		.set = &r600_irq_set,
@@ -2296,7 +2311,15 @@ int radeon_asic_init(struct radeon_device *rdev)
 	case CHIP_RS780:
 	case CHIP_RS880:
 		rdev->asic = &rs780_asic;
-		rdev->has_uvd = true;
+		/* 760G/780V/880V don't have UVD */
+		if ((rdev->pdev->device == 0x9616)||
+		    (rdev->pdev->device == 0x9611)||
+		    (rdev->pdev->device == 0x9613)||
+		    (rdev->pdev->device == 0x9711)||
+		    (rdev->pdev->device == 0x9713))
+			rdev->has_uvd = false;
+		else
+			rdev->has_uvd = true;
 		break;
 	case CHIP_RV770:
 	case CHIP_RV730:
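[Editor's note, not part of the patch: a hedged sketch of the caller-side consequence, using only names visible in this diff. Since rdev->has_uvd can now be false on RS780/RS880 boards (the 760G/780V/880V device ids above), code that starts the UVD block is expected to gate on the flag rather than on the chip family alone:

	/* hypothetical init-path fragment; uvd_v1_0_init() is declared in
	 * radeon_asic.h below, the gating on has_uvd is the point */
	if (rdev->has_uvd)
		r = uvd_v1_0_init(rdev);
]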
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 7756bc1e1cd3..d8ace5b28a5b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -81,11 +81,11 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 int r100_cs_parse(struct radeon_cs_parser *p);
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
-int r100_copy_blit(struct radeon_device *rdev,
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
 		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence);
+		   struct reservation_object *resv);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
@@ -152,11 +152,11 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
 /*
  * r200,rv250,rs300,rv280
  */
-extern int r200_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
 			 unsigned num_gpu_pages,
-			 struct radeon_fence **fence);
+			 struct reservation_object *resv);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
@@ -340,12 +340,14 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_copy_cpdma(struct radeon_device *rdev,
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 		    uint64_t src_offset, uint64_t dst_offset,
-		    unsigned num_gpu_pages, struct radeon_fence **fence);
-int r600_copy_dma(struct radeon_device *rdev,
-		  uint64_t src_offset, uint64_t dst_offset,
-		  unsigned num_gpu_pages, struct radeon_fence **fence);
+		    unsigned num_gpu_pages,
+		    struct reservation_object *resv);
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct reservation_object *resv);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -389,7 +391,6 @@ void r600_disable_interrupts(struct radeon_device *rdev);
 void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 int r600_audio_init(struct radeon_device *rdev);
-struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
 void r600_audio_fini(struct radeon_device *rdev);
 void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
 void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
@@ -461,10 +462,10 @@ bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 void r700_cp_stop(struct radeon_device *rdev);
 void r700_cp_fini(struct radeon_device *rdev);
-int rv770_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence);
+		   struct reservation_object *resv);
 u32 rv770_get_xclk(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int rv770_get_temp(struct radeon_device *rdev);
@@ -535,10 +536,10 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
 				   struct radeon_fence *fence);
 void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
 				   struct radeon_ib *ib);
-int evergreen_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 		       uint64_t src_offset, uint64_t dst_offset,
 		       unsigned num_gpu_pages,
-		       struct radeon_fence **fence);
+		       struct reservation_object *resv);
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
 void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 int evergreen_get_temp(struct radeon_device *rdev);
@@ -700,10 +701,10 @@ int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-int si_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
-		struct radeon_fence **fence);
+		struct reservation_object *resv);
 
 void si_dma_vm_copy_pages(struct radeon_device *rdev,
 			  struct radeon_ib *ib,
@@ -759,14 +760,14 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait);
 void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int cik_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 		 uint64_t src_offset, uint64_t dst_offset,
 		 unsigned num_gpu_pages,
-		 struct radeon_fence **fence);
-int cik_copy_cpdma(struct radeon_device *rdev,
+		 struct reservation_object *resv);
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence);
+		   struct reservation_object *resv);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -882,6 +883,7 @@ uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
 			   struct radeon_ring *ring);
 void uvd_v1_0_set_wptr(struct radeon_device *rdev,
 		       struct radeon_ring *ring);
+int uvd_v1_0_resume(struct radeon_device *rdev);
 
 int uvd_v1_0_init(struct radeon_device *rdev);
 void uvd_v1_0_fini(struct radeon_device *rdev);
@@ -889,6 +891,8 @@ int uvd_v1_0_start(struct radeon_device *rdev);
 void uvd_v1_0_stop(struct radeon_device *rdev);
 
 int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence);
 int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
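[Editor's note, not part of the patch: every per-ASIC copy entry point in this header now shares one shape, which is also what the radeon_asic copy callbacks in radeon.h expect. As a sketch, a pointer to any of them could be declared as:

	struct radeon_fence *(*copy_func)(struct radeon_device *rdev,
					  uint64_t src_offset,
					  uint64_t dst_offset,
					  unsigned num_gpu_pages,
					  struct reservation_object *resv);

copy_func is a hypothetical name; r100_copy_blit, r600_copy_dma, rv770_copy_dma, evergreen_copy_dma, si_copy_dma, cik_copy_dma and cik_copy_cpdma all match this signature after the patch.]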
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index e74c7e387dde..df69b92ba164 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -458,7 +458,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		return true;
 }
 
-const int supported_devices_connector_convert[] = {
+static const int supported_devices_connector_convert[] = {
 	DRM_MODE_CONNECTOR_Unknown,
 	DRM_MODE_CONNECTOR_VGA,
 	DRM_MODE_CONNECTOR_DVII,
@@ -477,7 +477,7 @@ const int supported_devices_connector_convert[] = {
 	DRM_MODE_CONNECTOR_DisplayPort
 };
 
-const uint16_t supported_devices_connector_object_id_convert[] = {
+static const uint16_t supported_devices_connector_object_id_convert[] = {
 	CONNECTOR_OBJECT_ID_NONE,
 	CONNECTOR_OBJECT_ID_VGA,
 	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
@@ -494,7 +494,7 @@ const uint16_t supported_devices_connector_object_id_convert[] = {
 	CONNECTOR_OBJECT_ID_SVIDEO
 };
 
-const int object_connector_convert[] = {
+static const int object_connector_convert[] = {
 	DRM_MODE_CONNECTOR_Unknown,
 	DRM_MODE_CONNECTOR_DVII,
 	DRM_MODE_CONNECTOR_DVII,
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 69f5695bdab9..9e7f23dd14bd 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -45,33 +45,29 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 	for (i = 0; i < n; i++) {
 		switch (flag) {
 		case RADEON_BENCHMARK_COPY_DMA:
-			r = radeon_copy_dma(rdev, saddr, daddr,
-					    size / RADEON_GPU_PAGE_SIZE,
-					    &fence);
+			fence = radeon_copy_dma(rdev, saddr, daddr,
+						size / RADEON_GPU_PAGE_SIZE,
+						NULL);
 			break;
 		case RADEON_BENCHMARK_COPY_BLIT:
-			r = radeon_copy_blit(rdev, saddr, daddr,
-					     size / RADEON_GPU_PAGE_SIZE,
-					     &fence);
+			fence = radeon_copy_blit(rdev, saddr, daddr,
+						 size / RADEON_GPU_PAGE_SIZE,
+						 NULL);
 			break;
 		default:
 			DRM_ERROR("Unknown copy method\n");
-			r = -EINVAL;
+			return -EINVAL;
 		}
-		if (r)
-			goto exit_do_move;
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+
 		r = radeon_fence_wait(fence, false);
-		if (r)
-			goto exit_do_move;
 		radeon_fence_unref(&fence);
+		if (r)
+			return r;
 	}
 	end_jiffies = jiffies;
-	r = jiffies_to_msecs(end_jiffies - start_jiffies);
-
-exit_do_move:
-	if (fence)
-		radeon_fence_unref(&fence);
-	return r;
+	return jiffies_to_msecs(end_jiffies - start_jiffies);
 }
 
 
@@ -97,7 +93,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	int time;
 
 	n = RADEON_BENCHMARK_ITERATIONS;
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -109,7 +105,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 6651177110f0..3e5f6b71f3ad 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -116,7 +116,7 @@ enum radeon_combios_connector {
 	CONNECTOR_UNSUPPORTED_LEGACY
 };
 
-const int legacy_connector_convert[] = {
+static const int legacy_connector_convert[] = {
 	DRM_MODE_CONNECTOR_Unknown,
 	DRM_MODE_CONNECTOR_DVID,
 	DRM_MODE_CONNECTOR_VGA,
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index bb0d5c3a8311..ea134a7d51a5 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1298,27 +1298,27 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->buffers_offset = init->buffers_offset;
 	dev_priv->gart_textures_offset = init->gart_textures_offset;
 
-	master_priv->sarea = drm_getsarea(dev);
+	master_priv->sarea = drm_legacy_getsarea(dev);
 	if (!master_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		radeon_do_cleanup_cp(dev);
 		return -EINVAL;
 	}
 
-	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+	dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset);
 	if (!dev_priv->cp_ring) {
 		DRM_ERROR("could not find cp ring region!\n");
 		radeon_do_cleanup_cp(dev);
 		return -EINVAL;
 	}
-	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
 	if (!dev_priv->ring_rptr) {
 		DRM_ERROR("could not find ring read pointer!\n");
 		radeon_do_cleanup_cp(dev);
 		return -EINVAL;
 	}
 	dev->agp_buffer_token = init->buffers_offset;
-	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
 	if (!dev->agp_buffer_map) {
 		DRM_ERROR("could not find dma buffer region!\n");
 		radeon_do_cleanup_cp(dev);
@@ -1327,7 +1327,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 
 	if (init->gart_textures_offset) {
 		dev_priv->gart_textures =
-		    drm_core_findmap(dev, init->gart_textures_offset);
+		    drm_legacy_findmap(dev, init->gart_textures_offset);
 		if (!dev_priv->gart_textures) {
 			DRM_ERROR("could not find GART texture region!\n");
 			radeon_do_cleanup_cp(dev);
@@ -1337,9 +1337,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 
 #if __OS_HAS_AGP
 	if (dev_priv->flags & RADEON_IS_AGP) {
-		drm_core_ioremap_wc(dev_priv->cp_ring, dev);
-		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
-		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+		drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
+		drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
+		drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
 		if (!dev_priv->cp_ring->handle ||
 		    !dev_priv->ring_rptr->handle ||
 		    !dev->agp_buffer_map->handle) {
@@ -1475,7 +1475,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 		dev_priv->gart_info.mapping.size =
 		    dev_priv->gart_info.table_size;
 
-		drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+		drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev);
 		dev_priv->gart_info.addr =
 		    dev_priv->gart_info.mapping.handle;
 
@@ -1569,15 +1569,15 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
 #if __OS_HAS_AGP
 	if (dev_priv->flags & RADEON_IS_AGP) {
 		if (dev_priv->cp_ring != NULL) {
-			drm_core_ioremapfree(dev_priv->cp_ring, dev);
+			drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
 			dev_priv->cp_ring = NULL;
 		}
 		if (dev_priv->ring_rptr != NULL) {
-			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
 			dev_priv->ring_rptr = NULL;
 		}
 		if (dev->agp_buffer_map != NULL) {
-			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
 			dev->agp_buffer_map = NULL;
 		}
 	} else
@@ -1597,7 +1597,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
 
 		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
 		{
-			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+			drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev);
 			dev_priv->gart_info.addr = NULL;
 		}
 	}
@@ -2106,9 +2106,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	else
 		dev_priv->flags |= RADEON_IS_PCI;
 
-	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
-			 pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
-			 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+	ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 2),
+				pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
+				_DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
 	if (ret != 0)
 		return ret;
 
@@ -2135,8 +2135,8 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
 
 	/* prebuild the SAREA */
 	sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
-	ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
-			 &master_priv->sarea);
+	ret = drm_legacy_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
+				&master_priv->sarea);
 	if (ret) {
 		DRM_ERROR("SAREA setup failed\n");
 		kfree(master_priv);
@@ -2162,7 +2162,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
 
 	master_priv->sarea_priv = NULL;
 	if (master_priv->sarea)
-		drm_rmmap_locked(dev, master_priv->sarea);
+		drm_legacy_rmmap_locked(dev, master_priv->sarea);
 
 	kfree(master_priv);
 
@@ -2181,9 +2181,9 @@ int radeon_driver_firstopen(struct drm_device *dev)
 	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
 
 	dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
-	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
-			 pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
-			 _DRM_WRITE_COMBINING, &map);
+	ret = drm_legacy_addmap(dev, dev_priv->fb_aper_offset,
+				pci_resource_len(dev->pdev, 0),
+				_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map);
 	if (ret != 0)
 		return ret;
 
@@ -2196,7 +2196,7 @@ int radeon_driver_unload(struct drm_device *dev)
 
 	DRM_DEBUG("\n");
 
-	drm_rmmap(dev, dev_priv->mmio);
+	drm_legacy_rmmap(dev, dev_priv->mmio);
 
 	kfree(dev_priv);
 
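[Editor's note, not part of the patch: the changes in this file are a pure rename of the legacy (UMS) map helpers, with arguments unchanged throughout:

	drm_getsarea()         -> drm_legacy_getsarea()
	drm_addmap()           -> drm_legacy_addmap()
	drm_core_findmap()     -> drm_legacy_findmap()
	drm_core_ioremap_wc()  -> drm_legacy_ioremap_wc()
	drm_core_ioremapfree() -> drm_legacy_ioremapfree()
	drm_rmmap()            -> drm_legacy_rmmap()
	drm_rmmap_locked()     -> drm_legacy_rmmap_locked()

This matches the header-cleanup theme of the merge: legacy interfaces get an explicit drm_legacy_ prefix so modern KMS drivers cannot pick them up by accident.]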
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 83f382e8e40e..1c893447d7cd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
 	unsigned i, j;
-	bool duplicate;
+	bool duplicate, need_mmap_lock = false;
+	int r;
 
 	if (p->chunk_relocs_idx == -1) {
 		return 0;
@@ -136,10 +137,13 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			+ !!r->write_domain;
 
 		/* the first reloc of an UVD job is the msg and that must be in
-		   VRAM, also but everything into VRAM on AGP cards to avoid
-		   image corruptions */
+		   VRAM, also but everything into VRAM on AGP cards and older
+		   IGP chips to avoid image corruptions */
 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
+		     p->rdev->family == CHIP_RS780 ||
+		     p->rdev->family == CHIP_RS880)) {
+
 			/* TODO: is this still needed for NI+ ? */
 			p->relocs[i].prefered_domains =
 				RADEON_GEM_DOMAIN_VRAM;
@@ -165,7 +169,21 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			p->relocs[i].allowed_domains = domain;
 		}
 
+		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
+			uint32_t domain = p->relocs[i].prefered_domains;
+			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
+				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
+					  "allowed for userptr BOs\n");
+				return -EINVAL;
+			}
+			need_mmap_lock = true;
+			domain = RADEON_GEM_DOMAIN_GTT;
+			p->relocs[i].prefered_domains = domain;
+			p->relocs[i].allowed_domains = domain;
+		}
+
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+		p->relocs[i].tv.shared = !r->write_domain;
 		p->relocs[i].handle = r->handle;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
@@ -177,8 +195,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	if (p->cs_flags & RADEON_CS_USE_VM)
 		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
 					      &p->validated);
+	if (need_mmap_lock)
+		down_read(&current->mm->mmap_sem);
 
-	return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+
+	if (need_mmap_lock)
+		up_read(&current->mm->mmap_sem);
+
+	return r;
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -224,17 +249,24 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 	return 0;
 }
 
-static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i;
+	int i, r = 0;
 
 	for (i = 0; i < p->nrelocs; i++) {
+		struct reservation_object *resv;
+
 		if (!p->relocs[i].robj)
 			continue;
 
-		radeon_semaphore_sync_to(p->ib.semaphore,
-					 p->relocs[i].robj->tbo.sync_obj);
+		resv = p->relocs[i].robj->tbo.resv;
+		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
+					       p->relocs[i].tv.shared);
+
+		if (r)
+			break;
 	}
+	return r;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -403,7 +435,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    parser->ib.fence);
+					    &parser->ib.fence->base);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
@@ -444,13 +476,19 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 		return r;
 	}
 
+	r = radeon_cs_sync_rings(parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to sync rings: %i\n", r);
+		return r;
+	}
+
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
 		radeon_uvd_note_usage(rdev);
 	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
 		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
 		radeon_vce_note_usage(rdev);
 
-	radeon_cs_sync_rings(parser);
 	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
@@ -537,8 +575,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	if (r) {
 		goto out;
 	}
-	radeon_cs_sync_rings(parser);
-	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+
+	r = radeon_cs_sync_rings(parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to sync rings: %i\n", r);
+		goto out;
+	}
+	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
@@ -629,6 +673,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		up_read(&rdev->exclusive_lock);
 		return -EBUSY;
 	}
+	if (rdev->in_reset) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+		return r;
+	}
 	/* initialize parser */
 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
 	parser.filp = filp;
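[Editor's note, not part of the patch: two behavioural points in this file are easy to miss. First, userptr BOs are validated while holding the process' mmap_sem so the user pages cannot be remapped mid-validate, and only GTT placement is accepted for them. Second, the CS ioctl now participates in the new reset protocol: when rdev->in_reset is set it performs the reset itself and returns -EAGAIN, so a submitter is expected to retry. A hedged sketch of that caller contract, with submit_cs() as a hypothetical wrapper around the submission path:

	int r;

	do {
		r = submit_cs(parser);	/* may return -EAGAIN after a reset */
	} while (r == -EAGAIN);

The same retry-on-EAGAIN loop appears verbatim around radeon_gpu_reset() in the radeon_display.c hunk below.]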
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 12c8329644c4..f41cc1538e48 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -434,7 +434,7 @@ int radeon_wb_init(struct radeon_device *rdev)
 
 	if (rdev->wb.wb_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT, 0, NULL,
+				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
 				     &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -1257,6 +1257,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
 		rdev->ring[i].idx = i;
 	}
+	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1274,6 +1275,8 @@ int radeon_device_init(struct radeon_device *rdev,
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
+	mutex_init(&rdev->mn_lock);
+	hash_init(rdev->mn_hash);
 	r = radeon_gem_init(rdev);
 	if (r)
 		return r;
@@ -1399,10 +1402,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (r)
 		goto failed;
 
-	r = radeon_ib_ring_tests(rdev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	r = radeon_gem_debugfs_init(rdev);
 	if (r) {
 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -1420,6 +1419,10 @@ int radeon_device_init(struct radeon_device *rdev,
 		goto failed;
 	}
 
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
 	if ((radeon_testing & 1)) {
 		if (rdev->accel_working)
 			radeon_test_moves(rdev);
@@ -1497,7 +1500,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 	int i, r;
-	bool force_completion = false;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1541,12 +1543,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 		r = radeon_fence_wait_empty(rdev, i);
 		if (r) {
 			/* delay GPU reset to resume */
-			force_completion = true;
+			radeon_fence_driver_force_completion(rdev, i);
 		}
 	}
-	if (force_completion) {
-		radeon_fence_driver_force_completion(rdev);
-	}
 
 	radeon_save_bios_scratch_regs(rdev);
 
@@ -1686,8 +1685,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		return 0;
 	}
 
-	rdev->needs_reset = false;
-
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1704,7 +1701,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		}
 	}
 
-retry:
 	r = radeon_asic_reset(rdev);
 	if (!r) {
 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1713,26 +1709,12 @@ retry:
 
 	radeon_restore_bios_scratch_regs(rdev);
 
-	if (!r) {
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!r && ring_data[i]) {
 			radeon_ring_restore(rdev, &rdev->ring[i],
 					    ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
-			ring_data[i] = NULL;
-		}
-
-		r = radeon_ib_ring_tests(rdev);
-		if (r) {
-			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
-			if (saved) {
-				saved = false;
-				radeon_suspend(rdev);
-				goto retry;
-			}
-		}
-	} else {
-		radeon_fence_driver_force_completion(rdev);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		} else {
+			radeon_fence_driver_force_completion(rdev, i);
 			kfree(ring_data[i]);
 		}
 	}
@@ -1764,19 +1746,32 @@ retry:
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
 
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+
+	rdev->in_reset = true;
+	rdev->needs_reset = false;
+
+	downgrade_write(&rdev->exclusive_lock);
+
 	drm_helper_resume_force_mode(rdev->ddev);
 
 	/* set the power state here in case we are a PX system or headless */
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
 		radeon_pm_compute_clocks(rdev);
 
-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-	if (r) {
+	if (!r) {
+		r = radeon_ib_ring_tests(rdev);
+		if (r && saved)
+			r = -EAGAIN;
+	} else {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}
 
-	up_write(&rdev->exclusive_lock);
+	rdev->needs_reset = r == -EAGAIN;
+	rdev->in_reset = false;
+
+	up_read(&rdev->exclusive_lock);
 	return r;
 }
 
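[Editor's note, not part of the patch: radeon_gpu_reset() no longer loops internally via the old retry: label. It now downgrades exclusive_lock to a read lock, flags rdev->in_reset while per-ring fences are force-completed, and reports -EAGAIN when the IB ring tests fail after a ring restore, leaving the retry to the caller:

	do {
		r = radeon_gpu_reset(rdev);
	} while (r == -EAGAIN);

This exact loop is what radeon_flip_work_func() adopts in the next file, and it is the same contract the CS ioctl exposes to userspace.]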
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3fdf87318069..00ead8c2758a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -402,12 +402,21 @@ static void radeon_flip_work_func(struct work_struct *__work)
 
 	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
-		r = radeon_fence_wait(work->fence, false);
-		if (r == -EDEADLK) {
-			up_read(&rdev->exclusive_lock);
-			r = radeon_gpu_reset(rdev);
-			down_read(&rdev->exclusive_lock);
-		}
+		struct radeon_fence *fence;
+
+		fence = to_radeon_fence(work->fence);
+		if (fence && fence->rdev == rdev) {
+			r = radeon_fence_wait(fence, false);
+			if (r == -EDEADLK) {
+				up_read(&rdev->exclusive_lock);
+				do {
+					r = radeon_gpu_reset(rdev);
+				} while (r == -EAGAIN);
+				down_read(&rdev->exclusive_lock);
+			}
+		} else
+			r = fence_wait(work->fence, false);
+
 		if (r)
 			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
@@ -416,7 +425,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		 * confused about which BO the CRTC is scanning out
 		 */
 
-		radeon_fence_unref(&work->fence);
+		fence_put(work->fence);
+		work->fence = NULL;
 	}
 
 	/* We borrow the event spin lock for protecting flip_status */
@@ -474,11 +484,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	obj = new_radeon_fb->obj;
 	new_rbo = gem_to_radeon_bo(obj);
 
-	spin_lock(&new_rbo->tbo.bdev->fence_lock);
-	if (new_rbo->tbo.sync_obj)
-		work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
-	spin_unlock(&new_rbo->tbo.bdev->fence_lock);
-
 	/* pin the new buffer */
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
 			 work->old_rbo, new_rbo);
@@ -497,6 +502,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
+	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
@@ -578,9 +584,8 @@ pflip_cleanup:
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	radeon_fence_unref(&work->fence);
+	fence_put(work->fence);
 	kfree(work);
-
 	return r;
 }
 
@@ -1917,7 +1922,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
 
 	/* In vblank? */
 	if (in_vbl)
-		ret |= DRM_SCANOUTPOS_INVBL;
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
 
 	/* Is vpos outside nominal vblank area, but less than
 	 * 1/100 of a frame height away from start of vblank?
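[Editor's note, not part of the patch: because radeon_flip_work now holds a generic struct fence, the wait path must distinguish fences emitted by this device from foreign ones (e.g. fences attached to a shared reservation object). The to_radeon_fence() helper added in radeon.h makes that a safe downcast; a condensed sketch of the pattern used above, where f is a hypothetical struct fence pointer:

	struct radeon_fence *rfence = to_radeon_fence(f);

	if (rfence && rfence->rdev == rdev)
		r = radeon_fence_wait(rfence, false);	/* driver fast path */
	else
		r = fence_wait(f, false);		/* generic fence API */

The real code additionally retries the GPU reset on -EDEADLK, as shown in the hunk.]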
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f9d17b29b343..dcffa30ee2db 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,6 +38,8 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
40#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
41#include <drm/drm_gem.h>
42
41#include "drm_crtc_helper.h" 43#include "drm_crtc_helper.h"
42/* 44/*
43 * KMS wrapper. 45 * KMS wrapper.
@@ -114,6 +116,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
114 struct drm_file *file_priv); 116 struct drm_file *file_priv);
115void radeon_gem_object_close(struct drm_gem_object *obj, 117void radeon_gem_object_close(struct drm_gem_object *obj,
116 struct drm_file *file_priv); 118 struct drm_file *file_priv);
119struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
120 struct drm_gem_object *gobj,
121 int flags);
117extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 122extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
118 unsigned int flags, 123 unsigned int flags,
119 int *vpos, int *hpos, ktime_t *stime, 124 int *vpos, int *hpos, ktime_t *stime,
@@ -130,7 +135,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
130 struct drm_mode_create_dumb *args); 135 struct drm_mode_create_dumb *args);
131struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); 136struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
132struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, 137struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
133 size_t size, 138 struct dma_buf_attachment *,
134 struct sg_table *sg); 139 struct sg_table *sg);
135int radeon_gem_prime_pin(struct drm_gem_object *obj); 140int radeon_gem_prime_pin(struct drm_gem_object *obj);
136void radeon_gem_prime_unpin(struct drm_gem_object *obj); 141void radeon_gem_prime_unpin(struct drm_gem_object *obj);
@@ -309,7 +314,7 @@ static const struct file_operations radeon_driver_old_fops = {
309 .open = drm_open, 314 .open = drm_open,
310 .release = drm_release, 315 .release = drm_release,
311 .unlocked_ioctl = drm_ioctl, 316 .unlocked_ioctl = drm_ioctl,
312 .mmap = drm_mmap, 317 .mmap = drm_legacy_mmap,
313 .poll = drm_poll, 318 .poll = drm_poll,
314 .read = drm_read, 319 .read = drm_read,
315#ifdef CONFIG_COMPAT 320#ifdef CONFIG_COMPAT
@@ -329,6 +334,7 @@ static struct drm_driver driver_old = {
329 .preclose = radeon_driver_preclose, 334 .preclose = radeon_driver_preclose,
330 .postclose = radeon_driver_postclose, 335 .postclose = radeon_driver_postclose,
331 .lastclose = radeon_driver_lastclose, 336 .lastclose = radeon_driver_lastclose,
337 .set_busid = drm_pci_set_busid,
332 .unload = radeon_driver_unload, 338 .unload = radeon_driver_unload,
333 .suspend = radeon_suspend, 339 .suspend = radeon_suspend,
334 .resume = radeon_resume, 340 .resume = radeon_resume,
@@ -553,6 +559,7 @@ static struct drm_driver kms_driver = {
553 .preclose = radeon_driver_preclose_kms, 559 .preclose = radeon_driver_preclose_kms,
554 .postclose = radeon_driver_postclose_kms, 560 .postclose = radeon_driver_postclose_kms,
555 .lastclose = radeon_driver_lastclose_kms, 561 .lastclose = radeon_driver_lastclose_kms,
562 .set_busid = drm_pci_set_busid,
556 .unload = radeon_driver_unload_kms, 563 .unload = radeon_driver_unload_kms,
557 .get_vblank_counter = radeon_get_vblank_counter_kms, 564 .get_vblank_counter = radeon_get_vblank_counter_kms,
558 .enable_vblank = radeon_enable_vblank_kms, 565 .enable_vblank = radeon_enable_vblank_kms,
@@ -578,7 +585,7 @@ static struct drm_driver kms_driver = {
578 585
579 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 586 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
580 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 587 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
581 .gem_prime_export = drm_gem_prime_export, 588 .gem_prime_export = radeon_gem_prime_export,
582 .gem_prime_import = drm_gem_prime_import, 589 .gem_prime_import = drm_gem_prime_import,
583 .gem_prime_pin = radeon_gem_prime_pin, 590 .gem_prime_pin = radeon_gem_prime_pin,
584 .gem_prime_unpin = radeon_gem_prime_unpin, 591 .gem_prime_unpin = radeon_gem_prime_unpin,
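The driver-struct edits above follow the usual "ops table with targeted overrides" pattern: radeon keeps the generic drm_gem_prime_* helpers for most PRIME hooks but points .gem_prime_export at its own radeon_gem_prime_export (declared earlier in this diff), presumably so the exported dma-buf can carry radeon's reservation object. A standalone sketch of the pattern; the names are illustrative, not drm API:

#include <stdio.h>

struct driver_ops {
	const char *(*prime_export)(void);
	const char *(*prime_import)(void);
};

static const char *generic_export(void) { return "generic export"; }
static const char *generic_import(void) { return "generic import"; }
static const char *radeon_export(void)  { return "radeon export"; }

int main(void)
{
	/* start from the generic helpers... */
	struct driver_ops ops = {
		.prime_export = generic_export,
		.prime_import = generic_import,
	};

	/* ...and override only the hook that needs driver knowledge */
	ops.prime_export = radeon_export;

	printf("%s / %s\n", ops.prime_export(), ops.prime_import());
	return 0;
}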
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index dafd812e4571..46bd3938282c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -33,7 +33,9 @@
33 33
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36#include <drm/drm_legacy.h>
36 37
38#include <drm/ati_pcigart.h>
37#include "radeon_family.h" 39#include "radeon_family.h"
38 40
39/* General customization: 41/* General customization:
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 15edf23b465c..9a19e52cc655 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -410,3 +410,24 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
410 } 410 }
411} 411}
412 412
413bool radeon_encoder_is_digital(struct drm_encoder *encoder)
414{
415 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
416 switch (radeon_encoder->encoder_id) {
417 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
418 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
419 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
420 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
421 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
422 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
423 case ENCODER_OBJECT_ID_INTERNAL_DDI:
424 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
425 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
426 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
427 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
428 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
429 return true;
430 default:
431 return false;
432 }
433}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 94b0f2aa3d7c..0ea1db83d573 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -189,7 +189,8 @@ out_unref:
189static int radeonfb_create(struct drm_fb_helper *helper, 189static int radeonfb_create(struct drm_fb_helper *helper,
190 struct drm_fb_helper_surface_size *sizes) 190 struct drm_fb_helper_surface_size *sizes)
191{ 191{
192 struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; 192 struct radeon_fbdev *rfbdev =
193 container_of(helper, struct radeon_fbdev, helper);
193 struct radeon_device *rdev = rfbdev->rdev; 194 struct radeon_device *rdev = rfbdev->rdev;
194 struct fb_info *info; 195 struct fb_info *info;
195 struct drm_framebuffer *fb = NULL; 196 struct drm_framebuffer *fb = NULL;
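The radeonfb_create() change above replaces a bare cast of the drm_fb_helper pointer with container_of(), which stays correct even if the embedded helper member ever stops being the first field of struct radeon_fbdev. A self-contained illustration, with the macro simplified from the kernel's definition:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct helper { int dummy; };

struct fbdev {
	int rdev_id;		/* any member before 'helper' breaks a cast */
	struct helper helper;
};

int main(void)
{
	struct fbdev dev = { .rdev_id = 42 };
	struct helper *h = &dev.helper;

	/* recover the enclosing object from the embedded member */
	struct fbdev *back = container_of(h, struct fbdev, helper);

	printf("%d\n", back->rdev_id);	/* prints 42 */
	return 0;
}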
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 913787085dfa..995167025282 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -98,6 +98,25 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
98} 98}
99 99
100/** 100/**
101 * radeon_fence_schedule_check - schedule lockup check
102 *
103 * @rdev: radeon_device pointer
104 * @ring: ring index we should work with
105 *
106 * Queues a delayed work item to check for lockups.
107 */
108static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
109{
110 /*
111 * Do not reset the timer here with mod_delayed_work,
112 * this can livelock in an interaction with TTM delayed destroy.
113 */
114 queue_delayed_work(system_power_efficient_wq,
115 &rdev->fence_drv[ring].lockup_work,
116 RADEON_FENCE_JIFFIES_TIMEOUT);
117}
118
119/**
101 * radeon_fence_emit - emit a fence on the requested ring 120 * radeon_fence_emit - emit a fence on the requested ring
102 * 121 *
103 * @rdev: radeon_device pointer 122 * @rdev: radeon_device pointer
@@ -111,30 +130,70 @@ int radeon_fence_emit(struct radeon_device *rdev,
111 struct radeon_fence **fence, 130 struct radeon_fence **fence,
112 int ring) 131 int ring)
113{ 132{
133 u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];
134
114 /* we are protected by the ring emission mutex */ 135 /* we are protected by the ring emission mutex */
115 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); 136 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
116 if ((*fence) == NULL) { 137 if ((*fence) == NULL) {
117 return -ENOMEM; 138 return -ENOMEM;
118 } 139 }
119 kref_init(&((*fence)->kref));
120 (*fence)->rdev = rdev; 140 (*fence)->rdev = rdev;
121 (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; 141 (*fence)->seq = seq;
122 (*fence)->ring = ring; 142 (*fence)->ring = ring;
143 fence_init(&(*fence)->base, &radeon_fence_ops,
144 &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
123 radeon_fence_ring_emit(rdev, ring, *fence); 145 radeon_fence_ring_emit(rdev, ring, *fence);
124 trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); 146 trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
147 radeon_fence_schedule_check(rdev, ring);
125 return 0; 148 return 0;
126} 149}
127 150
128/** 151/**
129 * radeon_fence_process - process a fence 152 * radeon_fence_check_signaled - callback from fence_queue
153 *
 154 * This function is called with the fence_queue lock held, which is also used
 155 * for the fence locking itself, so unlocked variants are used for
 156 * fence_signal and remove_wait_queue.
157 */
158static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
159{
160 struct radeon_fence *fence;
161 u64 seq;
162
163 fence = container_of(wait, struct radeon_fence, fence_wake);
164
165 /*
166 * We cannot use radeon_fence_process here because we're already
167 * in the waitqueue, in a call from wake_up_all.
168 */
169 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
170 if (seq >= fence->seq) {
171 int ret = fence_signal_locked(&fence->base);
172
173 if (!ret)
174 FENCE_TRACE(&fence->base, "signaled from irq context\n");
175 else
176 FENCE_TRACE(&fence->base, "was already signaled\n");
177
178 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
179 __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
180 fence_put(&fence->base);
181 } else
182 FENCE_TRACE(&fence->base, "pending\n");
183 return 0;
184}
185
186/**
187 * radeon_fence_activity - check for fence activity
130 * 188 *
131 * @rdev: radeon_device pointer 189 * @rdev: radeon_device pointer
132 * @ring: ring index the fence is associated with 190 * @ring: ring index the fence is associated with
133 * 191 *
134 * Checks the current fence value and wakes the fence queue 192 * Checks the current fence value and calculates the last
 135 * if the sequence number has increased (all asics). 193 * signaled fence value. Returns true if activity occurred
 194 * on the ring, and the fence_queue should be woken up.
136 */ 195 */
137void radeon_fence_process(struct radeon_device *rdev, int ring) 196static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
138{ 197{
139 uint64_t seq, last_seq, last_emitted; 198 uint64_t seq, last_seq, last_emitted;
140 unsigned count_loop = 0; 199 unsigned count_loop = 0;
@@ -190,23 +249,77 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
190 } 249 }
191 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); 250 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
192 251
193 if (wake) 252 if (seq < last_emitted)
194 wake_up_all(&rdev->fence_queue); 253 radeon_fence_schedule_check(rdev, ring);
254
255 return wake;
195} 256}
196 257
197/** 258/**
198 * radeon_fence_destroy - destroy a fence 259 * radeon_fence_check_lockup - check for hardware lockup
199 * 260 *
200 * @kref: fence kref 261 * @work: delayed work item
201 * 262 *
 202 * Frees the fence object (all asics). 263 * Checks for fence activity and, if there is none, probes
 264 * the hardware to see if a lockup occurred.
203 */ 265 */
204static void radeon_fence_destroy(struct kref *kref) 266static void radeon_fence_check_lockup(struct work_struct *work)
205{ 267{
206 struct radeon_fence *fence; 268 struct radeon_fence_driver *fence_drv;
269 struct radeon_device *rdev;
270 int ring;
271
272 fence_drv = container_of(work, struct radeon_fence_driver,
273 lockup_work.work);
274 rdev = fence_drv->rdev;
275 ring = fence_drv - &rdev->fence_drv[0];
276
277 if (!down_read_trylock(&rdev->exclusive_lock)) {
278 /* just reschedule the check if a reset is going on */
279 radeon_fence_schedule_check(rdev, ring);
280 return;
281 }
282
283 if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
284 unsigned long irqflags;
285
286 fence_drv->delayed_irq = false;
287 spin_lock_irqsave(&rdev->irq.lock, irqflags);
288 radeon_irq_set(rdev);
289 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
290 }
291
292 if (radeon_fence_activity(rdev, ring))
293 wake_up_all(&rdev->fence_queue);
207 294
208 fence = container_of(kref, struct radeon_fence, kref); 295 else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
209 kfree(fence); 296
 297 /* good news, we believe it's a lockup */
298 dev_warn(rdev->dev, "GPU lockup (current fence id "
299 "0x%016llx last fence id 0x%016llx on ring %d)\n",
300 (uint64_t)atomic64_read(&fence_drv->last_seq),
301 fence_drv->sync_seq[ring], ring);
302
 303 /* remember that we need a reset */
304 rdev->needs_reset = true;
305 wake_up_all(&rdev->fence_queue);
306 }
307 up_read(&rdev->exclusive_lock);
308}
309
310/**
311 * radeon_fence_process - process a fence
312 *
313 * @rdev: radeon_device pointer
314 * @ring: ring index the fence is associated with
315 *
316 * Checks the current fence value and wakes the fence queue
317 * if the sequence number has increased (all asics).
318 */
319void radeon_fence_process(struct radeon_device *rdev, int ring)
320{
321 if (radeon_fence_activity(rdev, ring))
322 wake_up_all(&rdev->fence_queue);
210} 323}
211 324
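radeon_fence_activity() above boils fence processing down to "read the hardware seqno, extend it to 64 bits, and atomically publish it if it moved forward"; the signaled test then becomes a lock-free comparison against the published value. A compilable model of both halves, assuming a monotonically increasing 64-bit sequence (the kernel uses an atomic64 xchg loop; a CAS loop is the userspace analogue):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t last_seq;	/* last seqno seen on the ring */

/* a fence with seq <= last_seq has signaled; no locking required */
static bool fence_seq_signaled(uint64_t seq)
{
	return atomic_load(&last_seq) >= seq;
}

/* publish new activity without ever letting last_seq move backwards */
static bool fence_activity(uint64_t hw_seq)
{
	uint_fast64_t old = atomic_load(&last_seq);

	while (hw_seq > old)
		if (atomic_compare_exchange_weak(&last_seq, &old, hw_seq))
			return true;	/* we advanced it: wake the waiters */
	return false;			/* no forward progress observed */
}

int main(void)
{
	fence_activity(5);
	printf("%d %d\n", fence_seq_signaled(3), fence_seq_signaled(9));
	return 0;
}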
212/** 325/**
@@ -237,6 +350,75 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
237 return false; 350 return false;
238} 351}
239 352
353static bool radeon_fence_is_signaled(struct fence *f)
354{
355 struct radeon_fence *fence = to_radeon_fence(f);
356 struct radeon_device *rdev = fence->rdev;
357 unsigned ring = fence->ring;
358 u64 seq = fence->seq;
359
360 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
361 return true;
362 }
363
364 if (down_read_trylock(&rdev->exclusive_lock)) {
365 radeon_fence_process(rdev, ring);
366 up_read(&rdev->exclusive_lock);
367
368 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
369 return true;
370 }
371 }
372 return false;
373}
374
375/**
 376 * radeon_fence_enable_signaling - enable signaling on a fence
377 * @fence: fence
378 *
379 * This function is called with fence_queue lock held, and adds a callback
380 * to fence_queue that checks if this fence is signaled, and if so it
381 * signals the fence and removes itself.
382 */
383static bool radeon_fence_enable_signaling(struct fence *f)
384{
385 struct radeon_fence *fence = to_radeon_fence(f);
386 struct radeon_device *rdev = fence->rdev;
387
388 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
389 return false;
390
391 if (down_read_trylock(&rdev->exclusive_lock)) {
392 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
393
394 if (radeon_fence_activity(rdev, fence->ring))
395 wake_up_all_locked(&rdev->fence_queue);
396
397 /* did fence get signaled after we enabled the sw irq? */
398 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
399 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
400 up_read(&rdev->exclusive_lock);
401 return false;
402 }
403
404 up_read(&rdev->exclusive_lock);
405 } else {
 406 /* we're probably in a lockup, let's not fiddle too much */
407 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
408 rdev->fence_drv[fence->ring].delayed_irq = true;
409 radeon_fence_schedule_check(rdev, fence->ring);
410 }
411
412 fence->fence_wake.flags = 0;
413 fence->fence_wake.private = NULL;
414 fence->fence_wake.func = radeon_fence_check_signaled;
415 __add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
416 fence_get(f);
417
418 FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
419 return true;
420}
421
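radeon_fence_enable_signaling() above is careful about the race between "check the seqno" and "turn on the software interrupt": it checks, takes the IRQ reference, then re-checks, and backs out if the fence signaled in between. A standalone sketch of that check/enable/re-check shape, with the hardware replaced by atomics (illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t last_seq;
static atomic_int irq_refcount;

static void irq_get(void) { atomic_fetch_add(&irq_refcount, 1); }
static void irq_put(void) { atomic_fetch_sub(&irq_refcount, 1); }

/* returns false if no wait callback needs to be armed */
static bool enable_signaling(uint64_t seq)
{
	if (atomic_load(&last_seq) >= seq)
		return false;		/* already signaled: fast path */

	irq_get();			/* from here on the IRQ will fire */

	if (atomic_load(&last_seq) >= seq) {
		irq_put();		/* signaled in the window: undo */
		return false;
	}
	return true;			/* caller arms its wait callback */
}

int main(void)
{
	atomic_store(&last_seq, 10);
	printf("%d\n", enable_signaling(11));	/* 1: callback armed */
	printf("%d\n", enable_signaling(9));	/* 0: already signaled */
	return 0;
}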
240/** 422/**
241 * radeon_fence_signaled - check if a fence has signaled 423 * radeon_fence_signaled - check if a fence has signaled
242 * 424 *
@@ -247,14 +429,15 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
247 */ 429 */
248bool radeon_fence_signaled(struct radeon_fence *fence) 430bool radeon_fence_signaled(struct radeon_fence *fence)
249{ 431{
250 if (!fence) { 432 if (!fence)
251 return true; 433 return true;
252 } 434
253 if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
254 return true;
255 }
256 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { 435 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
257 fence->seq = RADEON_FENCE_SIGNALED_SEQ; 436 int ret;
437
438 ret = fence_signal(&fence->base);
439 if (!ret)
440 FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
258 return true; 441 return true;
259 } 442 }
260 return false; 443 return false;
@@ -283,110 +466,70 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
283} 466}
284 467
285/** 468/**
286 * radeon_fence_wait_seq - wait for a specific sequence numbers 469 * radeon_fence_wait_seq_timeout - wait for a specific sequence numbers
287 * 470 *
288 * @rdev: radeon device pointer 471 * @rdev: radeon device pointer
289 * @target_seq: sequence number(s) we want to wait for 472 * @target_seq: sequence number(s) we want to wait for
 290 * @intr: use interruptible sleep 473 * @intr: use interruptible sleep
474 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
291 * 475 *
292 * Wait for the requested sequence number(s) to be written by any ring 476 * Wait for the requested sequence number(s) to be written by any ring
 293 * (all asics). Sequence number array is indexed by ring id. 477 * (all asics). Sequence number array is indexed by ring id.
 294 * @intr selects whether to use interruptible (true) or non-interruptible 478 * @intr selects whether to use interruptible (true) or non-interruptible
295 * (false) sleep when waiting for the sequence number. Helper function 479 * (false) sleep when waiting for the sequence number. Helper function
296 * for radeon_fence_wait_*(). 480 * for radeon_fence_wait_*().
297 * Returns 0 if the sequence number has passed, error for all other cases. 481 * Returns remaining time if the sequence number has passed, 0 when
 482 * the wait timed out, or an error for all other cases.
298 * -EDEADLK is returned when a GPU lockup has been detected. 483 * -EDEADLK is returned when a GPU lockup has been detected.
299 */ 484 */
300static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, 485static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
301 bool intr) 486 u64 *target_seq, bool intr,
487 long timeout)
302{ 488{
303 uint64_t last_seq[RADEON_NUM_RINGS]; 489 long r;
304 bool signaled; 490 int i;
305 int i, r;
306
307 while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
308 491
309 /* Save current sequence values, used to check for GPU lockups */ 492 if (radeon_fence_any_seq_signaled(rdev, target_seq))
310 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 493 return timeout;
311 if (!target_seq[i])
312 continue;
313 494
314 last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq); 495 /* enable IRQs and tracing */
315 trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); 496 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
316 radeon_irq_kms_sw_irq_get(rdev, i); 497 if (!target_seq[i])
317 } 498 continue;
318 499
319 if (intr) { 500 trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
320 r = wait_event_interruptible_timeout(rdev->fence_queue, ( 501 radeon_irq_kms_sw_irq_get(rdev, i);
321 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) 502 }
322 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
323 } else {
324 r = wait_event_timeout(rdev->fence_queue, (
325 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
326 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
327 }
328 503
329 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 504 if (intr) {
330 if (!target_seq[i]) 505 r = wait_event_interruptible_timeout(rdev->fence_queue, (
331 continue; 506 radeon_fence_any_seq_signaled(rdev, target_seq)
507 || rdev->needs_reset), timeout);
508 } else {
509 r = wait_event_timeout(rdev->fence_queue, (
510 radeon_fence_any_seq_signaled(rdev, target_seq)
511 || rdev->needs_reset), timeout);
512 }
332 513
333 radeon_irq_kms_sw_irq_put(rdev, i); 514 if (rdev->needs_reset)
334 trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); 515 r = -EDEADLK;
335 }
336 516
337 if (unlikely(r < 0)) 517 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
338 return r; 518 if (!target_seq[i])
519 continue;
339 520
340 if (unlikely(!signaled)) { 521 radeon_irq_kms_sw_irq_put(rdev, i);
341 if (rdev->needs_reset) 522 trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
342 return -EDEADLK;
343
344 /* we were interrupted for some reason and fence
345 * isn't signaled yet, resume waiting */
346 if (r)
347 continue;
348
349 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
350 if (!target_seq[i])
351 continue;
352
353 if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
354 break;
355 }
356
357 if (i != RADEON_NUM_RINGS)
358 continue;
359
360 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
361 if (!target_seq[i])
362 continue;
363
364 if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
365 break;
366 }
367
368 if (i < RADEON_NUM_RINGS) {
369 /* good news we believe it's a lockup */
370 dev_warn(rdev->dev, "GPU lockup (waiting for "
371 "0x%016llx last fence id 0x%016llx on"
372 " ring %d)\n",
373 target_seq[i], last_seq[i], i);
374
375 /* remember that we need an reset */
376 rdev->needs_reset = true;
377 wake_up_all(&rdev->fence_queue);
378 return -EDEADLK;
379 }
380 }
381 } 523 }
382 return 0; 524
525 return r;
383} 526}
384 527
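radeon_fence_wait_seq_timeout() above adopts the wait_event_*_timeout() return convention: negative is an error, 0 means the timeout expired, positive is the time remaining. Callers that want an unbounded wait pass MAX_SCHEDULE_TIMEOUT and only have to test for r < 0, as radeon_fence_wait() now does. A small model of consuming that convention (the kernel-style names here are stand-ins):

#include <stdio.h>

#define MAX_SCHEDULE_TIMEOUT	((long)(~0UL >> 1))

/* pretend wait: <0 error, 0 timed out, >0 "jiffies" remaining */
static long wait_seq_timeout(long timeout)
{
	long elapsed = 5;	/* the fence signals after 5 ticks */

	if (timeout <= elapsed)
		return 0;	/* expired before the fence signaled */
	return timeout - elapsed;
}

static int fence_wait(void)
{
	long r = wait_seq_timeout(MAX_SCHEDULE_TIMEOUT);

	if (r < 0)
		return (int)r;	/* propagate errors such as -EDEADLK */
	return 0;		/* unbounded wait: r == 0 cannot happen */
}

int main(void)
{
	printf("%d\n", fence_wait());		/* 0 */
	printf("%ld\n", wait_seq_timeout(3));	/* 0: timed out */
	return 0;
}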
385/** 528/**
386 * radeon_fence_wait - wait for a fence to signal 529 * radeon_fence_wait - wait for a fence to signal
387 * 530 *
388 * @fence: radeon fence object 531 * @fence: radeon fence object
389 * @intr: use interruptable sleep 532 * @intr: use interruptible sleep
390 * 533 *
391 * Wait for the requested fence to signal (all asics). 534 * Wait for the requested fence to signal (all asics).
 392 * @intr selects whether to use interruptible (true) or non-interruptible 535 * @intr selects whether to use interruptible (true) or non-interruptible
@@ -396,22 +539,26 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
396int radeon_fence_wait(struct radeon_fence *fence, bool intr) 539int radeon_fence_wait(struct radeon_fence *fence, bool intr)
397{ 540{
398 uint64_t seq[RADEON_NUM_RINGS] = {}; 541 uint64_t seq[RADEON_NUM_RINGS] = {};
399 int r; 542 long r;
400 543
401 if (fence == NULL) { 544 /*
402 WARN(1, "Querying an invalid fence : %p !\n", fence); 545 * This function should not be called on !radeon fences.
403 return -EINVAL; 546 * If this is the case, it would mean this function can
404 } 547 * also be called on radeon fences belonging to another card.
548 * exclusive_lock is not held in that case.
549 */
550 if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
551 return fence_wait(&fence->base, intr);
405 552
406 seq[fence->ring] = fence->seq; 553 seq[fence->ring] = fence->seq;
407 if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) 554 r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
408 return 0; 555 if (r < 0) {
409
410 r = radeon_fence_wait_seq(fence->rdev, seq, intr);
411 if (r)
412 return r; 556 return r;
557 }
413 558
414 fence->seq = RADEON_FENCE_SIGNALED_SEQ; 559 r = fence_signal(&fence->base);
560 if (!r)
561 FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
415 return 0; 562 return 0;
416} 563}
417 564
@@ -434,7 +581,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
434{ 581{
435 uint64_t seq[RADEON_NUM_RINGS]; 582 uint64_t seq[RADEON_NUM_RINGS];
436 unsigned i, num_rings = 0; 583 unsigned i, num_rings = 0;
437 int r; 584 long r;
438 585
439 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 586 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
440 seq[i] = 0; 587 seq[i] = 0;
@@ -445,18 +592,14 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
445 592
446 seq[i] = fences[i]->seq; 593 seq[i] = fences[i]->seq;
447 ++num_rings; 594 ++num_rings;
448
449 /* test if something was allready signaled */
450 if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
451 return 0;
452 } 595 }
453 596
454 /* nothing to wait for ? */ 597 /* nothing to wait for ? */
455 if (num_rings == 0) 598 if (num_rings == 0)
456 return -ENOENT; 599 return -ENOENT;
457 600
458 r = radeon_fence_wait_seq(rdev, seq, intr); 601 r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
459 if (r) { 602 if (r < 0) {
460 return r; 603 return r;
461 } 604 }
462 return 0; 605 return 0;
@@ -475,6 +618,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
475int radeon_fence_wait_next(struct radeon_device *rdev, int ring) 618int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
476{ 619{
477 uint64_t seq[RADEON_NUM_RINGS] = {}; 620 uint64_t seq[RADEON_NUM_RINGS] = {};
621 long r;
478 622
479 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; 623 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
480 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { 624 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
@@ -482,7 +626,10 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 482 already the last emitted fence */ 626 already the last emitted fence */
483 return -ENOENT; 627 return -ENOENT;
484 } 628 }
485 return radeon_fence_wait_seq(rdev, seq, false); 629 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
630 if (r < 0)
631 return r;
632 return 0;
486} 633}
487 634
488/** 635/**
@@ -498,18 +645,18 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
498int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) 645int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
499{ 646{
500 uint64_t seq[RADEON_NUM_RINGS] = {}; 647 uint64_t seq[RADEON_NUM_RINGS] = {};
501 int r; 648 long r;
502 649
503 seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; 650 seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
504 if (!seq[ring]) 651 if (!seq[ring])
505 return 0; 652 return 0;
506 653
507 r = radeon_fence_wait_seq(rdev, seq, false); 654 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
508 if (r) { 655 if (r < 0) {
509 if (r == -EDEADLK) 656 if (r == -EDEADLK)
510 return -EDEADLK; 657 return -EDEADLK;
511 658
512 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", 659 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
513 ring, r); 660 ring, r);
514 } 661 }
515 return 0; 662 return 0;
@@ -525,7 +672,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
525 */ 672 */
526struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) 673struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
527{ 674{
528 kref_get(&fence->kref); 675 fence_get(&fence->base);
529 return fence; 676 return fence;
530} 677}
531 678
@@ -542,7 +689,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
542 689
543 *fence = NULL; 690 *fence = NULL;
544 if (tmp) { 691 if (tmp) {
545 kref_put(&tmp->kref, radeon_fence_destroy); 692 fence_put(&tmp->base);
546 } 693 }
547} 694}
548 695
@@ -711,6 +858,9 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
711 rdev->fence_drv[ring].sync_seq[i] = 0; 858 rdev->fence_drv[ring].sync_seq[i] = 0;
712 atomic64_set(&rdev->fence_drv[ring].last_seq, 0); 859 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
713 rdev->fence_drv[ring].initialized = false; 860 rdev->fence_drv[ring].initialized = false;
861 INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
862 radeon_fence_check_lockup);
863 rdev->fence_drv[ring].rdev = rdev;
714} 864}
715 865
716/** 866/**
@@ -758,8 +908,9 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
758 r = radeon_fence_wait_empty(rdev, ring); 908 r = radeon_fence_wait_empty(rdev, ring);
759 if (r) { 909 if (r) {
760 /* no need to trigger GPU reset as we are unloading */ 910 /* no need to trigger GPU reset as we are unloading */
761 radeon_fence_driver_force_completion(rdev); 911 radeon_fence_driver_force_completion(rdev, ring);
762 } 912 }
913 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
763 wake_up_all(&rdev->fence_queue); 914 wake_up_all(&rdev->fence_queue);
764 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 915 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
765 rdev->fence_drv[ring].initialized = false; 916 rdev->fence_drv[ring].initialized = false;
@@ -771,18 +922,16 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 771 * radeon_fence_driver_force_completion - force all fence waiters to complete 922 * radeon_fence_driver_force_completion - force all fence waiters to complete
772 * 923 *
773 * @rdev: radeon device pointer 924 * @rdev: radeon device pointer
925 * @ring: the ring to complete
774 * 926 *
 775 * In case of GPU reset failure make sure no process keeps waiting on a fence 927 * In case of GPU reset failure make sure no process keeps waiting on a fence
776 * that will never complete. 928 * that will never complete.
777 */ 929 */
778void radeon_fence_driver_force_completion(struct radeon_device *rdev) 930void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
779{ 931{
780 int ring; 932 if (rdev->fence_drv[ring].initialized) {
781
782 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
783 if (!rdev->fence_drv[ring].initialized)
784 continue;
785 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); 933 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
934 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
786 } 935 }
787} 936}
788 937
@@ -833,6 +982,7 @@ static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
833 down_read(&rdev->exclusive_lock); 982 down_read(&rdev->exclusive_lock);
834 seq_printf(m, "%d\n", rdev->needs_reset); 983 seq_printf(m, "%d\n", rdev->needs_reset);
835 rdev->needs_reset = true; 984 rdev->needs_reset = true;
985 wake_up_all(&rdev->fence_queue);
836 up_read(&rdev->exclusive_lock); 986 up_read(&rdev->exclusive_lock);
837 987
838 return 0; 988 return 0;
@@ -852,3 +1002,72 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
852 return 0; 1002 return 0;
853#endif 1003#endif
854} 1004}
1005
1006static const char *radeon_fence_get_driver_name(struct fence *fence)
1007{
1008 return "radeon";
1009}
1010
1011static const char *radeon_fence_get_timeline_name(struct fence *f)
1012{
1013 struct radeon_fence *fence = to_radeon_fence(f);
1014 switch (fence->ring) {
1015 case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
1016 case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
1017 case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
1018 case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
1019 case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
1020 case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
1021 case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
1022 case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
1023 default: WARN_ON_ONCE(1); return "radeon.unk";
1024 }
1025}
1026
1027static inline bool radeon_test_signaled(struct radeon_fence *fence)
1028{
1029 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1030}
1031
1032static signed long radeon_fence_default_wait(struct fence *f, bool intr,
1033 signed long t)
1034{
1035 struct radeon_fence *fence = to_radeon_fence(f);
1036 struct radeon_device *rdev = fence->rdev;
1037 bool signaled;
1038
1039 fence_enable_sw_signaling(&fence->base);
1040
1041 /*
1042 * This function has to return -EDEADLK, but cannot hold
1043 * exclusive_lock during the wait because some callers
1044 * may already hold it. This means checking needs_reset without
1045 * lock, and not fiddling with any gpu internals.
1046 *
1047 * The callback installed with fence_enable_sw_signaling will
1048 * run before our wait_event_*timeout call, so we will see
1049 * both the signaled fence and the changes to needs_reset.
1050 */
1051
1052 if (intr)
1053 t = wait_event_interruptible_timeout(rdev->fence_queue,
1054 ((signaled = radeon_test_signaled(fence)) ||
1055 rdev->needs_reset), t);
1056 else
1057 t = wait_event_timeout(rdev->fence_queue,
1058 ((signaled = radeon_test_signaled(fence)) ||
1059 rdev->needs_reset), t);
1060
1061 if (t > 0 && !signaled)
1062 return -EDEADLK;
1063 return t;
1064}
1065
1066const struct fence_ops radeon_fence_ops = {
1067 .get_driver_name = radeon_fence_get_driver_name,
1068 .get_timeline_name = radeon_fence_get_timeline_name,
1069 .enable_signaling = radeon_fence_enable_signaling,
1070 .signaled = radeon_fence_is_signaled,
1071 .wait = radeon_fence_default_wait,
1072 .release = NULL,
1073};
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a053a0779aac..84146d5901aa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
128 if (rdev->gart.robj == NULL) { 128 if (rdev->gart.robj == NULL) {
129 r = radeon_bo_create(rdev, rdev->gart.table_size, 129 r = radeon_bo_create(rdev, rdev->gart.table_size,
130 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 130 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
131 0, NULL, &rdev->gart.robj); 131 0, NULL, NULL, &rdev->gart.robj);
132 if (r) { 132 if (r) {
133 return r; 133 return r;
134 } 134 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bfd7e1b0ff3f..c194497aa586 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
67 67
68retry: 68retry:
69 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, 69 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
70 flags, NULL, &robj); 70 flags, NULL, NULL, &robj);
71 if (r) { 71 if (r) {
72 if (r != -ERESTARTSYS) { 72 if (r != -ERESTARTSYS) {
73 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { 73 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
@@ -94,7 +94,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
94{ 94{
95 struct radeon_bo *robj; 95 struct radeon_bo *robj;
96 uint32_t domain; 96 uint32_t domain;
97 int r; 97 long r;
98 98
 99 /* FIXME: reimplement */ 99 /* FIXME: reimplement */
100 robj = gem_to_radeon_bo(gobj); 100 robj = gem_to_radeon_bo(gobj);
@@ -110,9 +110,12 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
110 } 110 }
111 if (domain == RADEON_GEM_DOMAIN_CPU) { 111 if (domain == RADEON_GEM_DOMAIN_CPU) {
112 /* Asking for cpu access wait for object idle */ 112 /* Asking for cpu access wait for object idle */
113 r = radeon_bo_wait(robj, NULL, false); 113 r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
114 if (r) { 114 if (!r)
115 printk(KERN_ERR "Failed to wait for object !\n"); 115 r = -EBUSY;
116
117 if (r < 0 && r != -EINTR) {
118 printk(KERN_ERR "Failed to wait for object: %li\n", r);
116 return r; 119 return r;
117 } 120 }
118 } 121 }
@@ -272,6 +275,94 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
272 return 0; 275 return 0;
273} 276}
274 277
278int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
279 struct drm_file *filp)
280{
281 struct radeon_device *rdev = dev->dev_private;
282 struct drm_radeon_gem_userptr *args = data;
283 struct drm_gem_object *gobj;
284 struct radeon_bo *bo;
285 uint32_t handle;
286 int r;
287
288 if (offset_in_page(args->addr | args->size))
289 return -EINVAL;
290
291 /* reject unknown flag values */
292 if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
293 RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
294 RADEON_GEM_USERPTR_REGISTER))
295 return -EINVAL;
296
297 if (args->flags & RADEON_GEM_USERPTR_READONLY) {
298 /* readonly pages not tested on older hardware */
299 if (rdev->family < CHIP_R600)
300 return -EINVAL;
301
302 } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
303 !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
304
305 /* if we want to write to it we must require anonymous
 306 memory and install an MMU notifier */
307 return -EACCES;
308 }
309
310 down_read(&rdev->exclusive_lock);
311
312 /* create a gem object to contain this object in */
313 r = radeon_gem_object_create(rdev, args->size, 0,
314 RADEON_GEM_DOMAIN_CPU, 0,
315 false, &gobj);
316 if (r)
317 goto handle_lockup;
318
319 bo = gem_to_radeon_bo(gobj);
320 r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
321 if (r)
322 goto release_object;
323
324 if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
325 r = radeon_mn_register(bo, args->addr);
326 if (r)
327 goto release_object;
328 }
329
330 if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
331 down_read(&current->mm->mmap_sem);
332 r = radeon_bo_reserve(bo, true);
333 if (r) {
334 up_read(&current->mm->mmap_sem);
335 goto release_object;
336 }
337
338 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
339 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
340 radeon_bo_unreserve(bo);
341 up_read(&current->mm->mmap_sem);
342 if (r)
343 goto release_object;
344 }
345
346 r = drm_gem_handle_create(filp, gobj, &handle);
347 /* drop reference from allocate - handle holds it now */
348 drm_gem_object_unreference_unlocked(gobj);
349 if (r)
350 goto handle_lockup;
351
352 args->handle = handle;
353 up_read(&rdev->exclusive_lock);
354 return 0;
355
356release_object:
357 drm_gem_object_unreference_unlocked(gobj);
358
359handle_lockup:
360 up_read(&rdev->exclusive_lock);
361 r = radeon_gem_handle_lockup(rdev, r);
362
363 return r;
364}
365
275int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, 366int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
276 struct drm_file *filp) 367 struct drm_file *filp)
277{ 368{
@@ -315,6 +406,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
315 return -ENOENT; 406 return -ENOENT;
316 } 407 }
317 robj = gem_to_radeon_bo(gobj); 408 robj = gem_to_radeon_bo(gobj);
409 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
410 drm_gem_object_unreference_unlocked(gobj);
411 return -EPERM;
412 }
318 *offset_p = radeon_bo_mmap_offset(robj); 413 *offset_p = radeon_bo_mmap_offset(robj);
319 drm_gem_object_unreference_unlocked(gobj); 414 drm_gem_object_unreference_unlocked(gobj);
320 return 0; 415 return 0;
@@ -357,15 +452,22 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
357 struct drm_radeon_gem_wait_idle *args = data; 452 struct drm_radeon_gem_wait_idle *args = data;
358 struct drm_gem_object *gobj; 453 struct drm_gem_object *gobj;
359 struct radeon_bo *robj; 454 struct radeon_bo *robj;
360 int r; 455 int r = 0;
361 uint32_t cur_placement = 0; 456 uint32_t cur_placement = 0;
457 long ret;
362 458
363 gobj = drm_gem_object_lookup(dev, filp, args->handle); 459 gobj = drm_gem_object_lookup(dev, filp, args->handle);
364 if (gobj == NULL) { 460 if (gobj == NULL) {
365 return -ENOENT; 461 return -ENOENT;
366 } 462 }
367 robj = gem_to_radeon_bo(gobj); 463 robj = gem_to_radeon_bo(gobj);
368 r = radeon_bo_wait(robj, &cur_placement, false); 464
465 ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
466 if (ret == 0)
467 r = -EBUSY;
468 else if (ret < 0)
469 r = ret;
470
369 /* Flush HDP cache via MMIO if necessary */ 471 /* Flush HDP cache via MMIO if necessary */
370 if (rdev->asic->mmio_hdp_flush && 472 if (rdev->asic->mmio_hdp_flush &&
371 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 473 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
@@ -532,6 +634,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
532 return -ENOENT; 634 return -ENOENT;
533 } 635 }
534 robj = gem_to_radeon_bo(gobj); 636 robj = gem_to_radeon_bo(gobj);
637
638 r = -EPERM;
639 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
640 goto out;
641
535 r = radeon_bo_reserve(robj, false); 642 r = radeon_bo_reserve(robj, false);
536 if (unlikely(r)) 643 if (unlikely(r))
537 goto out; 644 goto out;
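radeon_gem_userptr_ioctl() above screens its arguments before touching any state: the address and size must be page aligned, unknown flag bits are rejected outright, and write access additionally requires the ANONONLY and REGISTER flags. A compilable sketch of that screening order (the flag values are made up for the example, not the radeon UAPI numbers):

#include <stdint.h>
#include <stdio.h>

#define USERPTR_READONLY	(1u << 0)
#define USERPTR_ANONONLY	(1u << 1)
#define USERPTR_VALIDATE	(1u << 2)
#define USERPTR_REGISTER	(1u << 3)

#define KNOWN_FLAGS (USERPTR_READONLY | USERPTR_ANONONLY | \
		     USERPTR_VALIDATE | USERPTR_REGISTER)

#define PAGE_OFFSET_MASK 0xfffu		/* 4 KiB pages */

static int check_userptr_args(uint64_t addr, uint64_t size, uint32_t flags)
{
	/* both addr and size must be page aligned */
	if ((addr | size) & PAGE_OFFSET_MASK)
		return -1;		/* -EINVAL */

	/* reject any flag bit we do not know about */
	if (flags & ~KNOWN_FLAGS)
		return -1;		/* -EINVAL */

	/* writable mappings must be anonymous and MMU-notifier backed */
	if (!(flags & USERPTR_READONLY) &&
	    (!(flags & USERPTR_ANONONLY) || !(flags & USERPTR_REGISTER)))
		return -2;		/* -EACCES */

	return 0;
}

int main(void)
{
	printf("%d\n", check_userptr_args(0x1000, 0x2000, USERPTR_READONLY));
	printf("%d\n", check_userptr_args(0x1000, 0x2000, 0));	/* -2 */
	return 0;
}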
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 5bf2c0a05827..3f39fcca4d07 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -145,7 +145,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
145 if (ib->vm) { 145 if (ib->vm) {
146 struct radeon_fence *vm_id_fence; 146 struct radeon_fence *vm_id_fence;
147 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); 147 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
148 radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); 148 radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence);
149 } 149 }
150 150
151 /* sync with other rings */ 151 /* sync with other rings */
@@ -269,6 +269,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
269 269
270 r = radeon_ib_test(rdev, i, ring); 270 r = radeon_ib_test(rdev, i, ring);
271 if (r) { 271 if (r) {
272 radeon_fence_driver_force_completion(rdev, i);
272 ring->ready = false; 273 ring->ready = false;
273 rdev->needs_reset = false; 274 rdev->needs_reset = false;
274 275
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 16807afab362..7784911d78ef 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -88,23 +88,6 @@ static void radeon_hotplug_work_func(struct work_struct *work)
88} 88}
89 89
90/** 90/**
91 * radeon_irq_reset_work_func - execute gpu reset
92 *
93 * @work: work struct
94 *
95 * Execute scheduled gpu reset (cayman+).
96 * This function is called when the irq handler
97 * thinks we need a gpu reset.
98 */
99static void radeon_irq_reset_work_func(struct work_struct *work)
100{
101 struct radeon_device *rdev = container_of(work, struct radeon_device,
102 reset_work);
103
104 radeon_gpu_reset(rdev);
105}
106
107/**
108 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback 91 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
109 * 92 *
110 * @dev: drm dev pointer 93 * @dev: drm dev pointer
@@ -284,7 +267,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
284 267
285 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 268 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
286 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); 269 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
288 270
289 rdev->irq.installed = true; 271 rdev->irq.installed = true;
290 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); 272 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
@@ -342,6 +324,21 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
342} 324}
343 325
344/** 326/**
327 * radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
328 *
329 * @rdev: radeon device pointer
330 * @ring: ring whose interrupt you want to enable
331 *
332 * Enables the software interrupt for a specific ring (all asics).
333 * The software interrupt is generally used to signal a fence on
334 * a particular ring.
335 */
336bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
337{
338 return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
339}
340
341/**
345 * radeon_irq_kms_sw_irq_put - disable software interrupt 342 * radeon_irq_kms_sw_irq_put - disable software interrupt
346 * 343 *
347 * @rdev: radeon device pointer 344 * @rdev: radeon device pointer
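radeon_irq_kms_sw_irq_get_delayed() above takes the per-ring interrupt reference like the plain _get variant, but instead of programming the hardware it only reports whether this was the 0 -> 1 transition; the actual radeon_irq_set() is deferred to the lockup worker, which holds the right locks. The transition test in isolation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ring_int;

/* take a reference; report whether we are the first user and the
 * caller therefore still owes the hardware an IRQ enable */
static bool sw_irq_get_delayed(void)
{
	return atomic_fetch_add(&ring_int, 1) + 1 == 1;
}

int main(void)
{
	printf("%d\n", sw_irq_get_delayed());	/* 1: first user */
	printf("%d\n", sw_irq_get_delayed());	/* 0: already enabled */
	return 0;
}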
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index eb7164d07985..8309b11e674d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
885 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 885 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
886 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 886 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
887 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 887 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
888 DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
888}; 889};
889int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); 890int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
new file mode 100644
index 000000000000..a69bd441dd2d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -0,0 +1,274 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Christian König <christian.koenig@amd.com>
29 */
30
31#include <linux/firmware.h>
32#include <linux/module.h>
33#include <linux/mmu_notifier.h>
34#include <drm/drmP.h>
35#include <drm/drm.h>
36
37#include "radeon.h"
38
39struct radeon_mn {
40 /* constant after initialisation */
41 struct radeon_device *rdev;
42 struct mm_struct *mm;
43 struct mmu_notifier mn;
44
45 /* only used on destruction */
46 struct work_struct work;
47
48 /* protected by rdev->mn_lock */
49 struct hlist_node node;
50
51 /* objects protected by lock */
52 struct mutex lock;
53 struct rb_root objects;
54};
55
56/**
57 * radeon_mn_destroy - destroy the rmn
58 *
 59 * @work: previously scheduled work item
60 *
 61 * Lazily destroys the notifier from a work item
62 */
63static void radeon_mn_destroy(struct work_struct *work)
64{
65 struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
66 struct radeon_device *rdev = rmn->rdev;
67 struct radeon_bo *bo, *next;
68
69 mutex_lock(&rdev->mn_lock);
70 mutex_lock(&rmn->lock);
71 hash_del(&rmn->node);
72 rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
73 interval_tree_remove(&bo->mn_it, &rmn->objects);
74 bo->mn = NULL;
75 }
76 mutex_unlock(&rmn->lock);
77 mutex_unlock(&rdev->mn_lock);
78 mmu_notifier_unregister(&rmn->mn, rmn->mm);
79 kfree(rmn);
80}
81
82/**
83 * radeon_mn_release - callback to notify about mm destruction
84 *
85 * @mn: our notifier
 86 * @mm: the mm this callback is about
87 *
 88 * Schedule a work item to lazily destroy our notifier.
89 */
90static void radeon_mn_release(struct mmu_notifier *mn,
91 struct mm_struct *mm)
92{
93 struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
94 INIT_WORK(&rmn->work, radeon_mn_destroy);
95 schedule_work(&rmn->work);
96}
97
98/**
99 * radeon_mn_invalidate_range_start - callback to notify about mm change
100 *
101 * @mn: our notifier
 102 * @mm: the mm this callback is about
103 * @start: start of updated range
104 * @end: end of updated range
105 *
 106 * We block until all BOs between start and end are idle and
 107 * unmap them by moving them back into the system domain.
108 */
109static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
110 struct mm_struct *mm,
111 unsigned long start,
112 unsigned long end)
113{
114 struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
115 struct interval_tree_node *it;
116
117 /* notification is exclusive, but interval is inclusive */
118 end -= 1;
119
120 mutex_lock(&rmn->lock);
121
122 it = interval_tree_iter_first(&rmn->objects, start, end);
123 while (it) {
124 struct radeon_bo *bo;
125 struct fence *fence;
126 int r;
127
128 bo = container_of(it, struct radeon_bo, mn_it);
129 it = interval_tree_iter_next(it, start, end);
130
131 r = radeon_bo_reserve(bo, true);
132 if (r) {
133 DRM_ERROR("(%d) failed to reserve user bo\n", r);
134 continue;
135 }
136
137 fence = reservation_object_get_excl(bo->tbo.resv);
138 if (fence) {
139 r = radeon_fence_wait((struct radeon_fence *)fence, false);
140 if (r)
141 DRM_ERROR("(%d) failed to wait for user bo\n", r);
142 }
143
144 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
145 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
146 if (r)
147 DRM_ERROR("(%d) failed to validate user bo\n", r);
148
149 radeon_bo_unreserve(bo);
150 }
151
152 mutex_unlock(&rmn->lock);
153}
154
155static const struct mmu_notifier_ops radeon_mn_ops = {
156 .release = radeon_mn_release,
157 .invalidate_range_start = radeon_mn_invalidate_range_start,
158};
159
160/**
161 * radeon_mn_get - create notifier context
162 *
163 * @rdev: radeon device pointer
164 *
165 * Creates a notifier context for current->mm.
166 */
167static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
168{
169 struct mm_struct *mm = current->mm;
170 struct radeon_mn *rmn;
171 int r;
172
173 down_write(&mm->mmap_sem);
174 mutex_lock(&rdev->mn_lock);
175
176 hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
177 if (rmn->mm == mm)
178 goto release_locks;
179
180 rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
181 if (!rmn) {
182 rmn = ERR_PTR(-ENOMEM);
183 goto release_locks;
184 }
185
186 rmn->rdev = rdev;
187 rmn->mm = mm;
188 rmn->mn.ops = &radeon_mn_ops;
189 mutex_init(&rmn->lock);
190 rmn->objects = RB_ROOT;
191
192 r = __mmu_notifier_register(&rmn->mn, mm);
193 if (r)
194 goto free_rmn;
195
196 hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);
197
198release_locks:
199 mutex_unlock(&rdev->mn_lock);
200 up_write(&mm->mmap_sem);
201
202 return rmn;
203
204free_rmn:
205 mutex_unlock(&rdev->mn_lock);
206 up_write(&mm->mmap_sem);
207 kfree(rmn);
208
209 return ERR_PTR(r);
210}
211
212/**
213 * radeon_mn_register - register a BO for notifier updates
214 *
215 * @bo: radeon buffer object
216 * @addr: userptr addr we should monitor
217 *
218 * Registers an MMU notifier for the given BO at the specified address.
219 * Returns 0 on success, -ERRNO if anything goes wrong.
220 */
221int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
222{
223 unsigned long end = addr + radeon_bo_size(bo) - 1;
224 struct radeon_device *rdev = bo->rdev;
225 struct radeon_mn *rmn;
226 struct interval_tree_node *it;
227
228 rmn = radeon_mn_get(rdev);
229 if (IS_ERR(rmn))
230 return PTR_ERR(rmn);
231
232 mutex_lock(&rmn->lock);
233
234 it = interval_tree_iter_first(&rmn->objects, addr, end);
235 if (it) {
236 mutex_unlock(&rmn->lock);
237 return -EEXIST;
238 }
239
240 bo->mn = rmn;
241 bo->mn_it.start = addr;
242 bo->mn_it.last = end;
243 interval_tree_insert(&bo->mn_it, &rmn->objects);
244
245 mutex_unlock(&rmn->lock);
246
247 return 0;
248}
249
250/**
251 * radeon_mn_unregister - unregister a BO for notifier updates
252 *
253 * @bo: radeon buffer object
254 *
255 * Remove any registration of MMU notifier updates from the buffer object.
256 */
257void radeon_mn_unregister(struct radeon_bo *bo)
258{
259 struct radeon_device *rdev = bo->rdev;
260 struct radeon_mn *rmn;
261
262 mutex_lock(&rdev->mn_lock);
263 rmn = bo->mn;
264 if (rmn == NULL) {
265 mutex_unlock(&rdev->mn_lock);
266 return;
267 }
268
269 mutex_lock(&rmn->lock);
270 interval_tree_remove(&bo->mn_it, &rmn->objects);
271 bo->mn = NULL;
272 mutex_unlock(&rmn->lock);
273 mutex_unlock(&rdev->mn_lock);
274}
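radeon_mn_register() above keys the interval tree on the inclusive range [addr, addr + size - 1] and refuses to register a BO whose range overlaps an existing entry, which lets the invalidate callback treat every lookup hit as a distinct BO. A linked-list stand-in for the same inclusive-range semantics (the kernel uses an augmented rb-tree; only the overlap rule is modeled here):

#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long start, last;	/* inclusive on both ends */
	struct range *next;
};

static struct range *objects;

/* inclusive ranges overlap iff neither lies wholly before the other */
static bool overlaps(const struct range *r, unsigned long start,
		     unsigned long last)
{
	return r->start <= last && start <= r->last;
}

static int range_register(struct range *r)
{
	struct range *it;

	for (it = objects; it; it = it->next)
		if (overlaps(it, r->start, r->last))
			return -1;	/* -EEXIST */

	r->next = objects;
	objects = r;
	return 0;
}

int main(void)
{
	struct range a = { 0x1000, 0x1fff, NULL };
	struct range b = { 0x1800, 0x27ff, NULL };

	printf("%d %d\n", range_register(&a), range_register(&b)); /* 0 -1 */
	return 0;
}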
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e27608c29c11..04db2fdd8692 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -777,6 +777,7 @@ extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
777extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 777extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
778extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action); 778extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
779extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 779extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
780extern bool radeon_encoder_is_digital(struct drm_encoder *encoder);
780 781
781extern void radeon_crtc_load_lut(struct drm_crtc *crtc); 782extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
782extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 783extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 480c87d8edc5..99a960a4f302 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
75 bo = container_of(tbo, struct radeon_bo, tbo); 75 bo = container_of(tbo, struct radeon_bo, tbo);
76 76
77 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); 77 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
78 radeon_mn_unregister(bo);
78 79
79 mutex_lock(&bo->rdev->gem.mutex); 80 mutex_lock(&bo->rdev->gem.mutex);
80 list_del_init(&bo->list); 81 list_del_init(&bo->list);
@@ -96,55 +97,80 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
96{ 97{
97 u32 c = 0, i; 98 u32 c = 0, i;
98 99
99 rbo->placement.fpfn = 0;
100 rbo->placement.lpfn = 0;
101 rbo->placement.placement = rbo->placements; 100 rbo->placement.placement = rbo->placements;
102 rbo->placement.busy_placement = rbo->placements; 101 rbo->placement.busy_placement = rbo->placements;
103 if (domain & RADEON_GEM_DOMAIN_VRAM) 102 if (domain & RADEON_GEM_DOMAIN_VRAM)
104 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 103 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
105 TTM_PL_FLAG_VRAM; 104 TTM_PL_FLAG_UNCACHED |
105 TTM_PL_FLAG_VRAM;
106
106 if (domain & RADEON_GEM_DOMAIN_GTT) { 107 if (domain & RADEON_GEM_DOMAIN_GTT) {
107 if (rbo->flags & RADEON_GEM_GTT_UC) { 108 if (rbo->flags & RADEON_GEM_GTT_UC) {
108 rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; 109 rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
110 TTM_PL_FLAG_TT;
111
109 } else if ((rbo->flags & RADEON_GEM_GTT_WC) || 112 } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
110 (rbo->rdev->flags & RADEON_IS_AGP)) { 113 (rbo->rdev->flags & RADEON_IS_AGP)) {
111 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 114 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
115 TTM_PL_FLAG_UNCACHED |
112 TTM_PL_FLAG_TT; 116 TTM_PL_FLAG_TT;
113 } else { 117 } else {
114 rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; 118 rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
119 TTM_PL_FLAG_TT;
115 } 120 }
116 } 121 }
122
117 if (domain & RADEON_GEM_DOMAIN_CPU) { 123 if (domain & RADEON_GEM_DOMAIN_CPU) {
118 if (rbo->flags & RADEON_GEM_GTT_UC) { 124 if (rbo->flags & RADEON_GEM_GTT_UC) {
119 rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; 125 rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
126 TTM_PL_FLAG_SYSTEM;
127
120 } else if ((rbo->flags & RADEON_GEM_GTT_WC) || 128 } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
121 rbo->rdev->flags & RADEON_IS_AGP) { 129 rbo->rdev->flags & RADEON_IS_AGP) {
122 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 130 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
131 TTM_PL_FLAG_UNCACHED |
123 TTM_PL_FLAG_SYSTEM; 132 TTM_PL_FLAG_SYSTEM;
124 } else { 133 } else {
125 rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; 134 rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
135 TTM_PL_FLAG_SYSTEM;
126 } 136 }
127 } 137 }
128 if (!c) 138 if (!c)
129 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 139 rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
140 TTM_PL_FLAG_SYSTEM;
141
130 rbo->placement.num_placement = c; 142 rbo->placement.num_placement = c;
131 rbo->placement.num_busy_placement = c; 143 rbo->placement.num_busy_placement = c;
132 144
145 for (i = 0; i < c; ++i) {
146 rbo->placements[i].fpfn = 0;
147 if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
148 (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
149 rbo->placements[i].lpfn =
150 rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
151 else
152 rbo->placements[i].lpfn = 0;
153 }
154
133 /* 155 /*
134 * Use two-ended allocation depending on the buffer size to 156 * Use two-ended allocation depending on the buffer size to
135 * improve fragmentation quality. 157 * improve fragmentation quality.
136 * 512kb was measured as the most optimal number. 158 * 512kb was measured as the most optimal number.
137 */ 159 */
138 if (rbo->tbo.mem.size > 512 * 1024) { 160 if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
161 (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
162 rbo->tbo.mem.size > 512 * 1024) {
139 for (i = 0; i < c; i++) { 163 for (i = 0; i < c; i++) {
140 rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN; 164 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
141 } 165 }
142 } 166 }
143} 167}
144 168
145int radeon_bo_create(struct radeon_device *rdev, 169int radeon_bo_create(struct radeon_device *rdev,
146 unsigned long size, int byte_align, bool kernel, u32 domain, 170 unsigned long size, int byte_align, bool kernel,
147 u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr) 171 u32 domain, u32 flags, struct sg_table *sg,
172 struct reservation_object *resv,
173 struct radeon_bo **bo_ptr)
148{ 174{
149 struct radeon_bo *bo; 175 struct radeon_bo *bo;
150 enum ttm_bo_type type; 176 enum ttm_bo_type type;
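Note: the placement rework above converts radeon's placement arrays from raw u32 flag words to struct ttm_place entries, so each candidate placement now carries its own fpfn/lpfn page window instead of one placement-wide limit. A minimal sketch of filling a single CPU-visible VRAM placement, assuming the 3.18-era TTM headers; the helper name is illustrative, not from this series:

	#include <drm/ttm/ttm_placement.h>

	/* Sketch: one VRAM placement clamped to the CPU-visible window. */
	static void fill_visible_vram_place(struct ttm_place *place,
					    u64 visible_vram_size)
	{
		place->fpfn = 0;                               /* first allowed page */
		place->lpfn = visible_vram_size >> PAGE_SHIFT; /* last allowed page */
		place->flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			       TTM_PL_FLAG_VRAM;
	}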
@@ -192,7 +218,7 @@ int radeon_bo_create(struct radeon_device *rdev,
192 down_read(&rdev->pm.mclk_lock); 218 down_read(&rdev->pm.mclk_lock);
193 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 219 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
194 &bo->placement, page_align, !kernel, NULL, 220 &bo->placement, page_align, !kernel, NULL,
195 acc_size, sg, &radeon_ttm_bo_destroy); 221 acc_size, sg, resv, &radeon_ttm_bo_destroy);
196 up_read(&rdev->pm.mclk_lock); 222 up_read(&rdev->pm.mclk_lock);
197 if (unlikely(r != 0)) { 223 if (unlikely(r != 0)) {
198 return r; 224 return r;
@@ -264,6 +290,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
264{ 290{
265 int r, i; 291 int r, i;
266 292
293 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
294 return -EPERM;
295
267 if (bo->pin_count) { 296 if (bo->pin_count) {
268 bo->pin_count++; 297 bo->pin_count++;
269 if (gpu_addr) 298 if (gpu_addr)
@@ -283,21 +312,19 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
283 return 0; 312 return 0;
284 } 313 }
285 radeon_ttm_placement_from_domain(bo, domain); 314 radeon_ttm_placement_from_domain(bo, domain);
286 if (domain == RADEON_GEM_DOMAIN_VRAM) { 315 for (i = 0; i < bo->placement.num_placement; i++) {
287 /* force to pin into visible video ram */ 316 /* force to pin into visible video ram */
288 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 317 if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
289 } 318 !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
290 if (max_offset) { 319 (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
291 u64 lpfn = max_offset >> PAGE_SHIFT; 320 bo->placements[i].lpfn =
292 321 bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
293 if (!bo->placement.lpfn) 322 else
294 bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; 323 bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
295 324
296 if (lpfn < bo->placement.lpfn) 325 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
297 bo->placement.lpfn = lpfn;
298 } 326 }
299 for (i = 0; i < bo->placement.num_placement; i++) 327
300 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
301 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 328 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
302 if (likely(r == 0)) { 329 if (likely(r == 0)) {
303 bo->pin_count = 1; 330 bo->pin_count = 1;
@@ -329,8 +356,10 @@ int radeon_bo_unpin(struct radeon_bo *bo)
329 bo->pin_count--; 356 bo->pin_count--;
330 if (bo->pin_count) 357 if (bo->pin_count)
331 return 0; 358 return 0;
332 for (i = 0; i < bo->placement.num_placement; i++) 359 for (i = 0; i < bo->placement.num_placement; i++) {
333 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 360 bo->placements[i].lpfn = 0;
361 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
362 }
334 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 363 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
335 if (likely(r == 0)) { 364 if (likely(r == 0)) {
336 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 365 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
@@ -459,7 +488,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
459 u64 bytes_moved = 0, initial_bytes_moved; 488 u64 bytes_moved = 0, initial_bytes_moved;
460 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); 489 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
461 490
462 r = ttm_eu_reserve_buffers(ticket, head); 491 r = ttm_eu_reserve_buffers(ticket, head, true);
463 if (unlikely(r != 0)) { 492 if (unlikely(r != 0)) {
464 return r; 493 return r;
465 } 494 }
@@ -468,6 +497,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
468 bo = lobj->robj; 497 bo = lobj->robj;
469 if (!bo->pin_count) { 498 if (!bo->pin_count) {
470 u32 domain = lobj->prefered_domains; 499 u32 domain = lobj->prefered_domains;
500 u32 allowed = lobj->allowed_domains;
471 u32 current_domain = 501 u32 current_domain =
472 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 502 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
473 503
@@ -479,7 +509,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
479 * into account. We don't want to disallow buffer moves 509 * into account. We don't want to disallow buffer moves
480 * completely. 510 * completely.
481 */ 511 */
482 if ((lobj->allowed_domains & current_domain) != 0 && 512 if ((allowed & current_domain) != 0 &&
483 (domain & current_domain) == 0 && /* will be moved */ 513 (domain & current_domain) == 0 && /* will be moved */
484 bytes_moved > bytes_moved_threshold) { 514 bytes_moved > bytes_moved_threshold) {
485 /* don't move it */ 515 /* don't move it */
@@ -489,7 +519,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
489 retry: 519 retry:
490 radeon_ttm_placement_from_domain(bo, domain); 520 radeon_ttm_placement_from_domain(bo, domain);
491 if (ring == R600_RING_TYPE_UVD_INDEX) 521 if (ring == R600_RING_TYPE_UVD_INDEX)
492 radeon_uvd_force_into_uvd_segment(bo); 522 radeon_uvd_force_into_uvd_segment(bo, allowed);
493 523
494 initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); 524 initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
495 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 525 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
@@ -731,7 +761,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
731 761
732 /* hurrah the memory is not visible ! */ 762 /* hurrah the memory is not visible ! */
733 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 763 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
734 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 764 rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
735 r = ttm_bo_validate(bo, &rbo->placement, false, false); 765 r = ttm_bo_validate(bo, &rbo->placement, false, false);
736 if (unlikely(r == -ENOMEM)) { 766 if (unlikely(r == -ENOMEM)) {
737 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 767 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -755,12 +785,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
755 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 785 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
756 if (unlikely(r != 0)) 786 if (unlikely(r != 0))
757 return r; 787 return r;
758 spin_lock(&bo->tbo.bdev->fence_lock);
759 if (mem_type) 788 if (mem_type)
760 *mem_type = bo->tbo.mem.mem_type; 789 *mem_type = bo->tbo.mem.mem_type;
761 if (bo->tbo.sync_obj) 790
762 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 791 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
763 spin_unlock(&bo->tbo.bdev->fence_lock);
764 ttm_bo_unreserve(&bo->tbo); 792 ttm_bo_unreserve(&bo->tbo);
765 return r; 793 return r;
766} 794}
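Note: radeon_bo_create() gains a struct reservation_object *resv parameter that is threaded straight into ttm_bo_init(), and radeon_bo_wait() now goes through ttm_bo_wait() instead of taking bdev->fence_lock and poking sync_obj. A hedged caller sketch; passing NULL for both sg and resv, as most call sites in this series do, lets TTM allocate a fresh reservation object:

	struct radeon_bo *bo;
	int r;

	/* kernel == true selects ttm_bo_type_kernel (not mappable by userspace) */
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
	if (r)
		return r;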
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 98a47fdf3625..1b8ec7917154 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,6 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
126 unsigned long size, int byte_align, 126 unsigned long size, int byte_align,
127 bool kernel, u32 domain, u32 flags, 127 bool kernel, u32 domain, u32 flags,
128 struct sg_table *sg, 128 struct sg_table *sg,
129 struct reservation_object *resv,
129 struct radeon_bo **bo_ptr); 130 struct radeon_bo **bo_ptr);
130extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 131extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
131extern void radeon_bo_kunmap(struct radeon_bo *bo); 132extern void radeon_bo_kunmap(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 164898b0010c..32522cc940a1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1556,7 +1556,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1556 if (rdev->pm.active_crtcs & (1 << crtc)) { 1556 if (rdev->pm.active_crtcs & (1 << crtc)) {
1557 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); 1557 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
1558 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1558 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1559 !(vbl_status & DRM_SCANOUTPOS_INVBL)) 1559 !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1560 in_vbl = false; 1560 in_vbl = false;
1561 } 1561 }
1562 } 1562 }
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 0b16f2cbcf17..f3609c97496b 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -27,6 +27,7 @@
27 27
28#include "radeon.h" 28#include "radeon.h"
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include <linux/dma-buf.h>
30 31
31struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) 32struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
32{ 33{
@@ -57,15 +58,18 @@ void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
57} 58}
58 59
59struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, 60struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
60 size_t size, 61 struct dma_buf_attachment *attach,
61 struct sg_table *sg) 62 struct sg_table *sg)
62{ 63{
64 struct reservation_object *resv = attach->dmabuf->resv;
63 struct radeon_device *rdev = dev->dev_private; 65 struct radeon_device *rdev = dev->dev_private;
64 struct radeon_bo *bo; 66 struct radeon_bo *bo;
65 int ret; 67 int ret;
66 68
67 ret = radeon_bo_create(rdev, size, PAGE_SIZE, false, 69 ww_mutex_lock(&resv->lock, NULL);
68 RADEON_GEM_DOMAIN_GTT, 0, sg, &bo); 70 ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
71 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
72 ww_mutex_unlock(&resv->lock);
69 if (ret) 73 if (ret)
70 return ERR_PTR(ret); 74 return ERR_PTR(ret);
71 75
@@ -111,3 +115,13 @@ struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
111 115
112 return bo->tbo.resv; 116 return bo->tbo.resv;
113} 117}
118
119struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
120 struct drm_gem_object *gobj,
121 int flags)
122{
123 struct radeon_bo *bo = gem_to_radeon_bo(gobj);
124 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
125 return ERR_PTR(-EPERM);
126 return drm_gem_prime_export(dev, gobj, flags);
127}
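Note: userptr BOs wrap pages borrowed from a process address space, so the new export hook refuses to turn them into dma-bufs, mirroring the -EPERM check added to radeon_bo_pin_restricted() earlier in this diff. A sketch of how such a hook is typically wired up, assuming the 3.18-era struct drm_driver PRIME callbacks:

	static struct drm_driver radeon_driver_sketch = {
		/* ... */
		.gem_prime_export = radeon_gem_prime_export,
		.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
		/* ... */
	};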
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index d65607902537..3d17af34afa7 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -45,27 +45,6 @@
45static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); 45static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
46 46
47/** 47/**
48 * radeon_ring_write - write a value to the ring
49 *
50 * @ring: radeon_ring structure holding ring information
51 * @v: dword (dw) value to write
52 *
53 * Write a value to the requested ring buffer (all asics).
54 */
55void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
56{
57#if DRM_DEBUG_CODE
58 if (ring->count_dw <= 0) {
59 DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
60 }
61#endif
62 ring->ring[ring->wptr++] = v;
63 ring->wptr &= ring->ptr_mask;
64 ring->count_dw--;
65 ring->ring_free_dw--;
66}
67
68/**
69 * radeon_ring_supports_scratch_reg - check if the ring supports 48 * radeon_ring_supports_scratch_reg - check if the ring supports
70 * writing to scratch registers 49 * writing to scratch registers
71 * 50 *
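Note: radeon_ring_write() disappears from radeon_ring.c; the logic itself survives (presumably as a static inline in radeon.h, which this diff does not show). Reconstructed from the removed lines, with the DRM_DEBUG_CODE guard dropped, the equivalent body is:

	static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
	{
		if (ring->count_dw <= 0)
			DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");

		ring->ring[ring->wptr++] = v;
		ring->wptr &= ring->ptr_mask;
		ring->count_dw--;
		ring->ring_free_dw--;
	}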
@@ -404,7 +383,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
404 /* Allocate ring buffer */ 383 /* Allocate ring buffer */
405 if (ring->ring_obj == NULL) { 384 if (ring->ring_obj == NULL) {
406 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, 385 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
407 RADEON_GEM_DOMAIN_GTT, 0, 386 RADEON_GEM_DOMAIN_GTT, 0, NULL,
408 NULL, &ring->ring_obj); 387 NULL, &ring->ring_obj);
409 if (r) { 388 if (r) {
410 dev_err(rdev->dev, "(%d) ring create failed\n", r); 389 dev_err(rdev->dev, "(%d) ring create failed\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index b84f97c8718c..c507896aca45 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
65 } 65 }
66 66
67 r = radeon_bo_create(rdev, size, align, true, 67 r = radeon_bo_create(rdev, size, align, true,
68 domain, flags, NULL, &sa_manager->bo); 68 domain, flags, NULL, NULL, &sa_manager->bo);
69 if (r) { 69 if (r) {
70 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
71 return r; 71 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index abd6753a570a..6deb08f045b7 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -96,15 +96,15 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
96} 96}
97 97
98/** 98/**
99 * radeon_semaphore_sync_to - use the semaphore to sync to a fence 99 * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
100 * 100 *
101 * @semaphore: semaphore object to add fence to 101 * @semaphore: semaphore object to add fence to
102 * @fence: fence to sync to 102 * @fence: fence to sync to
103 * 103 *
104 * Sync to the fence using this semaphore object 104 * Sync to the fence using this semaphore object
105 */ 105 */
106void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, 106void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
107 struct radeon_fence *fence) 107 struct radeon_fence *fence)
108{ 108{
109 struct radeon_fence *other; 109 struct radeon_fence *other;
110 110
@@ -116,6 +116,53 @@ void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
116} 116}
117 117
118/** 118/**
 119 * radeon_semaphore_sync_resv - use the semaphore to sync to a reservation object
120 *
121 * @sema: semaphore object to add fence from reservation object to
122 * @resv: reservation object with embedded fence
 123 * @shared: true if we should only sync to the exclusive fence
124 *
 125 * Sync to the fences inside the reservation object using this semaphore
126 */
127int radeon_semaphore_sync_resv(struct radeon_device *rdev,
128 struct radeon_semaphore *sema,
129 struct reservation_object *resv,
130 bool shared)
131{
132 struct reservation_object_list *flist;
133 struct fence *f;
134 struct radeon_fence *fence;
135 unsigned i;
136 int r = 0;
137
138 /* always sync to the exclusive fence */
139 f = reservation_object_get_excl(resv);
140 fence = f ? to_radeon_fence(f) : NULL;
141 if (fence && fence->rdev == rdev)
142 radeon_semaphore_sync_fence(sema, fence);
143 else if (f)
144 r = fence_wait(f, true);
145
146 flist = reservation_object_get_list(resv);
147 if (shared || !flist || r)
148 return r;
149
150 for (i = 0; i < flist->shared_count; ++i) {
151 f = rcu_dereference_protected(flist->shared[i],
152 reservation_object_held(resv));
153 fence = to_radeon_fence(f);
154 if (fence && fence->rdev == rdev)
155 radeon_semaphore_sync_fence(sema, fence);
156 else
157 r = fence_wait(f, true);
158
159 if (r)
160 break;
161 }
162 return r;
163}
164
165/**
119 * radeon_semaphore_sync_rings - sync ring to all registered fences 166 * radeon_semaphore_sync_rings - sync ring to all registered fences
120 * 167 *
121 * @rdev: radeon_device pointer 168 * @rdev: radeon_device pointer
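Note: radeon_semaphore_sync_resv() is the bridge between the new shared reservation objects and radeon's semaphore-based ring syncing: fences owned by this device are queued on the semaphore, foreign fences are simply waited on with fence_wait(). Usage sketch, following the radeon_vm.c call sites later in this diff (vm->last_id_use is the VM's last-use fence there; the error check is added for the sketch):

	r = radeon_semaphore_sync_resv(rdev, ib.semaphore, bo->tbo.resv, false);
	if (r)
		goto error;
	radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
	r = radeon_ib_schedule(rdev, &ib, NULL, false);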
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 23bb64fd775f..535403e0c8a2 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -30,9 +30,9 @@
30 */ 30 */
31 31
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/drm_buffer.h>
34#include <drm/radeon_drm.h> 33#include <drm/radeon_drm.h>
35#include "radeon_drv.h" 34#include "radeon_drv.h"
35#include "drm_buffer.h"
36 36
37/* ================================================================ 37/* ================================================================
38 * Helper functions for client state checking and fixup 38 * Helper functions for client state checking and fixup
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 17bc3dced9f1..07b506b41008 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -67,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
67 } 67 }
68 68
69 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 69 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
70 0, NULL, &vram_obj); 70 0, NULL, NULL, &vram_obj);
71 if (r) { 71 if (r) {
72 DRM_ERROR("Failed to create VRAM object\n"); 72 DRM_ERROR("Failed to create VRAM object\n");
73 goto out_cleanup; 73 goto out_cleanup;
@@ -87,7 +87,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
87 struct radeon_fence *fence = NULL; 87 struct radeon_fence *fence = NULL;
88 88
89 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, 89 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
90 RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); 90 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
91 gtt_obj + i);
91 if (r) { 92 if (r) {
92 DRM_ERROR("Failed to create GTT object %d\n", i); 93 DRM_ERROR("Failed to create GTT object %d\n", i);
93 goto out_lclean; 94 goto out_lclean;
@@ -116,11 +117,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
116 radeon_bo_kunmap(gtt_obj[i]); 117 radeon_bo_kunmap(gtt_obj[i]);
117 118
118 if (ring == R600_RING_TYPE_DMA_INDEX) 119 if (ring == R600_RING_TYPE_DMA_INDEX)
119 r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 120 fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
121 size / RADEON_GPU_PAGE_SIZE,
122 NULL);
120 else 123 else
121 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 124 fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
122 if (r) { 125 size / RADEON_GPU_PAGE_SIZE,
126 NULL);
127 if (IS_ERR(fence)) {
123 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 128 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
129 r = PTR_ERR(fence);
124 goto out_lclean_unpin; 130 goto out_lclean_unpin;
125 } 131 }
126 132
@@ -162,11 +168,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
162 radeon_bo_kunmap(vram_obj); 168 radeon_bo_kunmap(vram_obj);
163 169
164 if (ring == R600_RING_TYPE_DMA_INDEX) 170 if (ring == R600_RING_TYPE_DMA_INDEX)
165 r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 171 fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
172 size / RADEON_GPU_PAGE_SIZE,
173 NULL);
166 else 174 else
167 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 175 fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
168 if (r) { 176 size / RADEON_GPU_PAGE_SIZE,
177 NULL);
178 if (IS_ERR(fence)) {
169 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 179 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
180 r = PTR_ERR(fence);
170 goto out_lclean_unpin; 181 goto out_lclean_unpin;
171 } 182 }
172 183
@@ -222,7 +233,7 @@ out_lclean:
222 radeon_bo_unreserve(gtt_obj[i]); 233 radeon_bo_unreserve(gtt_obj[i]);
223 radeon_bo_unref(&gtt_obj[i]); 234 radeon_bo_unref(&gtt_obj[i]);
224 } 235 }
225 if (fence) 236 if (fence && !IS_ERR(fence))
226 radeon_fence_unref(&fence); 237 radeon_fence_unref(&fence);
227 break; 238 break;
228 } 239 }
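Note: the copy entry points now hand back the fence directly, using ERR_PTR encoding for failure, instead of filling a struct radeon_fence ** out-parameter; the last argument became the reservation object to sync against. The resulting caller pattern, as exercised by the test above:

	struct radeon_fence *fence;
	int r;

	fence = radeon_copy_dma(rdev, src_addr, dst_addr,
				size / RADEON_GPU_PAGE_SIZE, NULL);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;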
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 72afe82a95c9..8624979afb65 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,6 +39,8 @@
39#include <linux/seq_file.h> 39#include <linux/seq_file.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/swiotlb.h> 41#include <linux/swiotlb.h>
42#include <linux/swap.h>
43#include <linux/pagemap.h>
42#include <linux/debugfs.h> 44#include <linux/debugfs.h>
43#include "radeon_reg.h" 45#include "radeon_reg.h"
44#include "radeon.h" 46#include "radeon.h"
@@ -176,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
176static void radeon_evict_flags(struct ttm_buffer_object *bo, 178static void radeon_evict_flags(struct ttm_buffer_object *bo,
177 struct ttm_placement *placement) 179 struct ttm_placement *placement)
178{ 180{
181 static struct ttm_place placements = {
182 .fpfn = 0,
183 .lpfn = 0,
184 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
185 };
186
179 struct radeon_bo *rbo; 187 struct radeon_bo *rbo;
180 static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
181 188
182 if (!radeon_ttm_bo_is_radeon_bo(bo)) { 189 if (!radeon_ttm_bo_is_radeon_bo(bo)) {
183 placement->fpfn = 0;
184 placement->lpfn = 0;
185 placement->placement = &placements; 190 placement->placement = &placements;
186 placement->busy_placement = &placements; 191 placement->busy_placement = &placements;
187 placement->num_placement = 1; 192 placement->num_placement = 1;
@@ -228,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
228 struct radeon_device *rdev; 233 struct radeon_device *rdev;
229 uint64_t old_start, new_start; 234 uint64_t old_start, new_start;
230 struct radeon_fence *fence; 235 struct radeon_fence *fence;
236 unsigned num_pages;
231 int r, ridx; 237 int r, ridx;
232 238
233 rdev = radeon_get_rdev(bo->bdev); 239 rdev = radeon_get_rdev(bo->bdev);
@@ -264,13 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
264 270
265 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); 271 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
266 272
267 /* sync other rings */ 273 num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
268 fence = bo->sync_obj; 274 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
269 r = radeon_copy(rdev, old_start, new_start, 275 if (IS_ERR(fence))
270 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ 276 return PTR_ERR(fence);
271 &fence); 277
272 /* FIXME: handle copy error */ 278 r = ttm_bo_move_accel_cleanup(bo, &fence->base,
273 r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
274 evict, no_wait_gpu, new_mem); 279 evict, no_wait_gpu, new_mem);
275 radeon_fence_unref(&fence); 280 radeon_fence_unref(&fence);
276 return r; 281 return r;
@@ -284,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
284 struct radeon_device *rdev; 289 struct radeon_device *rdev;
285 struct ttm_mem_reg *old_mem = &bo->mem; 290 struct ttm_mem_reg *old_mem = &bo->mem;
286 struct ttm_mem_reg tmp_mem; 291 struct ttm_mem_reg tmp_mem;
287 u32 placements; 292 struct ttm_place placements;
288 struct ttm_placement placement; 293 struct ttm_placement placement;
289 int r; 294 int r;
290 295
291 rdev = radeon_get_rdev(bo->bdev); 296 rdev = radeon_get_rdev(bo->bdev);
292 tmp_mem = *new_mem; 297 tmp_mem = *new_mem;
293 tmp_mem.mm_node = NULL; 298 tmp_mem.mm_node = NULL;
294 placement.fpfn = 0;
295 placement.lpfn = 0;
296 placement.num_placement = 1; 299 placement.num_placement = 1;
297 placement.placement = &placements; 300 placement.placement = &placements;
298 placement.num_busy_placement = 1; 301 placement.num_busy_placement = 1;
299 placement.busy_placement = &placements; 302 placement.busy_placement = &placements;
300 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 303 placements.fpfn = 0;
304 placements.lpfn = 0;
305 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
301 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 306 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
302 interruptible, no_wait_gpu); 307 interruptible, no_wait_gpu);
303 if (unlikely(r)) { 308 if (unlikely(r)) {
@@ -332,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
332 struct ttm_mem_reg *old_mem = &bo->mem; 337 struct ttm_mem_reg *old_mem = &bo->mem;
333 struct ttm_mem_reg tmp_mem; 338 struct ttm_mem_reg tmp_mem;
334 struct ttm_placement placement; 339 struct ttm_placement placement;
335 u32 placements; 340 struct ttm_place placements;
336 int r; 341 int r;
337 342
338 rdev = radeon_get_rdev(bo->bdev); 343 rdev = radeon_get_rdev(bo->bdev);
339 tmp_mem = *new_mem; 344 tmp_mem = *new_mem;
340 tmp_mem.mm_node = NULL; 345 tmp_mem.mm_node = NULL;
341 placement.fpfn = 0;
342 placement.lpfn = 0;
343 placement.num_placement = 1; 346 placement.num_placement = 1;
344 placement.placement = &placements; 347 placement.placement = &placements;
345 placement.num_busy_placement = 1; 348 placement.num_busy_placement = 1;
346 placement.busy_placement = &placements; 349 placement.busy_placement = &placements;
347 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 350 placements.fpfn = 0;
351 placements.lpfn = 0;
352 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
348 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 353 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
349 interruptible, no_wait_gpu); 354 interruptible, no_wait_gpu);
350 if (unlikely(r)) { 355 if (unlikely(r)) {
@@ -483,39 +488,108 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
483{ 488{
484} 489}
485 490
486static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) 491/*
487{ 492 * TTM backend functions.
488 return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); 493 */
489} 494struct radeon_ttm_tt {
495 struct ttm_dma_tt ttm;
496 struct radeon_device *rdev;
497 u64 offset;
490 498
491static int radeon_sync_obj_flush(void *sync_obj) 499 uint64_t userptr;
500 struct mm_struct *usermm;
501 uint32_t userflags;
502};
503
504/* prepare the sg table with the user pages */
505static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
492{ 506{
507 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
508 struct radeon_ttm_tt *gtt = (void *)ttm;
509 unsigned pinned = 0, nents;
510 int r;
511
512 int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
513 enum dma_data_direction direction = write ?
514 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
515
516 if (current->mm != gtt->usermm)
517 return -EPERM;
518
519 if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
520 /* check that we only pin down anonymous memory
521 to prevent problems with writeback */
522 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
523 struct vm_area_struct *vma;
524 vma = find_vma(gtt->usermm, gtt->userptr);
525 if (!vma || vma->vm_file || vma->vm_end < end)
526 return -EPERM;
527 }
528
529 do {
530 unsigned num_pages = ttm->num_pages - pinned;
531 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
532 struct page **pages = ttm->pages + pinned;
533
534 r = get_user_pages(current, current->mm, userptr, num_pages,
535 write, 0, pages, NULL);
536 if (r < 0)
537 goto release_pages;
538
539 pinned += r;
540
541 } while (pinned < ttm->num_pages);
542
543 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
544 ttm->num_pages << PAGE_SHIFT,
545 GFP_KERNEL);
546 if (r)
547 goto release_sg;
548
549 r = -ENOMEM;
550 nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
551 if (nents != ttm->sg->nents)
552 goto release_sg;
553
554 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
555 gtt->ttm.dma_address, ttm->num_pages);
556
493 return 0; 557 return 0;
494}
495 558
496static void radeon_sync_obj_unref(void **sync_obj) 559release_sg:
497{ 560 kfree(ttm->sg);
498 radeon_fence_unref((struct radeon_fence **)sync_obj);
499}
500 561
501static void *radeon_sync_obj_ref(void *sync_obj) 562release_pages:
502{ 563 release_pages(ttm->pages, pinned, 0);
503 return radeon_fence_ref((struct radeon_fence *)sync_obj); 564 return r;
504} 565}
505 566
506static bool radeon_sync_obj_signaled(void *sync_obj) 567static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
507{ 568{
508 return radeon_fence_signaled((struct radeon_fence *)sync_obj); 569 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
509} 570 struct radeon_ttm_tt *gtt = (void *)ttm;
571 struct scatterlist *sg;
572 int i;
510 573
511/* 574 int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
512 * TTM backend functions. 575 enum dma_data_direction direction = write ?
513 */ 576 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
514struct radeon_ttm_tt { 577
515 struct ttm_dma_tt ttm; 578 /* free the sg table and pages again */
516 struct radeon_device *rdev; 579 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
517 u64 offset; 580
518}; 581 for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
582 struct page *page = sg_page(sg);
583
584 if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
585 set_page_dirty(page);
586
587 mark_page_accessed(page);
588 page_cache_release(page);
589 }
590
591 sg_free_table(ttm->sg);
592}
519 593
520static int radeon_ttm_backend_bind(struct ttm_tt *ttm, 594static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
521 struct ttm_mem_reg *bo_mem) 595 struct ttm_mem_reg *bo_mem)
@@ -525,6 +599,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
525 RADEON_GART_PAGE_WRITE; 599 RADEON_GART_PAGE_WRITE;
526 int r; 600 int r;
527 601
602 if (gtt->userptr) {
603 radeon_ttm_tt_pin_userptr(ttm);
604 flags &= ~RADEON_GART_PAGE_WRITE;
605 }
606
528 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); 607 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
529 if (!ttm->num_pages) { 608 if (!ttm->num_pages) {
530 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", 609 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -547,6 +626,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
547 struct radeon_ttm_tt *gtt = (void *)ttm; 626 struct radeon_ttm_tt *gtt = (void *)ttm;
548 627
549 radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); 628 radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
629
630 if (gtt->userptr)
631 radeon_ttm_tt_unpin_userptr(ttm);
632
550 return 0; 633 return 0;
551} 634}
552 635
@@ -592,10 +675,17 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
592 return &gtt->ttm.ttm; 675 return &gtt->ttm.ttm;
593} 676}
594 677
678static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
679{
680 if (!ttm || ttm->func != &radeon_backend_func)
681 return NULL;
682 return (struct radeon_ttm_tt *)ttm;
683}
684
595static int radeon_ttm_tt_populate(struct ttm_tt *ttm) 685static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
596{ 686{
687 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
597 struct radeon_device *rdev; 688 struct radeon_device *rdev;
598 struct radeon_ttm_tt *gtt = (void *)ttm;
599 unsigned i; 689 unsigned i;
600 int r; 690 int r;
601 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 691 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -603,6 +693,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
603 if (ttm->state != tt_unpopulated) 693 if (ttm->state != tt_unpopulated)
604 return 0; 694 return 0;
605 695
696 if (gtt && gtt->userptr) {
697 ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
698 if (!ttm->sg)
699 return -ENOMEM;
700
701 ttm->page_flags |= TTM_PAGE_FLAG_SG;
702 ttm->state = tt_unbound;
703 return 0;
704 }
705
606 if (slave && ttm->sg) { 706 if (slave && ttm->sg) {
607 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, 707 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
608 gtt->ttm.dma_address, ttm->num_pages); 708 gtt->ttm.dma_address, ttm->num_pages);
@@ -648,10 +748,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
648static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) 748static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
649{ 749{
650 struct radeon_device *rdev; 750 struct radeon_device *rdev;
651 struct radeon_ttm_tt *gtt = (void *)ttm; 751 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
652 unsigned i; 752 unsigned i;
653 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 753 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
654 754
755 if (gtt && gtt->userptr) {
756 kfree(ttm->sg);
757 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
758 return;
759 }
760
655 if (slave) 761 if (slave)
656 return; 762 return;
657 763
@@ -680,6 +786,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
680 ttm_pool_unpopulate(ttm); 786 ttm_pool_unpopulate(ttm);
681} 787}
682 788
789int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
790 uint32_t flags)
791{
792 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
793
794 if (gtt == NULL)
795 return -EINVAL;
796
797 gtt->userptr = addr;
798 gtt->usermm = current->mm;
799 gtt->userflags = flags;
800 return 0;
801}
802
803bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
804{
805 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
806
807 if (gtt == NULL)
808 return false;
809
810 return !!gtt->userptr;
811}
812
813bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
814{
815 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
816
817 if (gtt == NULL)
818 return false;
819
820 return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
821}
822
683static struct ttm_bo_driver radeon_bo_driver = { 823static struct ttm_bo_driver radeon_bo_driver = {
684 .ttm_tt_create = &radeon_ttm_tt_create, 824 .ttm_tt_create = &radeon_ttm_tt_create,
685 .ttm_tt_populate = &radeon_ttm_tt_populate, 825 .ttm_tt_populate = &radeon_ttm_tt_populate,
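Note: the three userptr helpers give the rest of the driver a single way to ask about user-backed TTMs. A hedged sketch of the flow; args->addr and args->flags stand in for whatever the userptr ioctl passes down and are not names from this series:

	/* at create time: remember the user mapping on the TTM */
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		return r;

	/* later: userptr BOs must never be pinned or exported */
	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	/* and read-only mappings lose the writeable PTE flag */
	if (radeon_ttm_tt_is_readonly(bo->tbo.ttm))
		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;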
@@ -689,11 +829,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
689 .evict_flags = &radeon_evict_flags, 829 .evict_flags = &radeon_evict_flags,
690 .move = &radeon_bo_move, 830 .move = &radeon_bo_move,
691 .verify_access = &radeon_verify_access, 831 .verify_access = &radeon_verify_access,
692 .sync_obj_signaled = &radeon_sync_obj_signaled,
693 .sync_obj_wait = &radeon_sync_obj_wait,
694 .sync_obj_flush = &radeon_sync_obj_flush,
695 .sync_obj_unref = &radeon_sync_obj_unref,
696 .sync_obj_ref = &radeon_sync_obj_ref,
697 .move_notify = &radeon_bo_move_notify, 832 .move_notify = &radeon_bo_move_notify,
698 .fault_reserve_notify = &radeon_bo_fault_reserve_notify, 833 .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
699 .io_mem_reserve = &radeon_ttm_io_mem_reserve, 834 .io_mem_reserve = &radeon_ttm_io_mem_reserve,
@@ -730,7 +865,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
730 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 865 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
731 866
732 r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, 867 r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
733 RADEON_GEM_DOMAIN_VRAM, 0, 868 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
734 NULL, &rdev->stollen_vga_memory); 869 NULL, &rdev->stollen_vga_memory);
735 if (r) { 870 if (r) {
736 return r; 871 return r;
@@ -828,7 +963,7 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
828 int r; 963 int r;
829 964
830 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { 965 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
831 return drm_mmap(filp, vma); 966 return -EINVAL;
832 } 967 }
833 968
834 file_priv = filp->private_data; 969 file_priv = filp->private_data;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 341848a14376..11b662469253 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -40,12 +40,18 @@
40#define UVD_IDLE_TIMEOUT_MS 1000 40#define UVD_IDLE_TIMEOUT_MS 1000
41 41
42/* Firmware Names */ 42/* Firmware Names */
43#define FIRMWARE_R600 "radeon/R600_uvd.bin"
44#define FIRMWARE_RS780 "radeon/RS780_uvd.bin"
45#define FIRMWARE_RV770 "radeon/RV770_uvd.bin"
43#define FIRMWARE_RV710 "radeon/RV710_uvd.bin" 46#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin" 47#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin" 48#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin" 49#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
47#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin" 50#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin"
48 51
52MODULE_FIRMWARE(FIRMWARE_R600);
53MODULE_FIRMWARE(FIRMWARE_RS780);
54MODULE_FIRMWARE(FIRMWARE_RV770);
49MODULE_FIRMWARE(FIRMWARE_RV710); 55MODULE_FIRMWARE(FIRMWARE_RV710);
50MODULE_FIRMWARE(FIRMWARE_CYPRESS); 56MODULE_FIRMWARE(FIRMWARE_CYPRESS);
51MODULE_FIRMWARE(FIRMWARE_SUMO); 57MODULE_FIRMWARE(FIRMWARE_SUMO);
@@ -63,6 +69,23 @@ int radeon_uvd_init(struct radeon_device *rdev)
63 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); 69 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
64 70
65 switch (rdev->family) { 71 switch (rdev->family) {
72 case CHIP_RV610:
73 case CHIP_RV630:
74 case CHIP_RV670:
75 case CHIP_RV620:
76 case CHIP_RV635:
77 fw_name = FIRMWARE_R600;
78 break;
79
80 case CHIP_RS780:
81 case CHIP_RS880:
82 fw_name = FIRMWARE_RS780;
83 break;
84
85 case CHIP_RV770:
86 fw_name = FIRMWARE_RV770;
87 break;
88
66 case CHIP_RV710: 89 case CHIP_RV710:
67 case CHIP_RV730: 90 case CHIP_RV730:
68 case CHIP_RV740: 91 case CHIP_RV740:
@@ -115,9 +138,11 @@ int radeon_uvd_init(struct radeon_device *rdev)
115 } 138 }
116 139
117 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + 140 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
118 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; 141 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
142 RADEON_GPU_PAGE_SIZE;
119 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, 143 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
120 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo); 144 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
145 NULL, &rdev->uvd.vcpu_bo);
121 if (r) { 146 if (r) {
122 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); 147 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
123 return r; 148 return r;
@@ -231,10 +256,30 @@ int radeon_uvd_resume(struct radeon_device *rdev)
231 return 0; 256 return 0;
232} 257}
233 258
234void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo) 259void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
260 uint32_t allowed_domains)
235{ 261{
236 rbo->placement.fpfn = 0 >> PAGE_SHIFT; 262 int i;
237 rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; 263
264 for (i = 0; i < rbo->placement.num_placement; ++i) {
265 rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
266 rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
267 }
268
269 /* If it must be in VRAM it must be in the first segment as well */
270 if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
271 return;
272
273 /* abort if we already have more than one placement */
274 if (rbo->placement.num_placement > 1)
275 return;
276
277 /* add another 256MB segment */
278 rbo->placements[1] = rbo->placements[0];
279 rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
280 rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
281 rbo->placement.num_placement++;
282 rbo->placement.num_busy_placement++;
238} 283}
239 284
240void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) 285void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
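Note: with per-placement limits, the UVD clamp walks every placement and caps it to the first 256MB segment; when the allowed domains include more than VRAM and only one placement exists, a second, shifted 256MB window is appended as a fallback. The caller pairing, as in radeon_bo_list_validate() earlier in this diff:

	radeon_ttm_placement_from_domain(bo, domain);
	if (ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_force_into_uvd_segment(bo, allowed);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);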
@@ -356,6 +401,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
356{ 401{
357 int32_t *msg, msg_type, handle; 402 int32_t *msg, msg_type, handle;
358 unsigned img_size = 0; 403 unsigned img_size = 0;
404 struct fence *f;
359 void *ptr; 405 void *ptr;
360 406
361 int i, r; 407 int i, r;
@@ -365,8 +411,9 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
365 return -EINVAL; 411 return -EINVAL;
366 } 412 }
367 413
368 if (bo->tbo.sync_obj) { 414 f = reservation_object_get_excl(bo->tbo.resv);
369 r = radeon_fence_wait(bo->tbo.sync_obj, false); 415 if (f) {
416 r = radeon_fence_wait((struct radeon_fence *)f, false);
370 if (r) { 417 if (r) {
371 DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); 418 DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
372 return r; 419 return r;
@@ -604,38 +651,16 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
604} 651}
605 652
606static int radeon_uvd_send_msg(struct radeon_device *rdev, 653static int radeon_uvd_send_msg(struct radeon_device *rdev,
607 int ring, struct radeon_bo *bo, 654 int ring, uint64_t addr,
608 struct radeon_fence **fence) 655 struct radeon_fence **fence)
609{ 656{
610 struct ttm_validate_buffer tv;
611 struct ww_acquire_ctx ticket;
612 struct list_head head;
613 struct radeon_ib ib; 657 struct radeon_ib ib;
614 uint64_t addr;
615 int i, r; 658 int i, r;
616 659
617 memset(&tv, 0, sizeof(tv));
618 tv.bo = &bo->tbo;
619
620 INIT_LIST_HEAD(&head);
621 list_add(&tv.head, &head);
622
623 r = ttm_eu_reserve_buffers(&ticket, &head);
624 if (r)
625 return r;
626
627 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
628 radeon_uvd_force_into_uvd_segment(bo);
629
630 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
631 if (r)
632 goto err;
633
634 r = radeon_ib_get(rdev, ring, &ib, NULL, 64); 660 r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
635 if (r) 661 if (r)
636 goto err; 662 return r;
637 663
638 addr = radeon_bo_gpu_offset(bo);
639 ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0); 664 ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
640 ib.ptr[1] = addr; 665 ib.ptr[1] = addr;
641 ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0); 666 ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
@@ -647,19 +672,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
647 ib.length_dw = 16; 672 ib.length_dw = 16;
648 673
649 r = radeon_ib_schedule(rdev, &ib, NULL, false); 674 r = radeon_ib_schedule(rdev, &ib, NULL, false);
650 if (r)
651 goto err;
652 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
653 675
654 if (fence) 676 if (fence)
655 *fence = radeon_fence_ref(ib.fence); 677 *fence = radeon_fence_ref(ib.fence);
656 678
657 radeon_ib_free(rdev, &ib); 679 radeon_ib_free(rdev, &ib);
658 radeon_bo_unref(&bo);
659 return 0;
660
661err:
662 ttm_eu_backoff_reservation(&ticket, &head);
663 return r; 680 return r;
664} 681}
665 682
@@ -669,27 +686,18 @@ err:
669int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, 686int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
670 uint32_t handle, struct radeon_fence **fence) 687 uint32_t handle, struct radeon_fence **fence)
671{ 688{
672 struct radeon_bo *bo; 689 /* we use the last page of the vcpu bo for the UVD message */
673 uint32_t *msg; 690 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
674 int r, i; 691 RADEON_GPU_PAGE_SIZE;
675 692
676 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, 693 uint32_t *msg = rdev->uvd.cpu_addr + offs;
677 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); 694 uint64_t addr = rdev->uvd.gpu_addr + offs;
678 if (r)
679 return r;
680 695
681 r = radeon_bo_reserve(bo, false); 696 int r, i;
682 if (r) {
683 radeon_bo_unref(&bo);
684 return r;
685 }
686 697
687 r = radeon_bo_kmap(bo, (void **)&msg); 698 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
688 if (r) { 699 if (r)
689 radeon_bo_unreserve(bo);
690 radeon_bo_unref(&bo);
691 return r; 700 return r;
692 }
693 701
694 /* stitch together an UVD create msg */ 702 /* stitch together an UVD create msg */
695 msg[0] = cpu_to_le32(0x00000de4); 703 msg[0] = cpu_to_le32(0x00000de4);
@@ -706,36 +714,26 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
706 for (i = 11; i < 1024; ++i) 714 for (i = 11; i < 1024; ++i)
707 msg[i] = cpu_to_le32(0x0); 715 msg[i] = cpu_to_le32(0x0);
708 716
709 radeon_bo_kunmap(bo); 717 r = radeon_uvd_send_msg(rdev, ring, addr, fence);
710 radeon_bo_unreserve(bo); 718 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
711 719 return r;
712 return radeon_uvd_send_msg(rdev, ring, bo, fence);
713} 720}
714 721
715int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, 722int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
716 uint32_t handle, struct radeon_fence **fence) 723 uint32_t handle, struct radeon_fence **fence)
717{ 724{
718 struct radeon_bo *bo; 725 /* we use the last page of the vcpu bo for the UVD message */
719 uint32_t *msg; 726 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
720 int r, i; 727 RADEON_GPU_PAGE_SIZE;
721 728
722 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, 729 uint32_t *msg = rdev->uvd.cpu_addr + offs;
723 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); 730 uint64_t addr = rdev->uvd.gpu_addr + offs;
724 if (r)
725 return r;
726 731
727 r = radeon_bo_reserve(bo, false); 732 int r, i;
728 if (r) {
729 radeon_bo_unref(&bo);
730 return r;
731 }
732 733
733 r = radeon_bo_kmap(bo, (void **)&msg); 734 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
734 if (r) { 735 if (r)
735 radeon_bo_unreserve(bo);
736 radeon_bo_unref(&bo);
737 return r; 736 return r;
738 }
739 737
740 /* stitch together an UVD destroy msg */ 738 /* stitch together an UVD destroy msg */
741 msg[0] = cpu_to_le32(0x00000de4); 739 msg[0] = cpu_to_le32(0x00000de4);
@@ -745,10 +743,9 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
745 for (i = 4; i < 1024; ++i) 743 for (i = 4; i < 1024; ++i)
746 msg[i] = cpu_to_le32(0x0); 744 msg[i] = cpu_to_le32(0x0);
747 745
748 radeon_bo_kunmap(bo); 746 r = radeon_uvd_send_msg(rdev, ring, addr, fence);
749 radeon_bo_unreserve(bo); 747 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
750 748 return r;
751 return radeon_uvd_send_msg(rdev, ring, bo, fence);
752} 749}
753 750
754/** 751/**
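Note: create/destroy messages no longer allocate and validate a temporary BO each time; the vcpu BO is grown by one GPU page (the bo_size change earlier in this file) and that last page is reused for the message, serialized by reserving the vcpu BO. The address math, lifted into a sketch:

	/* the last GPU page of the vcpu BO holds the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - RADEON_GPU_PAGE_SIZE;
	uint32_t *msg = rdev->uvd.cpu_addr + offs;	/* CPU-side view */
	uint64_t addr = rdev->uvd.gpu_addr + offs;	/* GPU-side view */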
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index c7190aadbd89..9e85757d5599 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,8 @@ int radeon_vce_init(struct radeon_device *rdev)
126 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + 126 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
127 RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; 127 RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
128 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, 128 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
129 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo); 129 RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
130 &rdev->vce.vcpu_bo);
130 if (r) { 131 if (r) {
131 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); 132 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
132 return r; 133 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 088ffdc2f577..4532cc76a0a6 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; 143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; 144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo; 145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tv.shared = false;
146 list[0].tiling_flags = 0; 147 list[0].tiling_flags = 0;
147 list[0].handle = 0; 148 list[0].handle = 0;
148 list_add(&list[0].tv.head, head); 149 list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
156 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; 157 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; 158 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo; 159 list[idx].tv.bo = &list[idx].robj->tbo;
160 list[idx].tv.shared = false;
159 list[idx].tiling_flags = 0; 161 list[idx].tiling_flags = 0;
160 list[idx].handle = 0; 162 list[idx].handle = 0;
161 list_add(&list[idx++].tv.head, head); 163 list_add(&list[idx++].tv.head, head);
@@ -395,11 +397,12 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
395 397
396 memset(&tv, 0, sizeof(tv)); 398 memset(&tv, 0, sizeof(tv));
397 tv.bo = &bo->tbo; 399 tv.bo = &bo->tbo;
400 tv.shared = false;
398 401
399 INIT_LIST_HEAD(&head); 402 INIT_LIST_HEAD(&head);
400 list_add(&tv.head, &head); 403 list_add(&tv.head, &head);
401 404
402 r = ttm_eu_reserve_buffers(&ticket, &head); 405 r = ttm_eu_reserve_buffers(&ticket, &head, true);
403 if (r) 406 if (r)
404 return r; 407 return r;
405 408
@@ -424,7 +427,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
424 if (r) 427 if (r)
425 goto error; 428 goto error;
426 429
427 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); 430 ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
428 radeon_ib_free(rdev, &ib); 431 radeon_ib_free(rdev, &ib);
429 432
430 return 0; 433 return 0;
@@ -545,7 +548,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
545 548
546 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, 549 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
547 RADEON_GPU_PAGE_SIZE, true, 550 RADEON_GPU_PAGE_SIZE, true,
548 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt); 551 RADEON_GEM_DOMAIN_VRAM, 0,
552 NULL, NULL, &pt);
549 if (r) 553 if (r)
550 return r; 554 return r;
551 555
@@ -694,8 +698,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
694 698
695 if (ib.length_dw != 0) { 699 if (ib.length_dw != 0) {
696 radeon_asic_vm_pad_ib(rdev, &ib); 700 radeon_asic_vm_pad_ib(rdev, &ib);
697 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); 701
698 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); 702 radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
703 radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
699 WARN_ON(ib.length_dw > ndw); 704 WARN_ON(ib.length_dw > ndw);
700 r = radeon_ib_schedule(rdev, &ib, NULL, false); 705 r = radeon_ib_schedule(rdev, &ib, NULL, false);
701 if (r) { 706 if (r) {
@@ -821,7 +826,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
821 unsigned nptes; 826 unsigned nptes;
822 uint64_t pte; 827 uint64_t pte;
823 828
824 radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); 829 radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
825 830
826 if ((addr & ~mask) == (end & ~mask)) 831 if ((addr & ~mask) == (end & ~mask))
827 nptes = end - addr; 832 nptes = end - addr;
@@ -892,6 +897,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
892 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 897 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
893 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 898 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
894 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; 899 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
900 if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
901 bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
902
895 if (mem) { 903 if (mem) {
896 addr = mem->start << PAGE_SHIFT; 904 addr = mem->start << PAGE_SHIFT;
897 if (mem->mem_type != TTM_PL_SYSTEM) { 905 if (mem->mem_type != TTM_PL_SYSTEM) {
@@ -960,7 +968,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
960 radeon_asic_vm_pad_ib(rdev, &ib); 968 radeon_asic_vm_pad_ib(rdev, &ib);
961 WARN_ON(ib.length_dw > ndw); 969 WARN_ON(ib.length_dw > ndw);
962 970
963 radeon_semaphore_sync_to(ib.semaphore, vm->fence); 971 radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
964 r = radeon_ib_schedule(rdev, &ib, NULL, false); 972 r = radeon_ib_schedule(rdev, &ib, NULL, false);
965 if (r) { 973 if (r) {
966 radeon_ib_free(rdev, &ib); 974 radeon_ib_free(rdev, &ib);
@@ -1120,7 +1128,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1120 1128
1121 r = radeon_bo_create(rdev, pd_size, align, true, 1129 r = radeon_bo_create(rdev, pd_size, align, true,
1122 RADEON_GEM_DOMAIN_VRAM, 0, NULL, 1130 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1123 &vm->page_directory); 1131 NULL, &vm->page_directory);
1124 if (r) 1132 if (r)
1125 return r; 1133 return r;
1126 1134
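Note: struct ttm_validate_buffer grows a shared flag so each entry in a reservation list declares whether the fence about to be attached is exclusive; page-table BOs always use exclusive fencing here. Building a one-entry list, following radeon_vm_clear_bo() above (the new boolean on ttm_eu_reserve_buffers() is assumed to select interruptible waiting):

	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	int r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;
	tv.shared = false;		/* an exclusive fence will be attached */

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true);
	if (r)
		return r;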
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d9f5ce715c9b..372016e266d0 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -26,7 +26,6 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/firmware.h> 28#include <linux/firmware.h>
29#include <linux/platform_device.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <drm/drmP.h> 30#include <drm/drmP.h>
32#include "radeon.h" 31#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 74426ac2bb5c..7f34bad2e724 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -33,18 +33,19 @@
33 * @src_offset: src GPU address 33 * @src_offset: src GPU address
34 * @dst_offset: dst GPU address 34 * @dst_offset: dst GPU address
35 * @num_gpu_pages: number of GPU pages to xfer 35 * @num_gpu_pages: number of GPU pages to xfer
36 * @fence: radeon fence object 36 * @resv: reservation object to sync to
37 * 37 *
38 * Copy GPU paging using the DMA engine (r7xx). 38 * Copy GPU paging using the DMA engine (r7xx).
39 * Used by the radeon ttm implementation to move pages if 39 * Used by the radeon ttm implementation to move pages if
40 * registered as the asic copy callback. 40 * registered as the asic copy callback.
41 */ 41 */
42int rv770_copy_dma(struct radeon_device *rdev, 42struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
43 uint64_t src_offset, uint64_t dst_offset, 43 uint64_t src_offset, uint64_t dst_offset,
44 unsigned num_gpu_pages, 44 unsigned num_gpu_pages,
45 struct radeon_fence **fence) 45 struct reservation_object *resv)
46{ 46{
47 struct radeon_semaphore *sem = NULL; 47 struct radeon_semaphore *sem = NULL;
48 struct radeon_fence *fence;
48 int ring_index = rdev->asic->copy.dma_ring_index; 49 int ring_index = rdev->asic->copy.dma_ring_index;
49 struct radeon_ring *ring = &rdev->ring[ring_index]; 50 struct radeon_ring *ring = &rdev->ring[ring_index];
50 u32 size_in_dw, cur_size_in_dw; 51 u32 size_in_dw, cur_size_in_dw;
@@ -54,7 +55,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
54 r = radeon_semaphore_create(rdev, &sem); 55 r = radeon_semaphore_create(rdev, &sem);
55 if (r) { 56 if (r) {
56 DRM_ERROR("radeon: moving bo (%d).\n", r); 57 DRM_ERROR("radeon: moving bo (%d).\n", r);
57 return r; 58 return ERR_PTR(r);
58 } 59 }
59 60
60 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 61 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -63,10 +64,10 @@ int rv770_copy_dma(struct radeon_device *rdev,
63 if (r) { 64 if (r) {
64 DRM_ERROR("radeon: moving bo (%d).\n", r); 65 DRM_ERROR("radeon: moving bo (%d).\n", r);
65 radeon_semaphore_free(rdev, &sem, NULL); 66 radeon_semaphore_free(rdev, &sem, NULL);
66 return r; 67 return ERR_PTR(r);
67 } 68 }
68 69
69 radeon_semaphore_sync_to(sem, *fence); 70 radeon_semaphore_sync_resv(rdev, sem, resv, false);
70 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 71 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
71 72
72 for (i = 0; i < num_loops; i++) { 73 for (i = 0; i < num_loops; i++) {
@@ -83,15 +84,15 @@ int rv770_copy_dma(struct radeon_device *rdev,
83 dst_offset += cur_size_in_dw * 4; 84 dst_offset += cur_size_in_dw * 4;
84 } 85 }
85 86
86 r = radeon_fence_emit(rdev, fence, ring->idx); 87 r = radeon_fence_emit(rdev, &fence, ring->idx);
87 if (r) { 88 if (r) {
88 radeon_ring_unlock_undo(rdev, ring); 89 radeon_ring_unlock_undo(rdev, ring);
89 radeon_semaphore_free(rdev, &sem, NULL); 90 radeon_semaphore_free(rdev, &sem, NULL);
90 return r; 91 return ERR_PTR(r);
91 } 92 }
92 93
93 radeon_ring_unlock_commit(rdev, ring, false); 94 radeon_ring_unlock_commit(rdev, ring, false);
94 radeon_semaphore_free(rdev, &sem, *fence); 95 radeon_semaphore_free(rdev, &sem, fence);
95 96
96 return r; 97 return fence;
97} 98}
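
The hunk above changes the copy callback's contract: rv770_copy_dma() no longer returns an int and fills a fence out-parameter; instead it returns the emitted fence directly (errors are encoded with ERR_PTR()) and synchronizes against a whole reservation object rather than a single prior fence. The si_dma.c callback below follows the same pattern. A minimal caller-side sketch, assuming illustrative src_gpu_addr/dst_gpu_addr/num_pages values and a buffer object bo whose reservation object sits at bo->tbo.resv:

        struct radeon_fence *fence;
        int r;

        /* The callback now returns the fence or an ERR_PTR-encoded errno. */
        fence = rv770_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
                               num_pages, bo->tbo.resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* Illustrative usage: wait for the copy, then drop the reference. */
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;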
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3a0b973e8a96..eeea5b6a1775 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4684,7 +4684,7 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4684int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) 4684int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4685{ 4685{
4686 int ret = 0; 4686 int ret = 0;
4687 u32 idx = 0; 4687 u32 idx = 0, i;
4688 struct radeon_cs_packet pkt; 4688 struct radeon_cs_packet pkt;
4689 4689
4690 do { 4690 do {
@@ -4695,6 +4695,12 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4695 switch (pkt.type) { 4695 switch (pkt.type) {
4696 case RADEON_PACKET_TYPE0: 4696 case RADEON_PACKET_TYPE0:
4697 dev_err(rdev->dev, "Packet0 not allowed!\n"); 4697 dev_err(rdev->dev, "Packet0 not allowed!\n");
4698 for (i = 0; i < ib->length_dw; i++) {
4699 if (i == idx)
4700 printk("\t0x%08x <---\n", ib->ptr[i]);
4701 else
4702 printk("\t0x%08x\n", ib->ptr[i]);
4703 }
4698 ret = -EINVAL; 4704 ret = -EINVAL;
4699 break; 4705 break;
4700 case RADEON_PACKET_TYPE2: 4706 case RADEON_PACKET_TYPE2:
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 7c22baaf94db..b58f12b762d7 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -218,18 +218,19 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
218 * @src_offset: src GPU address 218 * @src_offset: src GPU address
219 * @dst_offset: dst GPU address 219 * @dst_offset: dst GPU address
220 * @num_gpu_pages: number of GPU pages to xfer 220 * @num_gpu_pages: number of GPU pages to xfer
221 * @fence: radeon fence object 221 * @resv: reservation object to sync to
222 * 222 *
223 * Copy GPU paging using the DMA engine (SI). 223 * Copy GPU paging using the DMA engine (SI).
224 * Used by the radeon ttm implementation to move pages if 224 * Used by the radeon ttm implementation to move pages if
225 * registered as the asic copy callback. 225 * registered as the asic copy callback.
226 */ 226 */
227int si_copy_dma(struct radeon_device *rdev, 227struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
228 uint64_t src_offset, uint64_t dst_offset, 228 uint64_t src_offset, uint64_t dst_offset,
229 unsigned num_gpu_pages, 229 unsigned num_gpu_pages,
230 struct radeon_fence **fence) 230 struct reservation_object *resv)
231{ 231{
232 struct radeon_semaphore *sem = NULL; 232 struct radeon_semaphore *sem = NULL;
233 struct radeon_fence *fence;
233 int ring_index = rdev->asic->copy.dma_ring_index; 234 int ring_index = rdev->asic->copy.dma_ring_index;
234 struct radeon_ring *ring = &rdev->ring[ring_index]; 235 struct radeon_ring *ring = &rdev->ring[ring_index];
235 u32 size_in_bytes, cur_size_in_bytes; 236 u32 size_in_bytes, cur_size_in_bytes;
@@ -239,7 +240,7 @@ int si_copy_dma(struct radeon_device *rdev,
239 r = radeon_semaphore_create(rdev, &sem); 240 r = radeon_semaphore_create(rdev, &sem);
240 if (r) { 241 if (r) {
241 DRM_ERROR("radeon: moving bo (%d).\n", r); 242 DRM_ERROR("radeon: moving bo (%d).\n", r);
242 return r; 243 return ERR_PTR(r);
243 } 244 }
244 245
245 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 246 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -248,10 +249,10 @@ int si_copy_dma(struct radeon_device *rdev,
248 if (r) { 249 if (r) {
249 DRM_ERROR("radeon: moving bo (%d).\n", r); 250 DRM_ERROR("radeon: moving bo (%d).\n", r);
250 radeon_semaphore_free(rdev, &sem, NULL); 251 radeon_semaphore_free(rdev, &sem, NULL);
251 return r; 252 return ERR_PTR(r);
252 } 253 }
253 254
254 radeon_semaphore_sync_to(sem, *fence); 255 radeon_semaphore_sync_resv(rdev, sem, resv, false);
255 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 256 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
256 257
257 for (i = 0; i < num_loops; i++) { 258 for (i = 0; i < num_loops; i++) {
@@ -268,16 +269,16 @@ int si_copy_dma(struct radeon_device *rdev,
268 dst_offset += cur_size_in_bytes; 269 dst_offset += cur_size_in_bytes;
269 } 270 }
270 271
271 r = radeon_fence_emit(rdev, fence, ring->idx); 272 r = radeon_fence_emit(rdev, &fence, ring->idx);
272 if (r) { 273 if (r) {
273 radeon_ring_unlock_undo(rdev, ring); 274 radeon_ring_unlock_undo(rdev, ring);
274 radeon_semaphore_free(rdev, &sem, NULL); 275 radeon_semaphore_free(rdev, &sem, NULL);
275 return r; 276 return ERR_PTR(r);
276 } 277 }
277 278
278 radeon_ring_unlock_commit(rdev, ring, false); 279 radeon_ring_unlock_commit(rdev, ring, false);
279 radeon_semaphore_free(rdev, &sem, *fence); 280 radeon_semaphore_free(rdev, &sem, fence);
280 281
281 return r; 282 return fence;
282} 283}
283 284
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 70e61ffeace2..9e4d5d7d348f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2916,7 +2916,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2916 bool disable_sclk_switching = false; 2916 bool disable_sclk_switching = false;
2917 u32 mclk, sclk; 2917 u32 mclk, sclk;
2918 u16 vddc, vddci; 2918 u16 vddc, vddci;
2919 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2920 int i; 2919 int i;
2921 2920
2922 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2921 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2950,29 +2949,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2950 } 2949 }
2951 } 2950 }
2952 2951
2953 /* limit clocks to max supported clocks based on voltage dependency tables */
2954 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2955 &max_sclk_vddc);
2956 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2957 &max_mclk_vddci);
2958 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2959 &max_mclk_vddc);
2960
2961 for (i = 0; i < ps->performance_level_count; i++) {
2962 if (max_sclk_vddc) {
2963 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2964 ps->performance_levels[i].sclk = max_sclk_vddc;
2965 }
2966 if (max_mclk_vddci) {
2967 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2968 ps->performance_levels[i].mclk = max_mclk_vddci;
2969 }
2970 if (max_mclk_vddc) {
2971 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2972 ps->performance_levels[i].mclk = max_mclk_vddc;
2973 }
2974 }
2975
2976 /* XXX validate the min clocks required for display */ 2952 /* XXX validate the min clocks required for display */
2977 2953
2978 if (disable_mclk_switching) { 2954 if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index fd414d34d885..6635da9ec986 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -736,7 +736,7 @@
736# define DESCRIPTION16(x) (((x) & 0xff) << 0) 736# define DESCRIPTION16(x) (((x) & 0xff) << 0)
737# define DESCRIPTION17(x) (((x) & 0xff) << 8) 737# define DESCRIPTION17(x) (((x) & 0xff) << 8)
738 738
739#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54 739#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
740# define AUDIO_ENABLED (1 << 31) 740# define AUDIO_ENABLED (1 << 31)
741 741
742#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56 742#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index cda391347286..e72b3cb59358 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -22,6 +22,7 @@
22 * Authors: Christian König <christian.koenig@amd.com> 22 * Authors: Christian König <christian.koenig@amd.com>
23 */ 23 */
24 24
25#include <linux/firmware.h>
25#include <drm/drmP.h> 26#include <drm/drmP.h>
26#include "radeon.h" 27#include "radeon.h"
27#include "radeon_asic.h" 28#include "radeon_asic.h"
@@ -70,6 +71,82 @@ void uvd_v1_0_set_wptr(struct radeon_device *rdev,
70} 71}
71 72
72/** 73/**
74 * uvd_v1_0_fence_emit - emit a fence & trap command
75 *
76 * @rdev: radeon_device pointer
77 * @fence: fence to emit
78 *
79 * Write a fence and a trap command to the ring.
80 */
81void uvd_v1_0_fence_emit(struct radeon_device *rdev,
82 struct radeon_fence *fence)
83{
84 struct radeon_ring *ring = &rdev->ring[fence->ring];
85 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
86
87 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
88 radeon_ring_write(ring, addr & 0xffffffff);
89 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
90 radeon_ring_write(ring, fence->seq);
91 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
92 radeon_ring_write(ring, 0);
93
94 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
95 radeon_ring_write(ring, 0);
96 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
97 radeon_ring_write(ring, 0);
98 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
99 radeon_ring_write(ring, 2);
100 return;
101}
102
103/**
104 * uvd_v1_0_resume - memory controller programming
105 *
106 * @rdev: radeon_device pointer
107 *
108 * Let the UVD memory controller know its offsets
109 */
110int uvd_v1_0_resume(struct radeon_device *rdev)
111{
112 uint64_t addr;
113 uint32_t size;
114 int r;
115
116 r = radeon_uvd_resume(rdev);
117 if (r)
118 return r;
119
120 /* program the VCPU memory controller bits 0-27 */
121 addr = (rdev->uvd.gpu_addr >> 3) + 16;
122 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
123 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
124 WREG32(UVD_VCPU_CACHE_SIZE0, size);
125
126 addr += size;
127 size = RADEON_UVD_STACK_SIZE >> 3;
128 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
129 WREG32(UVD_VCPU_CACHE_SIZE1, size);
130
131 addr += size;
132 size = RADEON_UVD_HEAP_SIZE >> 3;
133 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
134 WREG32(UVD_VCPU_CACHE_SIZE2, size);
135
136 /* bits 28-31 */
137 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
138 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
139
140 /* bits 32-39 */
141 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
142 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
143
144 WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));
145
146 return 0;
147}
148
149/**
73 * uvd_v1_0_init - start and test UVD block 150 * uvd_v1_0_init - start and test UVD block
74 * 151 *
75 * @rdev: radeon_device pointer 152 * @rdev: radeon_device pointer
@@ -130,8 +207,32 @@ done:
130 /* lower clocks again */ 207 /* lower clocks again */
131 radeon_set_uvd_clocks(rdev, 0, 0); 208 radeon_set_uvd_clocks(rdev, 0, 0);
132 209
133 if (!r) 210 if (!r) {
211 switch (rdev->family) {
212 case CHIP_RV610:
213 case CHIP_RV630:
214 case CHIP_RV620:
215 /* 64-byte granularity workaround */
216 WREG32(MC_CONFIG, 0);
217 WREG32(MC_CONFIG, 1 << 4);
218 WREG32(RS_DQ_RD_RET_CONF, 0x3f);
219 WREG32(MC_CONFIG, 0x1f);
220
221 /* fall through */
222 case CHIP_RV670:
223 case CHIP_RV635:
224
225 /* write clean workaround */
226 WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10);
227 break;
228
229 default:
230 /* TODO: Do we need more? */
231 break;
232 }
233
134 DRM_INFO("UVD initialized successfully.\n"); 234 DRM_INFO("UVD initialized successfully.\n");
235 }
135 236
136 return r; 237 return r;
137} 238}
@@ -218,12 +319,12 @@ int uvd_v1_0_start(struct radeon_device *rdev)
218 /* enable UMC */ 319 /* enable UMC */
219 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); 320 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
220 321
322 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
323
221 /* boot up the VCPU */ 324 /* boot up the VCPU */
222 WREG32(UVD_SOFT_RESET, 0); 325 WREG32(UVD_SOFT_RESET, 0);
223 mdelay(10); 326 mdelay(10);
224 327
225 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
226
227 for (i = 0; i < 10; ++i) { 328 for (i = 0; i < 10; ++i) {
228 uint32_t status; 329 uint32_t status;
229 for (j = 0; j < 100; ++j) { 330 for (j = 0; j < 100; ++j) {
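
The new uvd_v1_0_resume() above carves the UVD area into three consecutive VCPU cache windows: firmware image, stack, then heap, with every offset and size expressed in 8-byte units (hence the >> 3 shifts). A sketch of the layout arithmetic; the variable names are illustrative, and the +16 presumably skips a 128-byte header in front of the firmware image:

        /* All quantities below are in 8-byte units, as the hardware expects. */
        uint64_t fw_off    = (rdev->uvd.gpu_addr >> 3) + 16;
        uint32_t fw_size   = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
        uint64_t stack_off = fw_off + fw_size;
        uint64_t heap_off  = stack_off + (RADEON_UVD_STACK_SIZE >> 3);

These pairs end up in UVD_VCPU_CACHE_OFFSET0/1/2 and UVD_VCPU_CACHE_SIZE0/1/2, while UVD_LMI_ADDR_EXT and UVD_LMI_EXT40_ADDR carry address bits 28-31 and 32-39.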
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 8bfdadd56598..89193519f8a1 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -72,6 +72,10 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
72 uint32_t chip_id, size; 72 uint32_t chip_id, size;
73 int r; 73 int r;
74 74
75 /* RV770 uses V1.0 MC */
76 if (rdev->family == CHIP_RV770)
77 return uvd_v1_0_resume(rdev);
78
75 r = radeon_uvd_resume(rdev); 79 r = radeon_uvd_resume(rdev);
76 if (r) 80 if (r)
77 return r; 81 return r;
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 2e3d7b5b0ad7..c96f6089f8bf 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -6,6 +6,7 @@ config DRM_RCAR_DU
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
7 select DRM_GEM_CMA_HELPER 7 select DRM_GEM_CMA_HELPER
8 select DRM_KMS_FB_HELPER 8 select DRM_KMS_FB_HELPER
9 select VIDEOMODE_HELPERS
9 help 10 help
10 Choose this option if you have an R-Car chipset. 11 Choose this option if you have an R-Car chipset.
11 If M is selected the module will be called rcar-du-drm. 12 If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 299267db2898..148b50589181 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_crtc.c -- R-Car Display Unit CRTCs 2 * rcar_du_crtc.c -- R-Car Display Unit CRTCs
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 43e7575c700c..e97ae502dec5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_crtc.h -- R-Car Display Unit CRTCs 2 * rcar_du_crtc.h -- R-Car Display Unit CRTCs
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fda64b7b73e8..d212efa6a495 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_drv.c -- R-Car Display Unit DRM driver 2 * rcar_du_drv.c -- R-Car Display Unit DRM driver
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -15,6 +15,7 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/of_device.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
19#include <linux/pm.h> 20#include <linux/pm.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
@@ -30,6 +31,97 @@
30#include "rcar_du_regs.h" 31#include "rcar_du_regs.h"
31 32
32/* ----------------------------------------------------------------------------- 33/* -----------------------------------------------------------------------------
34 * Device Information
35 */
36
37static const struct rcar_du_device_info rcar_du_r8a7779_info = {
38 .features = 0,
39 .num_crtcs = 2,
40 .routes = {
41 /* R8A7779 has two RGB outputs and one (currently unsupported)
42 * TCON output.
43 */
44 [RCAR_DU_OUTPUT_DPAD0] = {
45 .possible_crtcs = BIT(0),
46 .encoder_type = DRM_MODE_ENCODER_NONE,
47 .port = 0,
48 },
49 [RCAR_DU_OUTPUT_DPAD1] = {
50 .possible_crtcs = BIT(1) | BIT(0),
51 .encoder_type = DRM_MODE_ENCODER_NONE,
52 .port = 1,
53 },
54 },
55 .num_lvds = 0,
56};
57
58static const struct rcar_du_device_info rcar_du_r8a7790_info = {
59 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
60 .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
61 .num_crtcs = 3,
62 .routes = {
63 /* R8A7790 has one RGB output, two LVDS outputs and one
64 * (currently unsupported) TCON output.
65 */
66 [RCAR_DU_OUTPUT_DPAD0] = {
67 .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
68 .encoder_type = DRM_MODE_ENCODER_NONE,
69 .port = 0,
70 },
71 [RCAR_DU_OUTPUT_LVDS0] = {
72 .possible_crtcs = BIT(0),
73 .encoder_type = DRM_MODE_ENCODER_LVDS,
74 .port = 1,
75 },
76 [RCAR_DU_OUTPUT_LVDS1] = {
77 .possible_crtcs = BIT(2) | BIT(1),
78 .encoder_type = DRM_MODE_ENCODER_LVDS,
79 .port = 2,
80 },
81 },
82 .num_lvds = 2,
83};
84
85static const struct rcar_du_device_info rcar_du_r8a7791_info = {
86 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
87 .num_crtcs = 2,
88 .routes = {
89 /* R8A7791 has one RGB output, one LVDS output and one
90 * (currently unsupported) TCON output.
91 */
92 [RCAR_DU_OUTPUT_DPAD0] = {
93 .possible_crtcs = BIT(1),
94 .encoder_type = DRM_MODE_ENCODER_NONE,
95 .port = 0,
96 },
97 [RCAR_DU_OUTPUT_LVDS0] = {
98 .possible_crtcs = BIT(0),
99 .encoder_type = DRM_MODE_ENCODER_LVDS,
100 .port = 1,
101 },
102 },
103 .num_lvds = 1,
104};
105
106static const struct platform_device_id rcar_du_id_table[] = {
107 { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
108 { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
109 { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info },
110 { }
111};
112
113MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
114
115static const struct of_device_id rcar_du_of_table[] = {
116 { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
117 { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
118 { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
119 { }
120};
121
122MODULE_DEVICE_TABLE(of, rcar_du_of_table);
123
124/* -----------------------------------------------------------------------------
33 * DRM operations 125 * DRM operations
34 */ 126 */
35 127
@@ -53,12 +145,13 @@ static int rcar_du_unload(struct drm_device *dev)
53static int rcar_du_load(struct drm_device *dev, unsigned long flags) 145static int rcar_du_load(struct drm_device *dev, unsigned long flags)
54{ 146{
55 struct platform_device *pdev = dev->platformdev; 147 struct platform_device *pdev = dev->platformdev;
148 struct device_node *np = pdev->dev.of_node;
56 struct rcar_du_platform_data *pdata = pdev->dev.platform_data; 149 struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
57 struct rcar_du_device *rcdu; 150 struct rcar_du_device *rcdu;
58 struct resource *mem; 151 struct resource *mem;
59 int ret; 152 int ret;
60 153
61 if (pdata == NULL) { 154 if (pdata == NULL && np == NULL) {
62 dev_err(dev->dev, "no platform data\n"); 155 dev_err(dev->dev, "no platform data\n");
63 return -ENODEV; 156 return -ENODEV;
64 } 157 }
@@ -71,7 +164,8 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
71 164
72 rcdu->dev = &pdev->dev; 165 rcdu->dev = &pdev->dev;
73 rcdu->pdata = pdata; 166 rcdu->pdata = pdata;
74 rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data; 167 rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
168 : (void *)platform_get_device_id(pdev)->driver_data;
75 rcdu->ddev = dev; 169 rcdu->ddev = dev;
76 dev->dev_private = rcdu; 170 dev->dev_private = rcdu;
77 171
@@ -158,6 +252,7 @@ static struct drm_driver rcar_du_driver = {
158 .unload = rcar_du_unload, 252 .unload = rcar_du_unload,
159 .preclose = rcar_du_preclose, 253 .preclose = rcar_du_preclose,
160 .lastclose = rcar_du_lastclose, 254 .lastclose = rcar_du_lastclose,
255 .set_busid = drm_platform_set_busid,
161 .get_vblank_counter = drm_vblank_count, 256 .get_vblank_counter = drm_vblank_count,
162 .enable_vblank = rcar_du_enable_vblank, 257 .enable_vblank = rcar_du_enable_vblank,
163 .disable_vblank = rcar_du_disable_vblank, 258 .disable_vblank = rcar_du_disable_vblank,
@@ -231,77 +326,6 @@ static int rcar_du_remove(struct platform_device *pdev)
231 return 0; 326 return 0;
232} 327}
233 328
234static const struct rcar_du_device_info rcar_du_r8a7779_info = {
235 .features = 0,
236 .num_crtcs = 2,
237 .routes = {
238 /* R8A7779 has two RGB outputs and one (currently unsupported)
239 * TCON output.
240 */
241 [RCAR_DU_OUTPUT_DPAD0] = {
242 .possible_crtcs = BIT(0),
243 .encoder_type = DRM_MODE_ENCODER_NONE,
244 },
245 [RCAR_DU_OUTPUT_DPAD1] = {
246 .possible_crtcs = BIT(1) | BIT(0),
247 .encoder_type = DRM_MODE_ENCODER_NONE,
248 },
249 },
250 .num_lvds = 0,
251};
252
253static const struct rcar_du_device_info rcar_du_r8a7790_info = {
254 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
255 .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
256 .num_crtcs = 3,
257 .routes = {
258 /* R8A7790 has one RGB output, two LVDS outputs and one
259 * (currently unsupported) TCON output.
260 */
261 [RCAR_DU_OUTPUT_DPAD0] = {
262 .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
263 .encoder_type = DRM_MODE_ENCODER_NONE,
264 },
265 [RCAR_DU_OUTPUT_LVDS0] = {
266 .possible_crtcs = BIT(0),
267 .encoder_type = DRM_MODE_ENCODER_LVDS,
268 },
269 [RCAR_DU_OUTPUT_LVDS1] = {
270 .possible_crtcs = BIT(2) | BIT(1),
271 .encoder_type = DRM_MODE_ENCODER_LVDS,
272 },
273 },
274 .num_lvds = 2,
275};
276
277static const struct rcar_du_device_info rcar_du_r8a7791_info = {
278 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
279 .num_crtcs = 2,
280 .routes = {
281 /* R8A7791 has one RGB output, one LVDS output and one
282 * (currently unsupported) TCON output.
283 */
284 [RCAR_DU_OUTPUT_DPAD0] = {
285 .possible_crtcs = BIT(1),
286 .encoder_type = DRM_MODE_ENCODER_NONE,
287 },
288 [RCAR_DU_OUTPUT_LVDS0] = {
289 .possible_crtcs = BIT(0),
290 .encoder_type = DRM_MODE_ENCODER_LVDS,
291 },
292 },
293 .num_lvds = 1,
294};
295
296static const struct platform_device_id rcar_du_id_table[] = {
297 { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
298 { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
299 { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info },
300 { }
301};
302
303MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
304
305static struct platform_driver rcar_du_platform_driver = { 329static struct platform_driver rcar_du_platform_driver = {
306 .probe = rcar_du_probe, 330 .probe = rcar_du_probe,
307 .remove = rcar_du_remove, 331 .remove = rcar_du_remove,
@@ -309,6 +333,7 @@ static struct platform_driver rcar_du_platform_driver = {
309 .owner = THIS_MODULE, 333 .owner = THIS_MODULE,
310 .name = "rcar-du", 334 .name = "rcar-du",
311 .pm = &rcar_du_pm_ops, 335 .pm = &rcar_du_pm_ops,
336 .of_match_table = rcar_du_of_table,
312 }, 337 },
313 .id_table = rcar_du_id_table, 338 .id_table = rcar_du_id_table,
314}; 339};
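
With both a platform_device_id table and an of_device_id table registered, the driver can be bound either from a legacy board file or from a device tree node (compatible "renesas,du-r8a7779", "renesas,du-r8a7790" or "renesas,du-r8a7791"), and rcar_du_load() fetches the per-SoC rcar_du_device_info from whichever table matched. The same lookup, factored into a hypothetical helper for clarity:

        /* Hypothetical helper mirroring the lookup in rcar_du_load(). */
        static const struct rcar_du_device_info *
        rcar_du_get_info(struct platform_device *pdev)
        {
                if (pdev->dev.of_node)  /* DT probe: the OF match entry carries the info */
                        return of_match_device(rcar_du_of_table, &pdev->dev)->data;

                /* Legacy probe: the info was stashed in the id table's driver_data. */
                return (const void *)platform_get_device_id(pdev)->driver_data;
        }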
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index e31b735d3f25..8e494633c3b3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_drv.h -- R-Car Display Unit DRM driver 2 * rcar_du_drv.h -- R-Car Display Unit DRM driver
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -37,6 +37,7 @@ struct rcar_du_lvdsenc;
37 * struct rcar_du_output_routing - Output routing specification 37 * struct rcar_du_output_routing - Output routing specification
38 * @possible_crtcs: bitmask of possible CRTCs for the output 38 * @possible_crtcs: bitmask of possible CRTCs for the output
39 * @encoder_type: DRM type of the internal encoder associated with the output 39 * @encoder_type: DRM type of the internal encoder associated with the output
40 * @port: device tree port number corresponding to this output route
40 * 41 *
41 * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data 42 * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
42 * specify the valid SoC outputs, which CRTCs can drive the output, and the type 43 * specify the valid SoC outputs, which CRTCs can drive the output, and the type
@@ -45,6 +46,7 @@ struct rcar_du_lvdsenc;
45struct rcar_du_output_routing { 46struct rcar_du_output_routing {
46 unsigned int possible_crtcs; 47 unsigned int possible_crtcs;
47 unsigned int encoder_type; 48 unsigned int encoder_type;
49 unsigned int port;
48}; 50};
49 51
50/* 52/*
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3daa7a168dc6..7c0ec95915ef 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_encoder.c -- R-Car Display Unit Encoder 2 * rcar_du_encoder.c -- R-Car Display Unit Encoder
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -142,7 +142,8 @@ static const struct drm_encoder_funcs encoder_funcs = {
142int rcar_du_encoder_init(struct rcar_du_device *rcdu, 142int rcar_du_encoder_init(struct rcar_du_device *rcdu,
143 enum rcar_du_encoder_type type, 143 enum rcar_du_encoder_type type,
144 enum rcar_du_output output, 144 enum rcar_du_output output,
145 const struct rcar_du_encoder_data *data) 145 const struct rcar_du_encoder_data *data,
146 struct device_node *np)
146{ 147{
147 struct rcar_du_encoder *renc; 148 struct rcar_du_encoder *renc;
148 unsigned int encoder_type; 149 unsigned int encoder_type;
@@ -189,9 +190,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
189 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs); 190 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
190 191
191 switch (encoder_type) { 192 switch (encoder_type) {
192 case DRM_MODE_ENCODER_LVDS: 193 case DRM_MODE_ENCODER_LVDS: {
193 return rcar_du_lvds_connector_init(rcdu, renc, 194 const struct rcar_du_panel_data *pdata =
194 &data->connector.lvds.panel); 195 data ? &data->connector.lvds.panel : NULL;
196 return rcar_du_lvds_connector_init(rcdu, renc, pdata, np);
197 }
195 198
196 case DRM_MODE_ENCODER_DAC: 199 case DRM_MODE_ENCODER_DAC:
197 return rcar_du_vga_connector_init(rcdu, renc); 200 return rcar_du_vga_connector_init(rcdu, renc);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 0e5a65e45d0e..bd624135ef1f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_encoder.h -- R-Car Display Unit Encoder 2 * rcar_du_encoder.h -- R-Car Display Unit Encoder
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -44,6 +44,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector);
44int rcar_du_encoder_init(struct rcar_du_device *rcdu, 44int rcar_du_encoder_init(struct rcar_du_device *rcdu,
45 enum rcar_du_encoder_type type, 45 enum rcar_du_encoder_type type,
46 enum rcar_du_output output, 46 enum rcar_du_output output,
47 const struct rcar_du_encoder_data *data); 47 const struct rcar_du_encoder_data *data,
48 struct device_node *np);
48 49
49#endif /* __RCAR_DU_ENCODER_H__ */ 50#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index eb53cd97e8c6..4e7614b145db 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_group.c -- R-Car Display Unit Channels Pair 2 * rcar_du_group.c -- R-Car Display Unit Channels Pair
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 5025930972ec..0c38cdcda4ca 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group 2 * rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 76026104d000..6c24ad7d03ef 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_kms.c -- R-Car Display Unit Mode Setting 2 * rcar_du_kms.c -- R-Car Display Unit Mode Setting
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -17,6 +17,8 @@
17#include <drm/drm_fb_cma_helper.h> 17#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h> 18#include <drm/drm_gem_cma_helper.h>
19 19
20#include <linux/of_graph.h>
21
20#include "rcar_du_crtc.h" 22#include "rcar_du_crtc.h"
21#include "rcar_du_drv.h" 23#include "rcar_du_drv.h"
22#include "rcar_du_encoder.h" 24#include "rcar_du_encoder.h"
@@ -188,6 +190,205 @@ static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
188 .output_poll_changed = rcar_du_output_poll_changed, 190 .output_poll_changed = rcar_du_output_poll_changed,
189}; 191};
190 192
193static int rcar_du_encoders_init_pdata(struct rcar_du_device *rcdu)
194{
195 unsigned int num_encoders = 0;
196 unsigned int i;
197 int ret;
198
199 for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
200 const struct rcar_du_encoder_data *pdata =
201 &rcdu->pdata->encoders[i];
202 const struct rcar_du_output_routing *route =
203 &rcdu->info->routes[pdata->output];
204
205 if (pdata->type == RCAR_DU_ENCODER_UNUSED)
206 continue;
207
208 if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
209 route->possible_crtcs == 0) {
210 dev_warn(rcdu->dev,
211 "encoder %u references unexisting output %u, skipping\n",
212 i, pdata->output);
213 continue;
214 }
215
216 ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
217 pdata, NULL);
218 if (ret < 0)
219 return ret;
220
221 num_encoders++;
222 }
223
224 return num_encoders;
225}
226
227static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
228 enum rcar_du_output output,
229 struct of_endpoint *ep)
230{
231 static const struct {
232 const char *compatible;
233 enum rcar_du_encoder_type type;
234 } encoders[] = {
235 { "adi,adv7123", RCAR_DU_ENCODER_VGA },
236 { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
237 };
238
239 enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
240 struct device_node *connector = NULL;
241 struct device_node *encoder = NULL;
242 struct device_node *prev = NULL;
243 struct device_node *entity_ep_node;
244 struct device_node *entity;
245 int ret;
246
247 /*
248 * Locate the connected entity and infer its type from the number of
249 * endpoints.
250 */
251 entity = of_graph_get_remote_port_parent(ep->local_node);
252 if (!entity) {
253 dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
254 ep->local_node->full_name);
255 return 0;
256 }
257
258 entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
259
260 while (1) {
261 struct device_node *ep_node;
262
263 ep_node = of_graph_get_next_endpoint(entity, prev);
264 of_node_put(prev);
265 prev = ep_node;
266
267 if (!ep_node)
268 break;
269
270 if (ep_node == entity_ep_node)
271 continue;
272
273 /*
274 * We've found one endpoint other than the input; this must
275 * be an encoder. Locate the connector.
276 */
277 encoder = entity;
278 connector = of_graph_get_remote_port_parent(ep_node);
279 of_node_put(ep_node);
280
281 if (!connector) {
282 dev_warn(rcdu->dev,
283 "no connector for encoder %s, skipping\n",
284 encoder->full_name);
285 of_node_put(entity_ep_node);
286 of_node_put(encoder);
287 return 0;
288 }
289
290 break;
291 }
292
293 of_node_put(entity_ep_node);
294
295 if (encoder) {
296 /*
297 * If an encoder has been found, get its type based on its
298 * compatible string.
299 */
300 unsigned int i;
301
302 for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
303 if (of_device_is_compatible(encoder,
304 encoders[i].compatible)) {
305 enc_type = encoders[i].type;
306 break;
307 }
308 }
309
310 if (i == ARRAY_SIZE(encoders)) {
311 dev_warn(rcdu->dev,
312 "unknown encoder type for %s, skipping\n",
313 encoder->full_name);
314 of_node_put(encoder);
315 of_node_put(connector);
316 return 0;
317 }
318 } else {
319 /*
320 * If no encoder has been found the entity must be the
321 * connector.
322 */
323 connector = entity;
324 }
325
326 ret = rcar_du_encoder_init(rcdu, enc_type, output, NULL, connector);
327 of_node_put(encoder);
328 of_node_put(connector);
329
330 return ret < 0 ? ret : 1;
331}
332
333static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
334{
335 struct device_node *np = rcdu->dev->of_node;
336 struct device_node *prev = NULL;
337 unsigned int num_encoders = 0;
338
339 /*
340 * Iterate over the endpoints and create one encoder for each output
341 * pipeline.
342 */
343 while (1) {
344 struct device_node *ep_node;
345 enum rcar_du_output output;
346 struct of_endpoint ep;
347 unsigned int i;
348 int ret;
349
350 ep_node = of_graph_get_next_endpoint(np, prev);
351 of_node_put(prev);
352 prev = ep_node;
353
354 if (ep_node == NULL)
355 break;
356
357 ret = of_graph_parse_endpoint(ep_node, &ep);
358 if (ret < 0) {
359 of_node_put(ep_node);
360 return ret;
361 }
362
363 /* Find the output route corresponding to the port number. */
364 for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
365 if (rcdu->info->routes[i].possible_crtcs &&
366 rcdu->info->routes[i].port == ep.port) {
367 output = i;
368 break;
369 }
370 }
371
372 if (i == RCAR_DU_OUTPUT_MAX) {
373 dev_warn(rcdu->dev,
374 "port %u references unexisting output, skipping\n",
375 ep.port);
376 continue;
377 }
378
379 /* Process the output pipeline. */
380 ret = rcar_du_encoders_init_dt_one(rcdu, output, &ep);
381 if (ret < 0) {
382 of_node_put(ep_node);
383 return ret;
384 }
385
386 num_encoders += ret;
387 }
388
389 return num_encoders;
390}
391
191int rcar_du_modeset_init(struct rcar_du_device *rcdu) 392int rcar_du_modeset_init(struct rcar_du_device *rcdu)
192{ 393{
193 static const unsigned int mmio_offsets[] = { 394 static const unsigned int mmio_offsets[] = {
@@ -197,6 +398,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
197 struct drm_device *dev = rcdu->ddev; 398 struct drm_device *dev = rcdu->ddev;
198 struct drm_encoder *encoder; 399 struct drm_encoder *encoder;
199 struct drm_fbdev_cma *fbdev; 400 struct drm_fbdev_cma *fbdev;
401 unsigned int num_encoders;
200 unsigned int num_groups; 402 unsigned int num_groups;
201 unsigned int i; 403 unsigned int i;
202 int ret; 404 int ret;
@@ -240,28 +442,15 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
240 if (ret < 0) 442 if (ret < 0)
241 return ret; 443 return ret;
242 444
243 for (i = 0; i < rcdu->pdata->num_encoders; ++i) { 445 if (rcdu->pdata)
244 const struct rcar_du_encoder_data *pdata = 446 ret = rcar_du_encoders_init_pdata(rcdu);
245 &rcdu->pdata->encoders[i]; 447 else
246 const struct rcar_du_output_routing *route = 448 ret = rcar_du_encoders_init_dt(rcdu);
247 &rcdu->info->routes[pdata->output];
248
249 if (pdata->type == RCAR_DU_ENCODER_UNUSED)
250 continue;
251 449
252 if (pdata->output >= RCAR_DU_OUTPUT_MAX || 450 if (ret < 0)
253 route->possible_crtcs == 0) { 451 return ret;
254 dev_warn(rcdu->dev,
255 "encoder %u references unexisting output %u, skipping\n",
256 i, pdata->output);
257 continue;
258 }
259 452
260 ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output, 453 num_encoders = ret;
261 pdata);
262 if (ret < 0)
263 return ret;
264 }
265 454
266 /* Set the possible CRTCs and possible clones. There's always at least 455 /* Set the possible CRTCs and possible clones. There's always at least
267 * one way for all encoders to clone each other, set all bits in the 456 * one way for all encoders to clone each other, set all bits in the
@@ -273,7 +462,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
273 &rcdu->info->routes[renc->output]; 462 &rcdu->info->routes[renc->output];
274 463
275 encoder->possible_crtcs = route->possible_crtcs; 464 encoder->possible_crtcs = route->possible_crtcs;
276 encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1; 465 encoder->possible_clones = (1 << num_encoders) - 1;
277 } 466 }
278 467
279 /* Now that the CRTCs have been initialized register the planes. */ 468 /* Now that the CRTCs have been initialized register the planes. */
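
rcar_du_encoders_init_dt() above walks the OF graph bindings: every endpoint under the DU node describes one output pipeline, the endpoint's port number selects the output route, and the remote entity is classified as an encoder or a bare connector by counting its endpoints. The endpoint-walk idiom it relies on keeps exactly one node reference live at a time; shown in isolation, with np standing for the device node:

        struct device_node *ep = NULL;

        for (;;) {
                struct device_node *next = of_graph_get_next_endpoint(np, ep);

                /* Drop the previous endpoint before advancing, so no
                 * reference ever leaks, even on early exit paths. */
                of_node_put(ep);
                ep = next;
                if (!ep)
                        break;

                /* ... of_graph_parse_endpoint(ep, &parsed) and per-port work ... */
        }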
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index 5750e6af5655..07951d5fe38b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_kms.h -- R-Car Display Unit Mode Setting 2 * rcar_du_kms.h -- R-Car Display Unit Mode Setting
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 21426bd234eb..115eed20db12 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector 2 * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -15,6 +15,10 @@
15#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include <video/display_timing.h>
19#include <video/of_display_timing.h>
20#include <video/videomode.h>
21
18#include "rcar_du_drv.h" 22#include "rcar_du_drv.h"
19#include "rcar_du_encoder.h" 23#include "rcar_du_encoder.h"
20#include "rcar_du_kms.h" 24#include "rcar_du_kms.h"
@@ -23,7 +27,7 @@
23struct rcar_du_lvds_connector { 27struct rcar_du_lvds_connector {
24 struct rcar_du_connector connector; 28 struct rcar_du_connector connector;
25 29
26 const struct rcar_du_panel_data *panel; 30 struct rcar_du_panel_data panel;
27}; 31};
28 32
29#define to_rcar_lvds_connector(c) \ 33#define to_rcar_lvds_connector(c) \
@@ -40,18 +44,9 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
40 return 0; 44 return 0;
41 45
42 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; 46 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
43 mode->clock = lvdscon->panel->mode.clock; 47
44 mode->hdisplay = lvdscon->panel->mode.hdisplay; 48 drm_display_mode_from_videomode(&lvdscon->panel.mode, mode);
45 mode->hsync_start = lvdscon->panel->mode.hsync_start; 49
46 mode->hsync_end = lvdscon->panel->mode.hsync_end;
47 mode->htotal = lvdscon->panel->mode.htotal;
48 mode->vdisplay = lvdscon->panel->mode.vdisplay;
49 mode->vsync_start = lvdscon->panel->mode.vsync_start;
50 mode->vsync_end = lvdscon->panel->mode.vsync_end;
51 mode->vtotal = lvdscon->panel->mode.vtotal;
52 mode->flags = lvdscon->panel->mode.flags;
53
54 drm_mode_set_name(mode);
55 drm_mode_probed_add(connector, mode); 50 drm_mode_probed_add(connector, mode);
56 51
57 return 1; 52 return 1;
@@ -83,7 +78,8 @@ static const struct drm_connector_funcs connector_funcs = {
83 78
84int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 79int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
85 struct rcar_du_encoder *renc, 80 struct rcar_du_encoder *renc,
86 const struct rcar_du_panel_data *panel) 81 const struct rcar_du_panel_data *panel,
82 /* TODO const */ struct device_node *np)
87{ 83{
88 struct rcar_du_lvds_connector *lvdscon; 84 struct rcar_du_lvds_connector *lvdscon;
89 struct drm_connector *connector; 85 struct drm_connector *connector;
@@ -93,11 +89,24 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
93 if (lvdscon == NULL) 89 if (lvdscon == NULL)
94 return -ENOMEM; 90 return -ENOMEM;
95 91
96 lvdscon->panel = panel; 92 if (panel) {
93 lvdscon->panel = *panel;
94 } else {
95 struct display_timing timing;
96
97 ret = of_get_display_timing(np, "panel-timing", &timing);
98 if (ret < 0)
99 return ret;
100
101 videomode_from_timing(&timing, &lvdscon->panel.mode);
102
103 of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
104 of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
105 }
97 106
98 connector = &lvdscon->connector.connector; 107 connector = &lvdscon->connector.connector;
99 connector->display_info.width_mm = panel->width_mm; 108 connector->display_info.width_mm = lvdscon->panel.width_mm;
100 connector->display_info.height_mm = panel->height_mm; 109 connector->display_info.height_mm = lvdscon->panel.height_mm;
101 110
102 ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, 111 ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
103 DRM_MODE_CONNECTOR_LVDS); 112 DRM_MODE_CONNECTOR_LVDS);
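
In the DT case the panel mode now comes from a "panel-timing" node rather than platform data. The conversion chain is display_timing to videomode at connector init time, then videomode to drm_display_mode when .get_modes runs, as sketched below with np, mode and connector standing in for the real arguments:

        struct display_timing timing;
        struct videomode vm;

        /* Init time: parse the panel-timing node into a videomode. */
        if (of_get_display_timing(np, "panel-timing", &timing) < 0)
                return -EINVAL;
        videomode_from_timing(&timing, &vm);

        /* get_modes time: expand the videomode into the probed DRM mode. */
        drm_display_mode_from_videomode(&vm, mode);
        drm_mode_probed_add(connector, mode);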
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index bff8683699ca..d11424d537f9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector 2 * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -20,6 +20,7 @@ struct rcar_du_panel_data;
20 20
21int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 21int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
22 struct rcar_du_encoder *renc, 22 struct rcar_du_encoder *renc,
23 const struct rcar_du_panel_data *panel); 23 const struct rcar_du_panel_data *panel,
24 struct device_node *np);
24 25
25#endif /* __RCAR_DU_LVDSCON_H__ */ 26#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index df30a075d793..7cfb48ce1791 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder 2 * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 7051c6de19ae..3303a55cec79 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder 2 * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 3fb69d9ae61b..72a7cb47bd9f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_plane.c -- R-Car Display Unit Planes 2 * rcar_du_plane.c -- R-Car Display Unit Planes
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index f94f9ce84998..3021288b1a89 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_plane.h -- R-Car Display Unit Planes 2 * rcar_du_plane.h -- R-Car Display Unit Planes
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 8af3944d31b9..564a723ede03 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector 2 * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index b12b0cf7f117..112f50316e01 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector 2 * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
3 * 3 *
4 * Copyright (C) 2013 Renesas Corporation 4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 * 5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index c97cdc9ab239..d47dff95fe52 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -556,7 +556,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
556/* 556/*
557 * Initialize mappings. On Savage4 and SavageIX the alignment 557 * Initialize mappings. On Savage4 and SavageIX the alignment
558 * and size of the aperture is not suitable for automatic MTRR setup 558 * and size of the aperture is not suitable for automatic MTRR setup
559 * in drm_addmap. Therefore we add them manually before the maps are 559 * in drm_legacy_addmap. Therefore we add them manually before the maps are
560 * initialized, and tear them down on last close. 560 * initialized, and tear them down on last close.
561 */ 561 */
562int savage_driver_firstopen(struct drm_device *dev) 562int savage_driver_firstopen(struct drm_device *dev)
@@ -624,19 +624,20 @@ int savage_driver_firstopen(struct drm_device *dev)
624 /* Automatic MTRR setup will do the right thing. */ 624 /* Automatic MTRR setup will do the right thing. */
625 } 625 }
626 626
627 ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, 627 ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
628 _DRM_READ_ONLY, &dev_priv->mmio); 628 _DRM_REGISTERS, _DRM_READ_ONLY,
629 &dev_priv->mmio);
629 if (ret) 630 if (ret)
630 return ret; 631 return ret;
631 632
632 ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, 633 ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
633 _DRM_WRITE_COMBINING, &dev_priv->fb); 634 _DRM_WRITE_COMBINING, &dev_priv->fb);
634 if (ret) 635 if (ret)
635 return ret; 636 return ret;
636 637
637 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, 638 ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
638 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, 639 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
639 &dev_priv->aperture); 640 &dev_priv->aperture);
640 return ret; 641 return ret;
641} 642}
642 643
@@ -698,14 +699,14 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
698 dev_priv->texture_offset = init->texture_offset; 699 dev_priv->texture_offset = init->texture_offset;
699 dev_priv->texture_size = init->texture_size; 700 dev_priv->texture_size = init->texture_size;
700 701
701 dev_priv->sarea = drm_getsarea(dev); 702 dev_priv->sarea = drm_legacy_getsarea(dev);
702 if (!dev_priv->sarea) { 703 if (!dev_priv->sarea) {
703 DRM_ERROR("could not find sarea!\n"); 704 DRM_ERROR("could not find sarea!\n");
704 savage_do_cleanup_bci(dev); 705 savage_do_cleanup_bci(dev);
705 return -EINVAL; 706 return -EINVAL;
706 } 707 }
707 if (init->status_offset != 0) { 708 if (init->status_offset != 0) {
708 dev_priv->status = drm_core_findmap(dev, init->status_offset); 709 dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
709 if (!dev_priv->status) { 710 if (!dev_priv->status) {
710 DRM_ERROR("could not find shadow status region!\n"); 711 DRM_ERROR("could not find shadow status region!\n");
711 savage_do_cleanup_bci(dev); 712 savage_do_cleanup_bci(dev);
@@ -716,14 +717,14 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
716 } 717 }
717 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { 718 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
718 dev->agp_buffer_token = init->buffers_offset; 719 dev->agp_buffer_token = init->buffers_offset;
719 dev->agp_buffer_map = drm_core_findmap(dev, 720 dev->agp_buffer_map = drm_legacy_findmap(dev,
720 init->buffers_offset); 721 init->buffers_offset);
721 if (!dev->agp_buffer_map) { 722 if (!dev->agp_buffer_map) {
722 DRM_ERROR("could not find DMA buffer region!\n"); 723 DRM_ERROR("could not find DMA buffer region!\n");
723 savage_do_cleanup_bci(dev); 724 savage_do_cleanup_bci(dev);
724 return -EINVAL; 725 return -EINVAL;
725 } 726 }
726 drm_core_ioremap(dev->agp_buffer_map, dev); 727 drm_legacy_ioremap(dev->agp_buffer_map, dev);
727 if (!dev->agp_buffer_map->handle) { 728 if (!dev->agp_buffer_map->handle) {
728 DRM_ERROR("failed to ioremap DMA buffer region!\n"); 729 DRM_ERROR("failed to ioremap DMA buffer region!\n");
729 savage_do_cleanup_bci(dev); 730 savage_do_cleanup_bci(dev);
@@ -732,7 +733,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
732 } 733 }
733 if (init->agp_textures_offset) { 734 if (init->agp_textures_offset) {
734 dev_priv->agp_textures = 735 dev_priv->agp_textures =
735 drm_core_findmap(dev, init->agp_textures_offset); 736 drm_legacy_findmap(dev, init->agp_textures_offset);
736 if (!dev_priv->agp_textures) { 737 if (!dev_priv->agp_textures) {
737 DRM_ERROR("could not find agp texture region!\n"); 738 DRM_ERROR("could not find agp texture region!\n");
738 savage_do_cleanup_bci(dev); 739 savage_do_cleanup_bci(dev);
@@ -755,7 +756,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
755 savage_do_cleanup_bci(dev); 756 savage_do_cleanup_bci(dev);
756 return -EINVAL; 757 return -EINVAL;
757 } 758 }
758 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); 759 dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset);
759 if (!dev_priv->cmd_dma) { 760 if (!dev_priv->cmd_dma) {
760 DRM_ERROR("could not find command DMA region!\n"); 761 DRM_ERROR("could not find command DMA region!\n");
761 savage_do_cleanup_bci(dev); 762 savage_do_cleanup_bci(dev);
@@ -768,7 +769,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
768 savage_do_cleanup_bci(dev); 769 savage_do_cleanup_bci(dev);
769 return -EINVAL; 770 return -EINVAL;
770 } 771 }
771 drm_core_ioremap(dev_priv->cmd_dma, dev); 772 drm_legacy_ioremap(dev_priv->cmd_dma, dev);
772 if (!dev_priv->cmd_dma->handle) { 773 if (!dev_priv->cmd_dma->handle) {
773 DRM_ERROR("failed to ioremap command " 774 DRM_ERROR("failed to ioremap command "
774 "DMA region!\n"); 775 "DMA region!\n");
@@ -894,11 +895,11 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
894 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && 895 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
895 dev_priv->cmd_dma->type == _DRM_AGP && 896 dev_priv->cmd_dma->type == _DRM_AGP &&
896 dev_priv->dma_type == SAVAGE_DMA_AGP) 897 dev_priv->dma_type == SAVAGE_DMA_AGP)
897 drm_core_ioremapfree(dev_priv->cmd_dma, dev); 898 drm_legacy_ioremapfree(dev_priv->cmd_dma, dev);
898 899
899 if (dev_priv->dma_type == SAVAGE_DMA_AGP && 900 if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
900 dev->agp_buffer_map && dev->agp_buffer_map->handle) { 901 dev->agp_buffer_map && dev->agp_buffer_map->handle) {
901 drm_core_ioremapfree(dev->agp_buffer_map, dev); 902 drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
902 /* make sure the next instance (which may be running 903 /* make sure the next instance (which may be running
903 * in PCI mode) doesn't try to use an old 904 * in PCI mode) doesn't try to use an old
904 * agp_buffer_map. */ 905 * agp_buffer_map. */
@@ -1050,7 +1051,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1050 return; 1051 return;
1051 1052
1052 if (file_priv->master && file_priv->master->lock.hw_lock) { 1053 if (file_priv->master && file_priv->master->lock.hw_lock) {
1053 drm_idlelock_take(&file_priv->master->lock); 1054 drm_legacy_idlelock_take(&file_priv->master->lock);
1054 release_idlelock = 1; 1055 release_idlelock = 1;
1055 } 1056 }
1056 1057
@@ -1069,7 +1070,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1069 } 1070 }
1070 1071
1071 if (release_idlelock) 1072 if (release_idlelock)
1072 drm_idlelock_release(&file_priv->master->lock); 1073 drm_legacy_idlelock_release(&file_priv->master->lock);
1073} 1074}
1074 1075
1075const struct drm_ioctl_desc savage_ioctls[] = { 1076const struct drm_ioctl_desc savage_ioctls[] = {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 3c030216e888..21aed1febeb4 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -40,7 +40,7 @@ static const struct file_operations savage_driver_fops = {
40 .open = drm_open, 40 .open = drm_open,
41 .release = drm_release, 41 .release = drm_release,
42 .unlocked_ioctl = drm_ioctl, 42 .unlocked_ioctl = drm_ioctl,
43 .mmap = drm_mmap, 43 .mmap = drm_legacy_mmap,
44 .poll = drm_poll, 44 .poll = drm_poll,
45#ifdef CONFIG_COMPAT 45#ifdef CONFIG_COMPAT
46 .compat_ioctl = drm_compat_ioctl, 46 .compat_ioctl = drm_compat_ioctl,
@@ -57,6 +57,7 @@ static struct drm_driver driver = {
57 .preclose = savage_reclaim_buffers, 57 .preclose = savage_reclaim_buffers,
58 .lastclose = savage_driver_lastclose, 58 .lastclose = savage_driver_lastclose,
59 .unload = savage_driver_unload, 59 .unload = savage_driver_unload,
60 .set_busid = drm_pci_set_busid,
60 .ioctls = savage_ioctls, 61 .ioctls = savage_ioctls,
61 .dma_ioctl = savage_bci_buffers, 62 .dma_ioctl = savage_bci_buffers,
62 .fops = &savage_driver_fops, 63 .fops = &savage_driver_fops,
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index 335f8fcf1041..37b699571ad0 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -26,6 +26,8 @@
26#ifndef __SAVAGE_DRV_H__ 26#ifndef __SAVAGE_DRV_H__
27#define __SAVAGE_DRV_H__ 27#define __SAVAGE_DRV_H__
28 28
29#include <drm/drm_legacy.h>
30
29#define DRIVER_AUTHOR "Felix Kuehling" 31#define DRIVER_AUTHOR "Felix Kuehling"
30 32
31#define DRIVER_NAME "savage" 33#define DRIVER_NAME "savage"
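
The savage changes above follow the pattern applied to every old user-mode-setting driver in this series: the legacy map, mmap and idlelock helpers are renamed with a drm_legacy_ prefix and move to <drm/drm_legacy.h>, and bus-ID setup becomes an explicit .set_busid hook instead of core magic. A minimal sketch of the resulting boilerplate for a hypothetical legacy PCI driver (the "foo" names are placeholders, not from the patch):

    #include <drm/drmP.h>
    #include <drm/drm_legacy.h>  /* legacy map/mmap/idlelock helpers */

    static const struct file_operations foo_fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .mmap           = drm_legacy_mmap,  /* formerly drm_mmap */
            .poll           = drm_poll,
    };

    static struct drm_driver foo_driver = {
            .set_busid = drm_pci_set_busid,  /* formerly handled implicitly by the core */
            .fops      = &foo_fops,
    };
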
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
index 463aee18f774..33dd41afea0e 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_backlight.c -- SH Mobile DRM Backlight 2 * shmob_drm_backlight.c -- SH Mobile DRM Backlight
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
index 9477595d2ff3..bac719ecc301 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_backlight.h -- SH Mobile DRM Backlight 2 * shmob_drm_backlight.h -- SH Mobile DRM Backlight
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 47875de89010..0ddce4d046d9 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_crtc.c -- SH Mobile DRM CRTCs 2 * shmob_drm_crtc.c -- SH Mobile DRM CRTCs
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index e5bd109c4c38..eddad6dcc88a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_crtc.h -- SH Mobile DRM CRTCs 2 * shmob_drm_crtc.h -- SH Mobile DRM CRTCs
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index ff4ba483b602..e62cbde81e50 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_drv.c -- SH Mobile DRM driver 2 * shmob_drm_drv.c -- SH Mobile DRM driver
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
@@ -267,6 +267,7 @@ static struct drm_driver shmob_drm_driver = {
267 .load = shmob_drm_load, 267 .load = shmob_drm_load,
268 .unload = shmob_drm_unload, 268 .unload = shmob_drm_unload,
269 .preclose = shmob_drm_preclose, 269 .preclose = shmob_drm_preclose,
270 .set_busid = drm_platform_set_busid,
270 .irq_handler = shmob_drm_irq, 271 .irq_handler = shmob_drm_irq,
271 .get_vblank_counter = drm_vblank_count, 272 .get_vblank_counter = drm_vblank_count,
272 .enable_vblank = shmob_drm_enable_vblank, 273 .enable_vblank = shmob_drm_enable_vblank,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
index 4d46b811b5a7..02ea315ba69a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm.h -- SH Mobile DRM driver 2 * shmob_drm.h -- SH Mobile DRM driver
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index fc0ef0ca7d04..aaf98ace4a90 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_kms.c -- SH Mobile DRM Mode Setting 2 * shmob_drm_kms.c -- SH Mobile DRM Mode Setting
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
index 9495c9111308..06d5b7caa026 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_kms.h -- SH Mobile DRM Mode Setting 2 * shmob_drm_kms.h -- SH Mobile DRM Mode Setting
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 060ae03e5f9b..1805bb23b113 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_plane.c -- SH Mobile DRM Planes 2 * shmob_drm_plane.c -- SH Mobile DRM Planes
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
index 99623d05e3b0..a58cc1fc3240 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_plane.h -- SH Mobile DRM Planes 2 * shmob_drm_plane.h -- SH Mobile DRM Planes
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
index 7923cdd6368e..ea17d4415b9e 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_regs.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * shmob_drm_regs.h -- SH Mobile DRM registers 2 * shmob_drm_regs.h -- SH Mobile DRM registers
3 * 3 *
4 * Copyright (C) 2012 Renesas Corporation 4 * Copyright (C) 2012 Renesas Electronics Corporation
5 * 5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 * 7 *
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 756f787b7143..79bce76cb8f7 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -70,7 +70,7 @@ static const struct file_operations sis_driver_fops = {
70 .open = drm_open, 70 .open = drm_open,
71 .release = drm_release, 71 .release = drm_release,
72 .unlocked_ioctl = drm_ioctl, 72 .unlocked_ioctl = drm_ioctl,
73 .mmap = drm_mmap, 73 .mmap = drm_legacy_mmap,
74 .poll = drm_poll, 74 .poll = drm_poll,
75#ifdef CONFIG_COMPAT 75#ifdef CONFIG_COMPAT
76 .compat_ioctl = drm_compat_ioctl, 76 .compat_ioctl = drm_compat_ioctl,
@@ -108,6 +108,7 @@ static struct drm_driver driver = {
108 .open = sis_driver_open, 108 .open = sis_driver_open,
109 .preclose = sis_reclaim_buffers_locked, 109 .preclose = sis_reclaim_buffers_locked,
110 .postclose = sis_driver_postclose, 110 .postclose = sis_driver_postclose,
111 .set_busid = drm_pci_set_busid,
111 .dma_quiescent = sis_idle, 112 .dma_quiescent = sis_idle,
112 .lastclose = sis_lastclose, 113 .lastclose = sis_lastclose,
113 .ioctls = sis_ioctls, 114 .ioctls = sis_ioctls,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index c31c0253054d..16f972b2a76a 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -28,6 +28,8 @@
28#ifndef _SIS_DRV_H_ 28#ifndef _SIS_DRV_H_
29#define _SIS_DRV_H_ 29#define _SIS_DRV_H_
30 30
31#include <drm/drm_legacy.h>
32
31/* General customization: 33/* General customization:
32 */ 34 */
33 35
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 77f288e4a0a6..93ad8a5704d1 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -319,12 +319,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
319 if (!(file->minor->master && file->master->lock.hw_lock)) 319 if (!(file->minor->master && file->master->lock.hw_lock))
320 return; 320 return;
321 321
322 drm_idlelock_take(&file->master->lock); 322 drm_legacy_idlelock_take(&file->master->lock);
323 323
324 mutex_lock(&dev->struct_mutex); 324 mutex_lock(&dev->struct_mutex);
325 if (list_empty(&file_priv->obj_list)) { 325 if (list_empty(&file_priv->obj_list)) {
326 mutex_unlock(&dev->struct_mutex); 326 mutex_unlock(&dev->struct_mutex);
327 drm_idlelock_release(&file->master->lock); 327 drm_legacy_idlelock_release(&file->master->lock);
328 328
329 return; 329 return;
330 } 330 }
@@ -345,7 +345,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
345 } 345 }
346 mutex_unlock(&dev->struct_mutex); 346 mutex_unlock(&dev->struct_mutex);
347 347
348 drm_idlelock_release(&file->master->lock); 348 drm_legacy_idlelock_release(&file->master->lock);
349 349
350 return; 350 return;
351} 351}
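
sis_reclaim_buffers_locked() shows the renamed idlelock API in context: the lock is taken once, held across the whole reclaim walk, and released on every exit path. The discipline, condensed (reclaim loop and error handling elided; a sketch, not the driver's code):

    void foo_reclaim_buffers(struct drm_device *dev, struct drm_file *file)
    {
            if (!(file->master && file->master->lock.hw_lock))
                    return;

            drm_legacy_idlelock_take(&file->master->lock);

            /* ... walk and free this file's buffers while the HW is idle ... */

            drm_legacy_idlelock_release(&file->master->lock);
    }
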
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
index 82a51d488434..97bcdac23ae1 100644
--- a/drivers/gpu/drm/sti/sti_vtac.c
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -56,8 +56,16 @@ struct sti_vtac_mode {
56 u32 phyts_per_pixel; 56 u32 phyts_per_pixel;
57}; 57};
58 58
59static const struct sti_vtac_mode vtac_mode_main = {0x2, 0x2, VTAC_5_PPP}; 59static const struct sti_vtac_mode vtac_mode_main = {
60static const struct sti_vtac_mode vtac_mode_aux = {0x1, 0x0, VTAC_17_PPP}; 60 .vid_in_width = 0x2,
61 .phyts_width = 0x2,
62 .phyts_per_pixel = VTAC_5_PPP,
63};
64static const struct sti_vtac_mode vtac_mode_aux = {
65 .vid_in_width = 0x1,
66 .phyts_width = 0x0,
67 .phyts_per_pixel = VTAC_17_PPP,
68};
61 69
62/** 70/**
63 * VTAC structure 71 * VTAC structure
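
The sti_vtac change is purely stylistic but worth noting: positional initializers silently break if the struct's fields are ever reordered, while designated initializers document each value and stay correct. Both forms with the patch's own data, shown side by side for comparison only (the _positional/_designated names are illustrative):

    /* Positional: correctness depends on remembering the field order. */
    static const struct sti_vtac_mode aux_positional = { 0x1, 0x0, VTAC_17_PPP };

    /* Designated: self-documenting and robust against reordering. */
    static const struct sti_vtac_mode aux_designated = {
            .vid_in_width    = 0x1,
            .phyts_width     = 0x0,
            .phyts_per_pixel = VTAC_17_PPP,
    };
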
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 3492ca5c46d3..fab5ebcb0fef 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -36,6 +36,7 @@
36#include "tdfx_drv.h" 36#include "tdfx_drv.h"
37 37
38#include <drm/drm_pciids.h> 38#include <drm/drm_pciids.h>
39#include <drm/drm_legacy.h>
39 40
40static struct pci_device_id pciidlist[] = { 41static struct pci_device_id pciidlist[] = {
41 tdfx_PCI_IDS 42 tdfx_PCI_IDS
@@ -46,7 +47,7 @@ static const struct file_operations tdfx_driver_fops = {
46 .open = drm_open, 47 .open = drm_open,
47 .release = drm_release, 48 .release = drm_release,
48 .unlocked_ioctl = drm_ioctl, 49 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 50 .mmap = drm_legacy_mmap,
50 .poll = drm_poll, 51 .poll = drm_poll,
51#ifdef CONFIG_COMPAT 52#ifdef CONFIG_COMPAT
52 .compat_ioctl = drm_compat_ioctl, 53 .compat_ioctl = drm_compat_ioctl,
@@ -55,6 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
55}; 56};
56 57
57static struct drm_driver driver = { 58static struct drm_driver driver = {
59 .set_busid = drm_pci_set_busid,
58 .fops = &tdfx_driver_fops, 60 .fops = &tdfx_driver_fops,
59 .name = DRIVER_NAME, 61 .name = DRIVER_NAME,
60 .desc = DRIVER_DESC, 62 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 708f783ead47..d6b55e3e3716 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -533,9 +533,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
533 533
534 for (i = 0; i < link->num_lanes; i++) 534 for (i = 0; i < link->num_lanes; i++)
535 values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | 535 values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
536 DP_TRAIN_PRE_EMPHASIS_0 | 536 DP_TRAIN_PRE_EMPH_LEVEL_0 |
537 DP_TRAIN_MAX_SWING_REACHED | 537 DP_TRAIN_MAX_SWING_REACHED |
538 DP_TRAIN_VOLTAGE_SWING_400; 538 DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
539 539
540 err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values, 540 err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
541 link->num_lanes); 541 link->num_lanes);
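
The dpaux change is part of a tree-wide rename of the DisplayPort link-training constants in <drm/drm_dp_helper.h>: voltage swing and pre-emphasis are now expressed as generic levels (LEVEL_0 through LEVEL_3) rather than as millivolt and dB figures. A sketch of filling the per-lane training values with the new names (foo_* is a placeholder):

    #include <drm/drm_dp_helper.h>

    static void foo_fill_lane_config(u8 *values, unsigned int num_lanes)
    {
            unsigned int i;

            for (i = 0; i < num_lanes; i++)
                    values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
                                DP_TRAIN_PRE_EMPH_LEVEL_0 |      /* was DP_TRAIN_PRE_EMPHASIS_0 */
                                DP_TRAIN_MAX_SWING_REACHED |
                                DP_TRAIN_VOLTAGE_SWING_LEVEL_0;  /* was DP_TRAIN_VOLTAGE_SWING_400 */
    }
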
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 43a25c853357..6538b56780c2 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -15,6 +15,7 @@
15 15
16#include <drm/drm.h> 16#include <drm/drm.h>
17#include <drm/drmP.h> 17#include <drm/drmP.h>
18#include <drm/drm_gem.h>
18 19
19#define TEGRA_BO_BOTTOM_UP (1 << 0) 20#define TEGRA_BO_BOTTOM_UP (1 << 0)
20 21
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 6be623b4a86f..79a34cbd29f5 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
84 if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) { 84 if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
85 /* oh nos! */ 85 /* oh nos! */
86 dev_err(dev->dev, "no encoders/connectors found\n"); 86 dev_err(dev->dev, "no encoders/connectors found\n");
87 drm_mode_config_cleanup(dev);
87 return -ENXIO; 88 return -ENXIO;
88 } 89 }
89 90
@@ -172,33 +173,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
172 dev->dev_private = priv; 173 dev->dev_private = priv;
173 174
174 priv->wq = alloc_ordered_workqueue("tilcdc", 0); 175 priv->wq = alloc_ordered_workqueue("tilcdc", 0);
176 if (!priv->wq) {
177 ret = -ENOMEM;
178 goto fail_free_priv;
179 }
175 180
176 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 181 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
177 if (!res) { 182 if (!res) {
178 dev_err(dev->dev, "failed to get memory resource\n"); 183 dev_err(dev->dev, "failed to get memory resource\n");
179 ret = -EINVAL; 184 ret = -EINVAL;
180 goto fail; 185 goto fail_free_wq;
181 } 186 }
182 187
183 priv->mmio = ioremap_nocache(res->start, resource_size(res)); 188 priv->mmio = ioremap_nocache(res->start, resource_size(res));
184 if (!priv->mmio) { 189 if (!priv->mmio) {
185 dev_err(dev->dev, "failed to ioremap\n"); 190 dev_err(dev->dev, "failed to ioremap\n");
186 ret = -ENOMEM; 191 ret = -ENOMEM;
187 goto fail; 192 goto fail_free_wq;
188 } 193 }
189 194
190 priv->clk = clk_get(dev->dev, "fck"); 195 priv->clk = clk_get(dev->dev, "fck");
191 if (IS_ERR(priv->clk)) { 196 if (IS_ERR(priv->clk)) {
192 dev_err(dev->dev, "failed to get functional clock\n"); 197 dev_err(dev->dev, "failed to get functional clock\n");
193 ret = -ENODEV; 198 ret = -ENODEV;
194 goto fail; 199 goto fail_iounmap;
195 } 200 }
196 201
197 priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck"); 202 priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
 198 	if (IS_ERR(priv->disp_clk)) { 203 	if (IS_ERR(priv->disp_clk)) {
199 dev_err(dev->dev, "failed to get display clock\n"); 204 dev_err(dev->dev, "failed to get display clock\n");
200 ret = -ENODEV; 205 ret = -ENODEV;
201 goto fail; 206 goto fail_put_clk;
202 } 207 }
203 208
204#ifdef CONFIG_CPU_FREQ 209#ifdef CONFIG_CPU_FREQ
@@ -208,7 +213,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
208 CPUFREQ_TRANSITION_NOTIFIER); 213 CPUFREQ_TRANSITION_NOTIFIER);
209 if (ret) { 214 if (ret) {
210 dev_err(dev->dev, "failed to register cpufreq notifier\n"); 215 dev_err(dev->dev, "failed to register cpufreq notifier\n");
211 goto fail; 216 goto fail_put_disp_clk;
212 } 217 }
213#endif 218#endif
214 219
@@ -253,13 +258,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
253 ret = modeset_init(dev); 258 ret = modeset_init(dev);
254 if (ret < 0) { 259 if (ret < 0) {
255 dev_err(dev->dev, "failed to initialize mode setting\n"); 260 dev_err(dev->dev, "failed to initialize mode setting\n");
256 goto fail; 261 goto fail_cpufreq_unregister;
257 } 262 }
258 263
259 ret = drm_vblank_init(dev, 1); 264 ret = drm_vblank_init(dev, 1);
260 if (ret < 0) { 265 if (ret < 0) {
261 dev_err(dev->dev, "failed to initialize vblank\n"); 266 dev_err(dev->dev, "failed to initialize vblank\n");
262 goto fail; 267 goto fail_mode_config_cleanup;
263 } 268 }
264 269
265 pm_runtime_get_sync(dev->dev); 270 pm_runtime_get_sync(dev->dev);
@@ -267,7 +272,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
267 pm_runtime_put_sync(dev->dev); 272 pm_runtime_put_sync(dev->dev);
268 if (ret < 0) { 273 if (ret < 0) {
269 dev_err(dev->dev, "failed to install IRQ handler\n"); 274 dev_err(dev->dev, "failed to install IRQ handler\n");
270 goto fail; 275 goto fail_vblank_cleanup;
271 } 276 }
272 277
273 platform_set_drvdata(pdev, dev); 278 platform_set_drvdata(pdev, dev);
@@ -283,13 +288,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
283 priv->fbdev = drm_fbdev_cma_init(dev, bpp, 288 priv->fbdev = drm_fbdev_cma_init(dev, bpp,
284 dev->mode_config.num_crtc, 289 dev->mode_config.num_crtc,
285 dev->mode_config.num_connector); 290 dev->mode_config.num_connector);
291 if (IS_ERR(priv->fbdev)) {
292 ret = PTR_ERR(priv->fbdev);
293 goto fail_irq_uninstall;
294 }
286 295
287 drm_kms_helper_poll_init(dev); 296 drm_kms_helper_poll_init(dev);
288 297
289 return 0; 298 return 0;
290 299
291fail: 300fail_irq_uninstall:
292 tilcdc_unload(dev); 301 pm_runtime_get_sync(dev->dev);
302 drm_irq_uninstall(dev);
303 pm_runtime_put_sync(dev->dev);
304
305fail_vblank_cleanup:
306 drm_vblank_cleanup(dev);
307
308fail_mode_config_cleanup:
309 drm_mode_config_cleanup(dev);
310
311fail_cpufreq_unregister:
312 pm_runtime_disable(dev->dev);
313#ifdef CONFIG_CPU_FREQ
314 cpufreq_unregister_notifier(&priv->freq_transition,
315 CPUFREQ_TRANSITION_NOTIFIER);
316fail_put_disp_clk:
317 clk_put(priv->disp_clk);
318#endif
319
320fail_put_clk:
321 clk_put(priv->clk);
322
323fail_iounmap:
324 iounmap(priv->mmio);
325
326fail_free_wq:
327 flush_workqueue(priv->wq);
328 destroy_workqueue(priv->wq);
329
330fail_free_priv:
331 dev->dev_private = NULL;
332 kfree(priv);
293 return ret; 333 return ret;
294} 334}
295 335
@@ -502,6 +542,7 @@ static struct drm_driver tilcdc_driver = {
502 .unload = tilcdc_unload, 542 .unload = tilcdc_unload,
503 .preclose = tilcdc_preclose, 543 .preclose = tilcdc_preclose,
504 .lastclose = tilcdc_lastclose, 544 .lastclose = tilcdc_lastclose,
545 .set_busid = drm_platform_set_busid,
505 .irq_handler = tilcdc_irq, 546 .irq_handler = tilcdc_irq,
506 .irq_preinstall = tilcdc_irq_preinstall, 547 .irq_preinstall = tilcdc_irq_preinstall,
507 .irq_postinstall = tilcdc_irq_postinstall, 548 .irq_postinstall = tilcdc_irq_postinstall,
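
The tilcdc_load() rework above replaces a single catch-all fail: label, which ran the full unload path and could touch resources that were never acquired, with the idiomatic kernel unwind ladder: one label per resource, jumped to in reverse order of acquisition, so each error path undoes exactly what succeeded. The shape, reduced to three resources (a sketch with illustrative identifiers, assuming a foo_priv holding the handles):

    static int foo_load(struct foo_priv *priv, struct device *dev)
    {
            int ret;

            priv->wq = alloc_ordered_workqueue("foo", 0);
            if (!priv->wq)
                    return -ENOMEM;

            priv->mmio = ioremap_nocache(priv->mmio_start, priv->mmio_size);
            if (!priv->mmio) {
                    ret = -ENOMEM;
                    goto fail_free_wq;
            }

            priv->clk = clk_get(dev, "fck");
            if (IS_ERR(priv->clk)) {
                    ret = PTR_ERR(priv->clk);
                    goto fail_iounmap;
            }

            return 0;

    fail_iounmap:
            iounmap(priv->mmio);
    fail_free_wq:
            destroy_workqueue(priv->wq);
            return ret;
    }
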
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 4c7aa1d8134f..7a0315855e90 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -18,6 +18,7 @@
18#include <linux/pinctrl/pinmux.h> 18#include <linux/pinctrl/pinmux.h>
19#include <linux/pinctrl/consumer.h> 19#include <linux/pinctrl/consumer.h>
20#include <linux/backlight.h> 20#include <linux/backlight.h>
21#include <linux/gpio/consumer.h>
21#include <video/display_timing.h> 22#include <video/display_timing.h>
22#include <video/of_display_timing.h> 23#include <video/of_display_timing.h>
23#include <video/videomode.h> 24#include <video/videomode.h>
@@ -29,6 +30,7 @@ struct panel_module {
29 struct tilcdc_panel_info *info; 30 struct tilcdc_panel_info *info;
30 struct display_timings *timings; 31 struct display_timings *timings;
31 struct backlight_device *backlight; 32 struct backlight_device *backlight;
33 struct gpio_desc *enable_gpio;
32}; 34};
33#define to_panel_module(x) container_of(x, struct panel_module, base) 35#define to_panel_module(x) container_of(x, struct panel_module, base)
34 36
@@ -55,13 +57,17 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
55{ 57{
56 struct panel_encoder *panel_encoder = to_panel_encoder(encoder); 58 struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
57 struct backlight_device *backlight = panel_encoder->mod->backlight; 59 struct backlight_device *backlight = panel_encoder->mod->backlight;
60 struct gpio_desc *gpio = panel_encoder->mod->enable_gpio;
58 61
59 if (!backlight) 62 if (backlight) {
60 return; 63 backlight->props.power = mode == DRM_MODE_DPMS_ON ?
64 FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
65 backlight_update_status(backlight);
66 }
61 67
62 backlight->props.power = mode == DRM_MODE_DPMS_ON 68 if (gpio)
63 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; 69 gpiod_set_value_cansleep(gpio,
64 backlight_update_status(backlight); 70 mode == DRM_MODE_DPMS_ON ? 1 : 0);
65} 71}
66 72
67static bool panel_encoder_mode_fixup(struct drm_encoder *encoder, 73static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -311,6 +317,7 @@ static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
311 info = kzalloc(sizeof(*info), GFP_KERNEL); 317 info = kzalloc(sizeof(*info), GFP_KERNEL);
312 if (!info) { 318 if (!info) {
313 pr_err("%s: allocation failed\n", __func__); 319 pr_err("%s: allocation failed\n", __func__);
320 of_node_put(info_np);
314 return NULL; 321 return NULL;
315 } 322 }
316 323
@@ -331,22 +338,21 @@ static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
331 if (ret) { 338 if (ret) {
332 pr_err("%s: error reading panel-info properties\n", __func__); 339 pr_err("%s: error reading panel-info properties\n", __func__);
333 kfree(info); 340 kfree(info);
341 of_node_put(info_np);
334 return NULL; 342 return NULL;
335 } 343 }
344 of_node_put(info_np);
336 345
337 return info; 346 return info;
338} 347}
339 348
340static struct of_device_id panel_of_match[];
341
342static int panel_probe(struct platform_device *pdev) 349static int panel_probe(struct platform_device *pdev)
343{ 350{
344 struct device_node *node = pdev->dev.of_node; 351 struct device_node *bl_node, *node = pdev->dev.of_node;
345 struct panel_module *panel_mod; 352 struct panel_module *panel_mod;
346 struct tilcdc_module *mod; 353 struct tilcdc_module *mod;
347 struct pinctrl *pinctrl; 354 struct pinctrl *pinctrl;
348 int ret = -EINVAL; 355 int ret;
349
350 356
351 /* bail out early if no DT data: */ 357 /* bail out early if no DT data: */
352 if (!node) { 358 if (!node) {
@@ -354,10 +360,40 @@ static int panel_probe(struct platform_device *pdev)
354 return -ENXIO; 360 return -ENXIO;
355 } 361 }
356 362
357 panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL); 363 panel_mod = devm_kzalloc(&pdev->dev, sizeof(*panel_mod), GFP_KERNEL);
358 if (!panel_mod) 364 if (!panel_mod)
359 return -ENOMEM; 365 return -ENOMEM;
360 366
367 bl_node = of_parse_phandle(node, "backlight", 0);
368 if (bl_node) {
369 panel_mod->backlight = of_find_backlight_by_node(bl_node);
370 of_node_put(bl_node);
371
372 if (!panel_mod->backlight)
373 return -EPROBE_DEFER;
374
375 dev_info(&pdev->dev, "found backlight\n");
376 }
377
378 panel_mod->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
379 if (IS_ERR(panel_mod->enable_gpio)) {
380 ret = PTR_ERR(panel_mod->enable_gpio);
381 if (ret != -ENOENT) {
382 dev_err(&pdev->dev, "failed to request enable GPIO\n");
383 goto fail_backlight;
384 }
385
386 /* Optional GPIO is not here, continue silently. */
387 panel_mod->enable_gpio = NULL;
388 } else {
389 ret = gpiod_direction_output(panel_mod->enable_gpio, 0);
390 if (ret < 0) {
391 dev_err(&pdev->dev, "failed to setup GPIO\n");
392 goto fail_backlight;
393 }
394 dev_info(&pdev->dev, "found enable GPIO\n");
395 }
396
361 mod = &panel_mod->base; 397 mod = &panel_mod->base;
362 pdev->dev.platform_data = mod; 398 pdev->dev.platform_data = mod;
363 399
@@ -370,29 +406,30 @@ static int panel_probe(struct platform_device *pdev)
370 panel_mod->timings = of_get_display_timings(node); 406 panel_mod->timings = of_get_display_timings(node);
371 if (!panel_mod->timings) { 407 if (!panel_mod->timings) {
372 dev_err(&pdev->dev, "could not get panel timings\n"); 408 dev_err(&pdev->dev, "could not get panel timings\n");
409 ret = -EINVAL;
373 goto fail_free; 410 goto fail_free;
374 } 411 }
375 412
376 panel_mod->info = of_get_panel_info(node); 413 panel_mod->info = of_get_panel_info(node);
377 if (!panel_mod->info) { 414 if (!panel_mod->info) {
378 dev_err(&pdev->dev, "could not get panel info\n"); 415 dev_err(&pdev->dev, "could not get panel info\n");
416 ret = -EINVAL;
379 goto fail_timings; 417 goto fail_timings;
380 } 418 }
381 419
382 mod->preferred_bpp = panel_mod->info->bpp; 420 mod->preferred_bpp = panel_mod->info->bpp;
383 421
384 panel_mod->backlight = of_find_backlight_by_node(node);
385 if (panel_mod->backlight)
386 dev_info(&pdev->dev, "found backlight\n");
387
388 return 0; 422 return 0;
389 423
390fail_timings: 424fail_timings:
391 display_timings_release(panel_mod->timings); 425 display_timings_release(panel_mod->timings);
392 426
393fail_free: 427fail_free:
394 kfree(panel_mod);
395 tilcdc_module_cleanup(mod); 428 tilcdc_module_cleanup(mod);
429
430fail_backlight:
431 if (panel_mod->backlight)
432 put_device(&panel_mod->backlight->dev);
396 return ret; 433 return ret;
397} 434}
398 435
@@ -400,12 +437,15 @@ static int panel_remove(struct platform_device *pdev)
400{ 437{
401 struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); 438 struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
402 struct panel_module *panel_mod = to_panel_module(mod); 439 struct panel_module *panel_mod = to_panel_module(mod);
440 struct backlight_device *backlight = panel_mod->backlight;
441
442 if (backlight)
443 put_device(&backlight->dev);
403 444
404 display_timings_release(panel_mod->timings); 445 display_timings_release(panel_mod->timings);
405 446
406 tilcdc_module_cleanup(mod); 447 tilcdc_module_cleanup(mod);
407 kfree(panel_mod->info); 448 kfree(panel_mod->info);
408 kfree(panel_mod);
409 449
410 return 0; 450 return 0;
411} 451}
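
The reworked panel probe distinguishes "GPIO not wired up" from a real failure: devm_gpiod_get() returns -ENOENT when the property is simply absent, which is tolerated because the pin is optional, while any other error (including -EPROBE_DEFER) aborts the probe. The pattern in isolation (a fragment sketched from the hunk above, with cleanup labels elided):

    panel->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
    if (IS_ERR(panel->enable_gpio)) {
            int err = PTR_ERR(panel->enable_gpio);

            if (err != -ENOENT)
                    return err;             /* real error or -EPROBE_DEFER */
            panel->enable_gpio = NULL;      /* optional pin simply absent */
    } else {
            int err = gpiod_direction_output(panel->enable_gpio, 0);

            if (err < 0)
                    return err;
    }
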
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3da89d5dab60..8f5cec67c47d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -40,6 +40,7 @@
40#include <linux/file.h> 40#include <linux/file.h>
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/atomic.h> 42#include <linux/atomic.h>
43#include <linux/reservation.h>
43 44
44#define TTM_ASSERT_LOCKED(param) 45#define TTM_ASSERT_LOCKED(param)
45#define TTM_DEBUG(fmt, arg...) 46#define TTM_DEBUG(fmt, arg...)
@@ -53,12 +54,13 @@ static struct attribute ttm_bo_count = {
53 .mode = S_IRUGO 54 .mode = S_IRUGO
54}; 55};
55 56
56static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) 57static inline int ttm_mem_type_from_place(const struct ttm_place *place,
58 uint32_t *mem_type)
57{ 59{
58 int i; 60 int i;
59 61
60 for (i = 0; i <= TTM_PL_PRIV5; i++) 62 for (i = 0; i <= TTM_PL_PRIV5; i++)
61 if (flags & (1 << i)) { 63 if (place->flags & (1 << i)) {
62 *mem_type = i; 64 *mem_type = i;
63 return 0; 65 return 0;
64 } 66 }
@@ -89,12 +91,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
89 bo, bo->mem.num_pages, bo->mem.size >> 10, 91 bo, bo->mem.num_pages, bo->mem.size >> 10,
90 bo->mem.size >> 20); 92 bo->mem.size >> 20);
91 for (i = 0; i < placement->num_placement; i++) { 93 for (i = 0; i < placement->num_placement; i++) {
92 ret = ttm_mem_type_from_flags(placement->placement[i], 94 ret = ttm_mem_type_from_place(&placement->placement[i],
93 &mem_type); 95 &mem_type);
94 if (ret) 96 if (ret)
95 return; 97 return;
96 pr_err(" placement[%d]=0x%08X (%d)\n", 98 pr_err(" placement[%d]=0x%08X (%d)\n",
97 i, placement->placement[i], mem_type); 99 i, placement->placement[i].flags, mem_type);
98 ttm_mem_type_debug(bo->bdev, mem_type); 100 ttm_mem_type_debug(bo->bdev, mem_type);
99 } 101 }
100} 102}
@@ -141,7 +143,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
141 BUG_ON(atomic_read(&bo->list_kref.refcount)); 143 BUG_ON(atomic_read(&bo->list_kref.refcount));
142 BUG_ON(atomic_read(&bo->kref.refcount)); 144 BUG_ON(atomic_read(&bo->kref.refcount));
143 BUG_ON(atomic_read(&bo->cpu_writers)); 145 BUG_ON(atomic_read(&bo->cpu_writers));
144 BUG_ON(bo->sync_obj != NULL);
145 BUG_ON(bo->mem.mm_node != NULL); 146 BUG_ON(bo->mem.mm_node != NULL);
146 BUG_ON(!list_empty(&bo->lru)); 147 BUG_ON(!list_empty(&bo->lru));
147 BUG_ON(!list_empty(&bo->ddestroy)); 148 BUG_ON(!list_empty(&bo->ddestroy));
@@ -402,36 +403,48 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
402 ww_mutex_unlock (&bo->resv->lock); 403 ww_mutex_unlock (&bo->resv->lock);
403} 404}
404 405
406static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
407{
408 struct reservation_object_list *fobj;
409 struct fence *fence;
410 int i;
411
412 fobj = reservation_object_get_list(bo->resv);
413 fence = reservation_object_get_excl(bo->resv);
414 if (fence && !fence->ops->signaled)
415 fence_enable_sw_signaling(fence);
416
417 for (i = 0; fobj && i < fobj->shared_count; ++i) {
418 fence = rcu_dereference_protected(fobj->shared[i],
419 reservation_object_held(bo->resv));
420
421 if (!fence->ops->signaled)
422 fence_enable_sw_signaling(fence);
423 }
424}
425
405static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) 426static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
406{ 427{
407 struct ttm_bo_device *bdev = bo->bdev; 428 struct ttm_bo_device *bdev = bo->bdev;
408 struct ttm_bo_global *glob = bo->glob; 429 struct ttm_bo_global *glob = bo->glob;
409 struct ttm_bo_driver *driver = bdev->driver;
410 void *sync_obj = NULL;
411 int put_count; 430 int put_count;
412 int ret; 431 int ret;
413 432
414 spin_lock(&glob->lru_lock); 433 spin_lock(&glob->lru_lock);
415 ret = __ttm_bo_reserve(bo, false, true, false, NULL); 434 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
416 435
417 spin_lock(&bdev->fence_lock); 436 if (!ret) {
418 (void) ttm_bo_wait(bo, false, false, true); 437 if (!ttm_bo_wait(bo, false, false, true)) {
419 if (!ret && !bo->sync_obj) { 438 put_count = ttm_bo_del_from_lru(bo);
420 spin_unlock(&bdev->fence_lock);
421 put_count = ttm_bo_del_from_lru(bo);
422
423 spin_unlock(&glob->lru_lock);
424 ttm_bo_cleanup_memtype_use(bo);
425 439
426 ttm_bo_list_ref_sub(bo, put_count, true); 440 spin_unlock(&glob->lru_lock);
441 ttm_bo_cleanup_memtype_use(bo);
427 442
428 return; 443 ttm_bo_list_ref_sub(bo, put_count, true);
429 }
430 if (bo->sync_obj)
431 sync_obj = driver->sync_obj_ref(bo->sync_obj);
432 spin_unlock(&bdev->fence_lock);
433 444
434 if (!ret) { 445 return;
446 } else
447 ttm_bo_flush_all_fences(bo);
435 448
436 /* 449 /*
437 * Make NO_EVICT bos immediately available to 450 * Make NO_EVICT bos immediately available to
@@ -450,10 +463,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
450 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 463 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
451 spin_unlock(&glob->lru_lock); 464 spin_unlock(&glob->lru_lock);
452 465
453 if (sync_obj) {
454 driver->sync_obj_flush(sync_obj);
455 driver->sync_obj_unref(&sync_obj);
456 }
457 schedule_delayed_work(&bdev->wq, 466 schedule_delayed_work(&bdev->wq,
458 ((HZ / 100) < 1) ? 1 : HZ / 100); 467 ((HZ / 100) < 1) ? 1 : HZ / 100);
459} 468}
@@ -474,44 +483,26 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
474 bool interruptible, 483 bool interruptible,
475 bool no_wait_gpu) 484 bool no_wait_gpu)
476{ 485{
477 struct ttm_bo_device *bdev = bo->bdev;
478 struct ttm_bo_driver *driver = bdev->driver;
479 struct ttm_bo_global *glob = bo->glob; 486 struct ttm_bo_global *glob = bo->glob;
480 int put_count; 487 int put_count;
481 int ret; 488 int ret;
482 489
483 spin_lock(&bdev->fence_lock);
484 ret = ttm_bo_wait(bo, false, false, true); 490 ret = ttm_bo_wait(bo, false, false, true);
485 491
486 if (ret && !no_wait_gpu) { 492 if (ret && !no_wait_gpu) {
487 void *sync_obj; 493 long lret;
488 494 ww_mutex_unlock(&bo->resv->lock);
489 /*
490 * Take a reference to the fence and unreserve,
491 * at this point the buffer should be dead, so
492 * no new sync objects can be attached.
493 */
494 sync_obj = driver->sync_obj_ref(bo->sync_obj);
495 spin_unlock(&bdev->fence_lock);
496
497 __ttm_bo_unreserve(bo);
498 spin_unlock(&glob->lru_lock); 495 spin_unlock(&glob->lru_lock);
499 496
500 ret = driver->sync_obj_wait(sync_obj, false, interruptible); 497 lret = reservation_object_wait_timeout_rcu(bo->resv,
501 driver->sync_obj_unref(&sync_obj); 498 true,
502 if (ret) 499 interruptible,
503 return ret; 500 30 * HZ);
504 501
505 /* 502 if (lret < 0)
506 * remove sync_obj with ttm_bo_wait, the wait should be 503 return lret;
507 * finished, and no new wait object should have been added. 504 else if (lret == 0)
508 */ 505 return -EBUSY;
509 spin_lock(&bdev->fence_lock);
510 ret = ttm_bo_wait(bo, false, false, true);
511 WARN_ON(ret);
512 spin_unlock(&bdev->fence_lock);
513 if (ret)
514 return ret;
515 506
516 spin_lock(&glob->lru_lock); 507 spin_lock(&glob->lru_lock);
517 ret = __ttm_bo_reserve(bo, false, true, false, NULL); 508 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
@@ -528,8 +519,14 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
528 spin_unlock(&glob->lru_lock); 519 spin_unlock(&glob->lru_lock);
529 return 0; 520 return 0;
530 } 521 }
531 } else 522
532 spin_unlock(&bdev->fence_lock); 523 /*
524 * remove sync_obj with ttm_bo_wait, the wait should be
525 * finished, and no new wait object should have been added.
526 */
527 ret = ttm_bo_wait(bo, false, false, true);
528 WARN_ON(ret);
529 }
533 530
534 if (ret || unlikely(list_empty(&bo->ddestroy))) { 531 if (ret || unlikely(list_empty(&bo->ddestroy))) {
535 __ttm_bo_unreserve(bo); 532 __ttm_bo_unreserve(bo);
@@ -667,9 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
667 struct ttm_placement placement; 664 struct ttm_placement placement;
668 int ret = 0; 665 int ret = 0;
669 666
670 spin_lock(&bdev->fence_lock);
671 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 667 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
672 spin_unlock(&bdev->fence_lock);
673 668
674 if (unlikely(ret != 0)) { 669 if (unlikely(ret != 0)) {
675 if (ret != -ERESTARTSYS) { 670 if (ret != -ERESTARTSYS) {
@@ -685,8 +680,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
685 evict_mem.bus.io_reserved_vm = false; 680 evict_mem.bus.io_reserved_vm = false;
686 evict_mem.bus.io_reserved_count = 0; 681 evict_mem.bus.io_reserved_count = 0;
687 682
688 placement.fpfn = 0;
689 placement.lpfn = 0;
690 placement.num_placement = 0; 683 placement.num_placement = 0;
691 placement.num_busy_placement = 0; 684 placement.num_busy_placement = 0;
692 bdev->driver->evict_flags(bo, &placement); 685 bdev->driver->evict_flags(bo, &placement);
@@ -774,7 +767,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
774 */ 767 */
775static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, 768static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
776 uint32_t mem_type, 769 uint32_t mem_type,
777 struct ttm_placement *placement, 770 const struct ttm_place *place,
778 struct ttm_mem_reg *mem, 771 struct ttm_mem_reg *mem,
779 bool interruptible, 772 bool interruptible,
780 bool no_wait_gpu) 773 bool no_wait_gpu)
@@ -784,7 +777,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
784 int ret; 777 int ret;
785 778
786 do { 779 do {
787 ret = (*man->func->get_node)(man, bo, placement, 0, mem); 780 ret = (*man->func->get_node)(man, bo, place, mem);
788 if (unlikely(ret != 0)) 781 if (unlikely(ret != 0))
789 return ret; 782 return ret;
790 if (mem->mm_node) 783 if (mem->mm_node)
@@ -827,18 +820,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
827 820
828static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 821static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
829 uint32_t mem_type, 822 uint32_t mem_type,
830 uint32_t proposed_placement, 823 const struct ttm_place *place,
831 uint32_t *masked_placement) 824 uint32_t *masked_placement)
832{ 825{
833 uint32_t cur_flags = ttm_bo_type_flags(mem_type); 826 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
834 827
835 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) 828 if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
836 return false; 829 return false;
837 830
838 if ((proposed_placement & man->available_caching) == 0) 831 if ((place->flags & man->available_caching) == 0)
839 return false; 832 return false;
840 833
841 cur_flags |= (proposed_placement & man->available_caching); 834 cur_flags |= (place->flags & man->available_caching);
842 835
843 *masked_placement = cur_flags; 836 *masked_placement = cur_flags;
844 return true; 837 return true;
@@ -869,15 +862,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
869 862
870 mem->mm_node = NULL; 863 mem->mm_node = NULL;
871 for (i = 0; i < placement->num_placement; ++i) { 864 for (i = 0; i < placement->num_placement; ++i) {
872 ret = ttm_mem_type_from_flags(placement->placement[i], 865 const struct ttm_place *place = &placement->placement[i];
873 &mem_type); 866
867 ret = ttm_mem_type_from_place(place, &mem_type);
874 if (ret) 868 if (ret)
875 return ret; 869 return ret;
876 man = &bdev->man[mem_type]; 870 man = &bdev->man[mem_type];
877 871
878 type_ok = ttm_bo_mt_compatible(man, 872 type_ok = ttm_bo_mt_compatible(man, mem_type, place,
879 mem_type,
880 placement->placement[i],
881 &cur_flags); 873 &cur_flags);
882 874
883 if (!type_ok) 875 if (!type_ok)
@@ -889,7 +881,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
889 * Use the access and other non-mapping-related flag bits from 881 * Use the access and other non-mapping-related flag bits from
890 * the memory placement flags to the current flags 882 * the memory placement flags to the current flags
891 */ 883 */
892 ttm_flag_masked(&cur_flags, placement->placement[i], 884 ttm_flag_masked(&cur_flags, place->flags,
893 ~TTM_PL_MASK_MEMTYPE); 885 ~TTM_PL_MASK_MEMTYPE);
894 886
895 if (mem_type == TTM_PL_SYSTEM) 887 if (mem_type == TTM_PL_SYSTEM)
@@ -897,8 +889,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
897 889
898 if (man->has_type && man->use_type) { 890 if (man->has_type && man->use_type) {
899 type_found = true; 891 type_found = true;
900 ret = (*man->func->get_node)(man, bo, placement, 892 ret = (*man->func->get_node)(man, bo, place, mem);
901 cur_flags, mem);
902 if (unlikely(ret)) 893 if (unlikely(ret))
903 return ret; 894 return ret;
904 } 895 }
@@ -916,17 +907,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
916 return -EINVAL; 907 return -EINVAL;
917 908
918 for (i = 0; i < placement->num_busy_placement; ++i) { 909 for (i = 0; i < placement->num_busy_placement; ++i) {
919 ret = ttm_mem_type_from_flags(placement->busy_placement[i], 910 const struct ttm_place *place = &placement->busy_placement[i];
920 &mem_type); 911
912 ret = ttm_mem_type_from_place(place, &mem_type);
921 if (ret) 913 if (ret)
922 return ret; 914 return ret;
923 man = &bdev->man[mem_type]; 915 man = &bdev->man[mem_type];
924 if (!man->has_type) 916 if (!man->has_type)
925 continue; 917 continue;
926 if (!ttm_bo_mt_compatible(man, 918 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
927 mem_type,
928 placement->busy_placement[i],
929 &cur_flags))
930 continue; 919 continue;
931 920
932 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 921 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -935,7 +924,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
935 * Use the access and other non-mapping-related flag bits from 924 * Use the access and other non-mapping-related flag bits from
936 * the memory placement flags to the current flags 925 * the memory placement flags to the current flags
937 */ 926 */
938 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 927 ttm_flag_masked(&cur_flags, place->flags,
939 ~TTM_PL_MASK_MEMTYPE); 928 ~TTM_PL_MASK_MEMTYPE);
940 929
941 if (mem_type == TTM_PL_SYSTEM) { 930 if (mem_type == TTM_PL_SYSTEM) {
@@ -945,7 +934,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
945 return 0; 934 return 0;
946 } 935 }
947 936
948 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 937 ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
949 interruptible, no_wait_gpu); 938 interruptible, no_wait_gpu);
950 if (ret == 0 && mem->mm_node) { 939 if (ret == 0 && mem->mm_node) {
951 mem->placement = cur_flags; 940 mem->placement = cur_flags;
@@ -966,7 +955,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
966{ 955{
967 int ret = 0; 956 int ret = 0;
968 struct ttm_mem_reg mem; 957 struct ttm_mem_reg mem;
969 struct ttm_bo_device *bdev = bo->bdev;
970 958
971 lockdep_assert_held(&bo->resv->lock.base); 959 lockdep_assert_held(&bo->resv->lock.base);
972 960
@@ -975,9 +963,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
975 * Have the driver move function wait for idle when necessary, 963 * Have the driver move function wait for idle when necessary,
976 * instead of doing it here. 964 * instead of doing it here.
977 */ 965 */
978 spin_lock(&bdev->fence_lock);
979 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 966 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
980 spin_unlock(&bdev->fence_lock);
981 if (ret) 967 if (ret)
982 return ret; 968 return ret;
983 mem.num_pages = bo->num_pages; 969 mem.num_pages = bo->num_pages;
@@ -1006,20 +992,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1006{ 992{
1007 int i; 993 int i;
1008 994
1009 if (mem->mm_node && placement->lpfn != 0 &&
1010 (mem->start < placement->fpfn ||
1011 mem->start + mem->num_pages > placement->lpfn))
1012 return false;
1013
1014 for (i = 0; i < placement->num_placement; i++) { 995 for (i = 0; i < placement->num_placement; i++) {
1015 *new_flags = placement->placement[i]; 996 const struct ttm_place *heap = &placement->placement[i];
997 if (mem->mm_node && heap->lpfn != 0 &&
998 (mem->start < heap->fpfn ||
999 mem->start + mem->num_pages > heap->lpfn))
1000 continue;
1001
1002 *new_flags = heap->flags;
1016 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && 1003 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1017 (*new_flags & mem->placement & TTM_PL_MASK_MEM)) 1004 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1018 return true; 1005 return true;
1019 } 1006 }
1020 1007
1021 for (i = 0; i < placement->num_busy_placement; i++) { 1008 for (i = 0; i < placement->num_busy_placement; i++) {
1022 *new_flags = placement->busy_placement[i]; 1009 const struct ttm_place *heap = &placement->busy_placement[i];
1010 if (mem->mm_node && heap->lpfn != 0 &&
1011 (mem->start < heap->fpfn ||
1012 mem->start + mem->num_pages > heap->lpfn))
1013 continue;
1014
1015 *new_flags = heap->flags;
1023 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && 1016 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1024 (*new_flags & mem->placement & TTM_PL_MASK_MEM)) 1017 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1025 return true; 1018 return true;
@@ -1037,11 +1030,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1037 uint32_t new_flags; 1030 uint32_t new_flags;
1038 1031
1039 lockdep_assert_held(&bo->resv->lock.base); 1032 lockdep_assert_held(&bo->resv->lock.base);
1040 /* Check that range is valid */
1041 if (placement->lpfn || placement->fpfn)
1042 if (placement->fpfn > placement->lpfn ||
1043 (placement->lpfn - placement->fpfn) < bo->num_pages)
1044 return -EINVAL;
1045 /* 1033 /*
1046 * Check whether we need to move buffer. 1034 * Check whether we need to move buffer.
1047 */ 1035 */
@@ -1070,15 +1058,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1070} 1058}
1071EXPORT_SYMBOL(ttm_bo_validate); 1059EXPORT_SYMBOL(ttm_bo_validate);
1072 1060
1073int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1074 struct ttm_placement *placement)
1075{
1076 BUG_ON((placement->fpfn || placement->lpfn) &&
1077 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1078
1079 return 0;
1080}
1081
1082int ttm_bo_init(struct ttm_bo_device *bdev, 1061int ttm_bo_init(struct ttm_bo_device *bdev,
1083 struct ttm_buffer_object *bo, 1062 struct ttm_buffer_object *bo,
1084 unsigned long size, 1063 unsigned long size,
@@ -1089,6 +1068,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1089 struct file *persistent_swap_storage, 1068 struct file *persistent_swap_storage,
1090 size_t acc_size, 1069 size_t acc_size,
1091 struct sg_table *sg, 1070 struct sg_table *sg,
1071 struct reservation_object *resv,
1092 void (*destroy) (struct ttm_buffer_object *)) 1072 void (*destroy) (struct ttm_buffer_object *))
1093{ 1073{
1094 int ret = 0; 1074 int ret = 0;
@@ -1142,30 +1122,38 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1142 bo->persistent_swap_storage = persistent_swap_storage; 1122 bo->persistent_swap_storage = persistent_swap_storage;
1143 bo->acc_size = acc_size; 1123 bo->acc_size = acc_size;
1144 bo->sg = sg; 1124 bo->sg = sg;
1145 bo->resv = &bo->ttm_resv; 1125 if (resv) {
1146 reservation_object_init(bo->resv); 1126 bo->resv = resv;
1127 lockdep_assert_held(&bo->resv->lock.base);
1128 } else {
1129 bo->resv = &bo->ttm_resv;
1130 reservation_object_init(&bo->ttm_resv);
1131 }
1147 atomic_inc(&bo->glob->bo_count); 1132 atomic_inc(&bo->glob->bo_count);
1148 drm_vma_node_reset(&bo->vma_node); 1133 drm_vma_node_reset(&bo->vma_node);
1149 1134
1150 ret = ttm_bo_check_placement(bo, placement);
1151
1152 /* 1135 /*
1153 * For ttm_bo_type_device buffers, allocate 1136 * For ttm_bo_type_device buffers, allocate
1154 * address space from the device. 1137 * address space from the device.
1155 */ 1138 */
1156 if (likely(!ret) && 1139 if (bo->type == ttm_bo_type_device ||
1157 (bo->type == ttm_bo_type_device || 1140 bo->type == ttm_bo_type_sg)
1158 bo->type == ttm_bo_type_sg))
1159 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, 1141 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1160 bo->mem.num_pages); 1142 bo->mem.num_pages);
1161 1143
1162 locked = ww_mutex_trylock(&bo->resv->lock); 1144 /* passed reservation objects should already be locked,
1163 WARN_ON(!locked); 1145 * since otherwise lockdep will be angered in radeon.
1146 */
1147 if (!resv) {
1148 locked = ww_mutex_trylock(&bo->resv->lock);
1149 WARN_ON(!locked);
1150 }
1164 1151
1165 if (likely(!ret)) 1152 if (likely(!ret))
1166 ret = ttm_bo_validate(bo, placement, interruptible, false); 1153 ret = ttm_bo_validate(bo, placement, interruptible, false);
1167 1154
1168 ttm_bo_unreserve(bo); 1155 if (!resv)
1156 ttm_bo_unreserve(bo);
1169 1157
1170 if (unlikely(ret)) 1158 if (unlikely(ret))
1171 ttm_bo_unref(&bo); 1159 ttm_bo_unref(&bo);
@@ -1223,7 +1211,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1223 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1211 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1224 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1212 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1225 interruptible, persistent_swap_storage, acc_size, 1213 interruptible, persistent_swap_storage, acc_size,
1226 NULL, NULL); 1214 NULL, NULL, NULL);
1227 if (likely(ret == 0)) 1215 if (likely(ret == 0))
1228 *p_bo = bo; 1216 *p_bo = bo;
1229 1217
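
ttm_bo_init() grows a reservation_object parameter here. Passing NULL keeps the old behaviour: the BO's embedded reservation object is initialized and trylocked. Passing an existing object lets the BO share fencing state with, for example, a DMA-buf, and the caller must already hold that object's lock (hence the lockdep assertion above). A hedged sketch of the new call, with the surrounding variables assumed to exist:

    /* 'resv' was obtained elsewhere (e.g. from a dma-buf) and is held locked. */
    ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, &placement,
                      page_alignment, false /* interruptible */,
                      NULL /* persistent_swap_storage */, acc_size,
                      NULL /* sg */, resv, NULL /* destroy */);
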
@@ -1477,7 +1465,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1477 bdev->glob = glob; 1465 bdev->glob = glob;
1478 bdev->need_dma32 = need_dma32; 1466 bdev->need_dma32 = need_dma32;
1479 bdev->val_seq = 0; 1467 bdev->val_seq = 0;
1480 spin_lock_init(&bdev->fence_lock);
1481 mutex_lock(&glob->device_list_mutex); 1468 mutex_lock(&glob->device_list_mutex);
1482 list_add_tail(&bdev->device_list, &glob->device_list); 1469 list_add_tail(&bdev->device_list, &glob->device_list);
1483 mutex_unlock(&glob->device_list_mutex); 1470 mutex_unlock(&glob->device_list_mutex);
@@ -1530,65 +1517,56 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1530 1517
1531EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1518EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1532 1519
1533
1534int ttm_bo_wait(struct ttm_buffer_object *bo, 1520int ttm_bo_wait(struct ttm_buffer_object *bo,
1535 bool lazy, bool interruptible, bool no_wait) 1521 bool lazy, bool interruptible, bool no_wait)
1536{ 1522{
1537 struct ttm_bo_driver *driver = bo->bdev->driver; 1523 struct reservation_object_list *fobj;
1538 struct ttm_bo_device *bdev = bo->bdev; 1524 struct reservation_object *resv;
1539 void *sync_obj; 1525 struct fence *excl;
1540 int ret = 0; 1526 long timeout = 15 * HZ;
1541 1527 int i;
1542 if (likely(bo->sync_obj == NULL))
1543 return 0;
1544 1528
1545 while (bo->sync_obj) { 1529 resv = bo->resv;
1530 fobj = reservation_object_get_list(resv);
1531 excl = reservation_object_get_excl(resv);
1532 if (excl) {
1533 if (!fence_is_signaled(excl)) {
1534 if (no_wait)
1535 return -EBUSY;
1546 1536
1547 if (driver->sync_obj_signaled(bo->sync_obj)) { 1537 timeout = fence_wait_timeout(excl,
1548 void *tmp_obj = bo->sync_obj; 1538 interruptible, timeout);
1549 bo->sync_obj = NULL;
1550 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1551 spin_unlock(&bdev->fence_lock);
1552 driver->sync_obj_unref(&tmp_obj);
1553 spin_lock(&bdev->fence_lock);
1554 continue;
1555 } 1539 }
1540 }
1556 1541
1557 if (no_wait) 1542 for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
1558 return -EBUSY; 1543 struct fence *fence;
1544 fence = rcu_dereference_protected(fobj->shared[i],
1545 reservation_object_held(resv));
1559 1546
1560 sync_obj = driver->sync_obj_ref(bo->sync_obj); 1547 if (!fence_is_signaled(fence)) {
1561 spin_unlock(&bdev->fence_lock); 1548 if (no_wait)
1562 ret = driver->sync_obj_wait(sync_obj, 1549 return -EBUSY;
1563 lazy, interruptible); 1550
1564 if (unlikely(ret != 0)) { 1551 timeout = fence_wait_timeout(fence,
1565 driver->sync_obj_unref(&sync_obj); 1552 interruptible, timeout);
1566 spin_lock(&bdev->fence_lock);
1567 return ret;
1568 }
1569 spin_lock(&bdev->fence_lock);
1570 if (likely(bo->sync_obj == sync_obj)) {
1571 void *tmp_obj = bo->sync_obj;
1572 bo->sync_obj = NULL;
1573 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1574 &bo->priv_flags);
1575 spin_unlock(&bdev->fence_lock);
1576 driver->sync_obj_unref(&sync_obj);
1577 driver->sync_obj_unref(&tmp_obj);
1578 spin_lock(&bdev->fence_lock);
1579 } else {
1580 spin_unlock(&bdev->fence_lock);
1581 driver->sync_obj_unref(&sync_obj);
1582 spin_lock(&bdev->fence_lock);
1583 } 1553 }
1584 } 1554 }
1555
1556 if (timeout < 0)
1557 return timeout;
1558
1559 if (timeout == 0)
1560 return -EBUSY;
1561
1562 reservation_object_add_excl_fence(resv, NULL);
1563 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1585 return 0; 1564 return 0;
1586} 1565}
1587EXPORT_SYMBOL(ttm_bo_wait); 1566EXPORT_SYMBOL(ttm_bo_wait);
1588 1567
1589int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) 1568int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1590{ 1569{
1591 struct ttm_bo_device *bdev = bo->bdev;
1592 int ret = 0; 1570 int ret = 0;
1593 1571
1594 /* 1572 /*
@@ -1598,9 +1576,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1598 ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); 1576 ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
1599 if (unlikely(ret != 0)) 1577 if (unlikely(ret != 0))
1600 return ret; 1578 return ret;
1601 spin_lock(&bdev->fence_lock);
1602 ret = ttm_bo_wait(bo, false, true, no_wait); 1579 ret = ttm_bo_wait(bo, false, true, no_wait);
1603 spin_unlock(&bdev->fence_lock);
1604 if (likely(ret == 0)) 1580 if (likely(ret == 0))
1605 atomic_inc(&bo->cpu_writers); 1581 atomic_inc(&bo->cpu_writers);
1606 ttm_bo_unreserve(bo); 1582 ttm_bo_unreserve(bo);
@@ -1657,9 +1633,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1657 * Wait for GPU, then move to system cached. 1633 * Wait for GPU, then move to system cached.
1658 */ 1634 */
1659 1635
1660 spin_lock(&bo->bdev->fence_lock);
1661 ret = ttm_bo_wait(bo, false, false, false); 1636 ret = ttm_bo_wait(bo, false, false, false);
1662 spin_unlock(&bo->bdev->fence_lock);
1663 1637
1664 if (unlikely(ret != 0)) 1638 if (unlikely(ret != 0))
1665 goto out; 1639 goto out;
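
The ttm_bo.c hunks above are the heart of the TTM conversion: where ttm_bo_wait() used to spin on a driver-owned sync_obj under bdev->fence_lock, it now walks the buffer's reservation object, waiting first on the exclusive fence and then on each shared fence, all through the generic struct fence API. The logic condensed into a standalone helper (a sketch of the code above, not an exported function):

    #include <linux/fence.h>
    #include <linux/reservation.h>

    /* Wait up to 15s for every fence on 'resv'; caller holds the reservation. */
    static long foo_wait_all_fences(struct reservation_object *resv, bool intr)
    {
            struct reservation_object_list *fobj = reservation_object_get_list(resv);
            struct fence *excl = reservation_object_get_excl(resv);
            long timeout = 15 * HZ;
            unsigned int i;

            if (excl && !fence_is_signaled(excl))
                    timeout = fence_wait_timeout(excl, intr, timeout);

            for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
                    struct fence *f =
                            rcu_dereference_protected(fobj->shared[i],
                                                      reservation_object_held(resv));

                    if (!fence_is_signaled(f))
                            timeout = fence_wait_timeout(f, intr, timeout);
            }

            if (timeout < 0)
                    return timeout;         /* interrupted */
            return timeout ? 0 : -EBUSY;    /* 0 means the 15s budget ran out */
    }
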
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9e103a4875c8..964387fc5c8f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -49,8 +49,7 @@ struct ttm_range_manager {
49 49
50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, 50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
51 struct ttm_buffer_object *bo, 51 struct ttm_buffer_object *bo,
52 struct ttm_placement *placement, 52 const struct ttm_place *place,
53 uint32_t flags,
54 struct ttm_mem_reg *mem) 53 struct ttm_mem_reg *mem)
55{ 54{
56 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; 55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
60 unsigned long lpfn; 59 unsigned long lpfn;
61 int ret; 60 int ret;
62 61
63 lpfn = placement->lpfn; 62 lpfn = place->lpfn;
64 if (!lpfn) 63 if (!lpfn)
65 lpfn = man->size; 64 lpfn = man->size;
66 65
@@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
68 if (!node) 67 if (!node)
69 return -ENOMEM; 68 return -ENOMEM;
70 69
71 if (flags & TTM_PL_FLAG_TOPDOWN) 70 if (place->flags & TTM_PL_FLAG_TOPDOWN)
72 aflags = DRM_MM_CREATE_TOP; 71 aflags = DRM_MM_CREATE_TOP;
73 72
74 spin_lock(&rman->lock); 73 spin_lock(&rman->lock);
75 ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages, 74 ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
76 mem->page_alignment, 0, 75 mem->page_alignment, 0,
77 placement->fpfn, lpfn, 76 place->fpfn, lpfn,
78 DRM_MM_SEARCH_BEST, 77 DRM_MM_SEARCH_BEST,
79 aflags); 78 aflags);
80 spin_unlock(&rman->lock); 79 spin_unlock(&rman->lock);
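
This is the manager-side counterpart of the placement rework: get_node() now receives a single const struct ttm_place, which bundles the page-frame range (fpfn/lpfn) together with the placement flags that previously travelled as a separate uint32_t. Drivers build placements roughly like this (a sketch; the flag choice is illustrative):

    #include <drm/ttm/ttm_placement.h>

    static const struct ttm_place vram_place = {
            .fpfn  = 0,                     /* first allowed page frame */
            .lpfn  = 0,                     /* 0 == no upper bound */
            .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
    };

    static const struct ttm_placement vram_placement = {
            .num_placement      = 1,
            .placement          = &vram_place,
            .num_busy_placement = 1,
            .busy_placement     = &vram_place,
    };
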
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 30e5d90cb7bc..882cccdad272 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -37,6 +37,7 @@
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/reservation.h>
40 41
41void ttm_bo_free_old_node(struct ttm_buffer_object *bo) 42void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
42{ 43{
@@ -444,8 +445,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
444 struct ttm_buffer_object **new_obj) 445 struct ttm_buffer_object **new_obj)
445{ 446{
446 struct ttm_buffer_object *fbo; 447 struct ttm_buffer_object *fbo;
447 struct ttm_bo_device *bdev = bo->bdev;
448 struct ttm_bo_driver *driver = bdev->driver;
449 int ret; 448 int ret;
450 449
451 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); 450 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
@@ -466,12 +465,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
466 drm_vma_node_reset(&fbo->vma_node); 465 drm_vma_node_reset(&fbo->vma_node);
467 atomic_set(&fbo->cpu_writers, 0); 466 atomic_set(&fbo->cpu_writers, 0);
468 467
469 spin_lock(&bdev->fence_lock);
470 if (bo->sync_obj)
471 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
472 else
473 fbo->sync_obj = NULL;
474 spin_unlock(&bdev->fence_lock);
475 kref_init(&fbo->list_kref); 468 kref_init(&fbo->list_kref);
476 kref_init(&fbo->kref); 469 kref_init(&fbo->kref);
477 fbo->destroy = &ttm_transfered_destroy; 470 fbo->destroy = &ttm_transfered_destroy;
@@ -487,28 +480,24 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
487 480
488pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) 481pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
489{ 482{
483 /* Cached mappings need no adjustment */
484 if (caching_flags & TTM_PL_FLAG_CACHED)
485 return tmp;
486
490#if defined(__i386__) || defined(__x86_64__) 487#if defined(__i386__) || defined(__x86_64__)
491 if (caching_flags & TTM_PL_FLAG_WC) 488 if (caching_flags & TTM_PL_FLAG_WC)
492 tmp = pgprot_writecombine(tmp); 489 tmp = pgprot_writecombine(tmp);
493 else if (boot_cpu_data.x86 > 3) 490 else if (boot_cpu_data.x86 > 3)
494 tmp = pgprot_noncached(tmp); 491 tmp = pgprot_noncached(tmp);
495
496#elif defined(__powerpc__)
497 if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
498 pgprot_val(tmp) |= _PAGE_NO_CACHE;
499 if (caching_flags & TTM_PL_FLAG_UNCACHED)
500 pgprot_val(tmp) |= _PAGE_GUARDED;
501 }
502#endif 492#endif
503#if defined(__ia64__) || defined(__arm__) 493#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
504 if (caching_flags & TTM_PL_FLAG_WC) 494 if (caching_flags & TTM_PL_FLAG_WC)
505 tmp = pgprot_writecombine(tmp); 495 tmp = pgprot_writecombine(tmp);
506 else 496 else
507 tmp = pgprot_noncached(tmp); 497 tmp = pgprot_noncached(tmp);
508#endif 498#endif
509#if defined(__sparc__) || defined(__mips__) 499#if defined(__sparc__) || defined(__mips__)
510 if (!(caching_flags & TTM_PL_FLAG_CACHED)) 500 tmp = pgprot_noncached(tmp);
511 tmp = pgprot_noncached(tmp);
512#endif 501#endif
513 return tmp; 502 return tmp;
514} 503}
@@ -567,9 +556,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
567 * We need to use vmap to get the desired page protection 556 * We need to use vmap to get the desired page protection
568 * or to make the buffer object look contiguous. 557 * or to make the buffer object look contiguous.
569 */ 558 */
570 prot = (mem->placement & TTM_PL_FLAG_CACHED) ? 559 prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
571 PAGE_KERNEL :
572 ttm_io_prot(mem->placement, PAGE_KERNEL);
573 map->bo_kmap_type = ttm_bo_map_vmap; 560 map->bo_kmap_type = ttm_bo_map_vmap;
574 map->virtual = vmap(ttm->pages + start_page, num_pages, 561 map->virtual = vmap(ttm->pages + start_page, num_pages,
575 0, prot); 562 0, prot);
@@ -644,30 +631,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
644EXPORT_SYMBOL(ttm_bo_kunmap); 631EXPORT_SYMBOL(ttm_bo_kunmap);
645 632
646int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 633int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
647 void *sync_obj, 634 struct fence *fence,
648 bool evict, 635 bool evict,
649 bool no_wait_gpu, 636 bool no_wait_gpu,
650 struct ttm_mem_reg *new_mem) 637 struct ttm_mem_reg *new_mem)
651{ 638{
652 struct ttm_bo_device *bdev = bo->bdev; 639 struct ttm_bo_device *bdev = bo->bdev;
653 struct ttm_bo_driver *driver = bdev->driver;
654 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 640 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
655 struct ttm_mem_reg *old_mem = &bo->mem; 641 struct ttm_mem_reg *old_mem = &bo->mem;
656 int ret; 642 int ret;
657 struct ttm_buffer_object *ghost_obj; 643 struct ttm_buffer_object *ghost_obj;
658 void *tmp_obj = NULL;
659 644
660 spin_lock(&bdev->fence_lock); 645 reservation_object_add_excl_fence(bo->resv, fence);
661 if (bo->sync_obj) {
662 tmp_obj = bo->sync_obj;
663 bo->sync_obj = NULL;
664 }
665 bo->sync_obj = driver->sync_obj_ref(sync_obj);
666 if (evict) { 646 if (evict) {
667 ret = ttm_bo_wait(bo, false, false, false); 647 ret = ttm_bo_wait(bo, false, false, false);
668 spin_unlock(&bdev->fence_lock);
669 if (tmp_obj)
670 driver->sync_obj_unref(&tmp_obj);
671 if (ret) 648 if (ret)
672 return ret; 649 return ret;
673 650
@@ -688,14 +665,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
688 */ 665 */
689 666
690 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 667 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
691 spin_unlock(&bdev->fence_lock);
692 if (tmp_obj)
693 driver->sync_obj_unref(&tmp_obj);
694 668
695 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 669 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
696 if (ret) 670 if (ret)
697 return ret; 671 return ret;
698 672
673 reservation_object_add_excl_fence(ghost_obj->resv, fence);
674
699 /** 675 /**
700 * If we're not moving to fixed memory, the TTM object 676 * If we're not moving to fixed memory, the TTM object
 701 * needs to stay alive. Otherwise hang it on the ghost 677
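With the sync_obj plumbing gone from ttm_buffer_object_transfer() and ttm_bo_move_accel_cleanup(), a driver's accelerated move hands TTM the copy's fence directly, and TTM attaches it to the reservation object (and to the ghost object when the move is pipelined). A hedged sketch of the driver-side pattern; driver_copy_submit() is a hypothetical helper standing in for whatever queues the blit and returns its fence:

    static int driver_bo_move(struct ttm_buffer_object *bo, bool evict,
                              bool no_wait_gpu, struct ttm_mem_reg *new_mem)
    {
            /* Hypothetical: emit the copy, return the fence for it. */
            struct fence *fence = driver_copy_submit(bo, &bo->mem, new_mem);

            if (IS_ERR(fence))
                    return PTR_ERR(fence);

            /* TTM installs the fence as the exclusive fence on bo->resv. */
            return ttm_bo_move_accel_cleanup(bo, fence, evict,
                                             no_wait_gpu, new_mem);
    }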
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ce48e5a9cb4..8fb7213277cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -45,10 +45,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
45 struct vm_area_struct *vma, 45 struct vm_area_struct *vma,
46 struct vm_fault *vmf) 46 struct vm_fault *vmf)
47{ 47{
48 struct ttm_bo_device *bdev = bo->bdev;
49 int ret = 0; 48 int ret = 0;
50 49
51 spin_lock(&bdev->fence_lock);
52 if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))) 50 if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
53 goto out_unlock; 51 goto out_unlock;
54 52
@@ -82,7 +80,6 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
82 VM_FAULT_NOPAGE; 80 VM_FAULT_NOPAGE;
83 81
84out_unlock: 82out_unlock:
85 spin_unlock(&bdev->fence_lock);
86 return ret; 83 return ret;
87} 84}
88 85
@@ -200,9 +197,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
200 cvma.vm_page_prot); 197 cvma.vm_page_prot);
201 } else { 198 } else {
202 ttm = bo->ttm; 199 ttm = bo->ttm;
203 if (!(bo->mem.placement & TTM_PL_FLAG_CACHED)) 200 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
204 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, 201 cvma.vm_page_prot);
205 cvma.vm_page_prot);
206 202
 207 /* Allocate all pages at once, most common usage */ 203
208 if (ttm->bdev->driver->ttm_tt_populate(ttm)) { 204 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
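The fault-path simplification works because ttm_io_prot() now short-circuits cached placements itself: for TTM_PL_FLAG_CACHED it returns the passed-in protection unchanged. Illustratively (this check does not exist in the tree; it only states the new invariant):

    pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_CACHED, PAGE_KERNEL);
    /* prot == PAGE_KERNEL: cached placements need no adjustment, so
     * both the kmap path and this fault path call ttm_io_prot()
     * unconditionally instead of special-casing TTM_PL_FLAG_CACHED. */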
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e8dac8758528..8ce508e76208 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,20 +32,12 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/module.h> 33#include <linux/module.h>
34 34
35static void ttm_eu_backoff_reservation_locked(struct list_head *list) 35static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
36 struct ttm_validate_buffer *entry)
36{ 37{
37 struct ttm_validate_buffer *entry; 38 list_for_each_entry_continue_reverse(entry, list, head) {
38
39 list_for_each_entry(entry, list, head) {
40 struct ttm_buffer_object *bo = entry->bo; 39 struct ttm_buffer_object *bo = entry->bo;
41 if (!entry->reserved)
42 continue;
43 40
44 entry->reserved = false;
45 if (entry->removed) {
46 ttm_bo_add_to_lru(bo);
47 entry->removed = false;
48 }
49 __ttm_bo_unreserve(bo); 41 __ttm_bo_unreserve(bo);
50 } 42 }
51} 43}
@@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
56 48
57 list_for_each_entry(entry, list, head) { 49 list_for_each_entry(entry, list, head) {
58 struct ttm_buffer_object *bo = entry->bo; 50 struct ttm_buffer_object *bo = entry->bo;
59 if (!entry->reserved) 51 unsigned put_count = ttm_bo_del_from_lru(bo);
60 continue;
61
62 if (!entry->removed) {
63 entry->put_count = ttm_bo_del_from_lru(bo);
64 entry->removed = true;
65 }
66 }
67}
68
69static void ttm_eu_list_ref_sub(struct list_head *list)
70{
71 struct ttm_validate_buffer *entry;
72
73 list_for_each_entry(entry, list, head) {
74 struct ttm_buffer_object *bo = entry->bo;
75 52
76 if (entry->put_count) { 53 ttm_bo_list_ref_sub(bo, put_count, true);
77 ttm_bo_list_ref_sub(bo, entry->put_count, true);
78 entry->put_count = 0;
79 }
80 } 54 }
81} 55}
82 56
@@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
91 65
92 entry = list_first_entry(list, struct ttm_validate_buffer, head); 66 entry = list_first_entry(list, struct ttm_validate_buffer, head);
93 glob = entry->bo->glob; 67 glob = entry->bo->glob;
68
94 spin_lock(&glob->lru_lock); 69 spin_lock(&glob->lru_lock);
95 ttm_eu_backoff_reservation_locked(list); 70 list_for_each_entry(entry, list, head) {
71 struct ttm_buffer_object *bo = entry->bo;
72
73 ttm_bo_add_to_lru(bo);
74 __ttm_bo_unreserve(bo);
75 }
76 spin_unlock(&glob->lru_lock);
77
96 if (ticket) 78 if (ticket)
97 ww_acquire_fini(ticket); 79 ww_acquire_fini(ticket);
98 spin_unlock(&glob->lru_lock);
99} 80}
100EXPORT_SYMBOL(ttm_eu_backoff_reservation); 81EXPORT_SYMBOL(ttm_eu_backoff_reservation);
101 82
@@ -112,7 +93,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
112 */ 93 */
113 94
114int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 95int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
115 struct list_head *list) 96 struct list_head *list, bool intr)
116{ 97{
117 struct ttm_bo_global *glob; 98 struct ttm_bo_global *glob;
118 struct ttm_validate_buffer *entry; 99 struct ttm_validate_buffer *entry;
@@ -121,60 +102,64 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
121 if (list_empty(list)) 102 if (list_empty(list))
122 return 0; 103 return 0;
123 104
124 list_for_each_entry(entry, list, head) {
125 entry->reserved = false;
126 entry->put_count = 0;
127 entry->removed = false;
128 }
129
130 entry = list_first_entry(list, struct ttm_validate_buffer, head); 105 entry = list_first_entry(list, struct ttm_validate_buffer, head);
131 glob = entry->bo->glob; 106 glob = entry->bo->glob;
132 107
133 if (ticket) 108 if (ticket)
134 ww_acquire_init(ticket, &reservation_ww_class); 109 ww_acquire_init(ticket, &reservation_ww_class);
135retry: 110
136 list_for_each_entry(entry, list, head) { 111 list_for_each_entry(entry, list, head) {
137 struct ttm_buffer_object *bo = entry->bo; 112 struct ttm_buffer_object *bo = entry->bo;
138 113
139 /* already slowpath reserved? */ 114 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
140 if (entry->reserved)
141 continue;
142
143 ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
144 ticket); 115 ticket);
116 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
117 __ttm_bo_unreserve(bo);
118
119 ret = -EBUSY;
120 }
145 121
146 if (ret == -EDEADLK) { 122 if (!ret) {
147 /* uh oh, we lost out, drop every reservation and try 123 if (!entry->shared)
148 * to only reserve this buffer, then start over if 124 continue;
149 * this succeeds. 125
150 */ 126 ret = reservation_object_reserve_shared(bo->resv);
151 BUG_ON(ticket == NULL); 127 if (!ret)
152 spin_lock(&glob->lru_lock); 128 continue;
153 ttm_eu_backoff_reservation_locked(list); 129 }
154 spin_unlock(&glob->lru_lock); 130
155 ttm_eu_list_ref_sub(list); 131 /* uh oh, we lost out, drop every reservation and try
132 * to only reserve this buffer, then start over if
133 * this succeeds.
134 */
135 ttm_eu_backoff_reservation_reverse(list, entry);
136
137 if (ret == -EDEADLK && intr) {
156 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, 138 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
157 ticket); 139 ticket);
158 if (unlikely(ret != 0)) { 140 } else if (ret == -EDEADLK) {
159 if (ret == -EINTR) 141 ww_mutex_lock_slow(&bo->resv->lock, ticket);
160 ret = -ERESTARTSYS; 142 ret = 0;
161 goto err_fini; 143 }
162 }
163 144
164 entry->reserved = true; 145 if (!ret && entry->shared)
165 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 146 ret = reservation_object_reserve_shared(bo->resv);
166 ret = -EBUSY;
167 goto err;
168 }
169 goto retry;
170 } else if (ret)
171 goto err;
172 147
173 entry->reserved = true; 148 if (unlikely(ret != 0)) {
174 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 149 if (ret == -EINTR)
175 ret = -EBUSY; 150 ret = -ERESTARTSYS;
176 goto err; 151 if (ticket) {
152 ww_acquire_done(ticket);
153 ww_acquire_fini(ticket);
154 }
155 return ret;
177 } 156 }
157
158 /* move this item to the front of the list,
159 * forces correct iteration of the loop without keeping track
160 */
161 list_del(&entry->head);
162 list_add(&entry->head, list);
178 } 163 }
179 164
180 if (ticket) 165 if (ticket)
@@ -182,25 +167,12 @@ retry:
182 spin_lock(&glob->lru_lock); 167 spin_lock(&glob->lru_lock);
183 ttm_eu_del_from_lru_locked(list); 168 ttm_eu_del_from_lru_locked(list);
184 spin_unlock(&glob->lru_lock); 169 spin_unlock(&glob->lru_lock);
185 ttm_eu_list_ref_sub(list);
186 return 0; 170 return 0;
187
188err:
189 spin_lock(&glob->lru_lock);
190 ttm_eu_backoff_reservation_locked(list);
191 spin_unlock(&glob->lru_lock);
192 ttm_eu_list_ref_sub(list);
193err_fini:
194 if (ticket) {
195 ww_acquire_done(ticket);
196 ww_acquire_fini(ticket);
197 }
198 return ret;
199} 171}
200EXPORT_SYMBOL(ttm_eu_reserve_buffers); 172EXPORT_SYMBOL(ttm_eu_reserve_buffers);
201 173
202void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, 174void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
203 struct list_head *list, void *sync_obj) 175 struct list_head *list, struct fence *fence)
204{ 176{
205 struct ttm_validate_buffer *entry; 177 struct ttm_validate_buffer *entry;
206 struct ttm_buffer_object *bo; 178 struct ttm_buffer_object *bo;
@@ -217,24 +189,18 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
217 glob = bo->glob; 189 glob = bo->glob;
218 190
219 spin_lock(&glob->lru_lock); 191 spin_lock(&glob->lru_lock);
220 spin_lock(&bdev->fence_lock);
221 192
222 list_for_each_entry(entry, list, head) { 193 list_for_each_entry(entry, list, head) {
223 bo = entry->bo; 194 bo = entry->bo;
224 entry->old_sync_obj = bo->sync_obj; 195 if (entry->shared)
225 bo->sync_obj = driver->sync_obj_ref(sync_obj); 196 reservation_object_add_shared_fence(bo->resv, fence);
197 else
198 reservation_object_add_excl_fence(bo->resv, fence);
226 ttm_bo_add_to_lru(bo); 199 ttm_bo_add_to_lru(bo);
227 __ttm_bo_unreserve(bo); 200 __ttm_bo_unreserve(bo);
228 entry->reserved = false;
229 } 201 }
230 spin_unlock(&bdev->fence_lock);
231 spin_unlock(&glob->lru_lock); 202 spin_unlock(&glob->lru_lock);
232 if (ticket) 203 if (ticket)
233 ww_acquire_fini(ticket); 204 ww_acquire_fini(ticket);
234
235 list_for_each_entry(entry, list, head) {
236 if (entry->old_sync_obj)
237 driver->sync_obj_unref(&entry->old_sync_obj);
238 }
239} 205}
240EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); 206EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
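Taken together, the execbuf-util hunks change the calling contract: the per-entry bookkeeping fields (reserved, removed, put_count, old_sync_obj) are gone, interruptibility is an explicit argument, deadlock backoff walks the list in reverse from the failing entry, and fencing picks a shared or exclusive reservation slot per entry. A sketch of the resulting caller pattern, where fence stands for the submission's struct fence:

    struct ww_acquire_ctx ticket;
    LIST_HEAD(list);   /* struct ttm_validate_buffer entries; the caller
                        * now only fills in ->bo and ->shared */
    int ret;

    ret = ttm_eu_reserve_buffers(&ticket, &list, true /* interruptible */);
    if (ret)
            return ret;   /* backoff was already done internally */

    /* ... validate buffers, build and submit the command stream ... */

    /* Adds the fence to each reservation object (shared or exclusive,
     * per entry->shared), puts the BOs back on the LRU and unreserves. */
    ttm_eu_fence_buffer_objects(&ticket, &list, fence);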
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index dbc2def887cd..a1803fbcc898 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -300,7 +300,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
300 zone->glob = glob; 300 zone->glob = glob;
301 glob->zone_highmem = zone; 301 glob->zone_highmem = zone;
302 ret = kobject_init_and_add( 302 ret = kobject_init_and_add(
303 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); 303 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
304 zone->name);
304 if (unlikely(ret != 0)) { 305 if (unlikely(ret != 0)) {
305 kobject_put(&zone->kobj); 306 kobject_put(&zone->kobj);
306 return ret; 307 return ret;
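The one-liner above is a format-string fix: kobject_init_and_add() takes a printf-style format, so passing zone->name as the format would misparse any '%' the name contains (and warns under -Wformat-security). The safe idiom is to pass the name as an argument:

    ret = kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
                               &glob->kobj, "%s", zone->name);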
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index f02528686cd5..613ab0622d6e 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,8 +1,9 @@
1config DRM_UDL 1config DRM_UDL
2 tristate "DisplayLink" 2 tristate "DisplayLink"
3 depends on DRM 3 depends on DRM
4 depends on USB_SUPPORT
4 depends on USB_ARCH_HAS_HCD 5 depends on USB_ARCH_HAS_HCD
5 select DRM_USB 6 select USB
6 select FB_SYS_FILLRECT 7 select FB_SYS_FILLRECT
7 select FB_SYS_COPYAREA 8 select FB_SYS_COPYAREA
8 select FB_SYS_IMAGEBLIT 9 select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e026a9e2942a..0110d95522f3 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -34,8 +34,8 @@ static u8 *udl_get_edid(struct udl_device *udl)
34 goto error; 34 goto error;
35 35
36 for (i = 0; i < EDID_LENGTH; i++) { 36 for (i = 0; i < EDID_LENGTH; i++) {
37 ret = usb_control_msg(udl->ddev->usbdev, 37 ret = usb_control_msg(udl->udev,
38 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02), 38 usb_rcvctrlpipe(udl->udev, 0), (0x02),
39 (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, 39 (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
40 HZ); 40 HZ);
41 if (ret < 1) { 41 if (ret < 1) {
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 3ddd6cd98ac1..8607e9e513db 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -7,48 +7,13 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <drm/drm_usb.h> 10#include <drm/drmP.h>
11#include <drm/drm_crtc_helper.h> 11#include <drm/drm_crtc_helper.h>
12#include "udl_drv.h" 12#include "udl_drv.h"
13 13
14static struct drm_driver driver; 14static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m)
15
16/*
17 * There are many DisplayLink-based graphics products, all with unique PIDs.
18 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
19 * We also require a match on SubClass (0x00) and Protocol (0x00),
20 * which is compatible with all known USB 2.0 era graphics chips and firmware,
21 * but allows DisplayLink to increment those for any future incompatible chips
22 */
23static struct usb_device_id id_table[] = {
24 {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
25 .bInterfaceSubClass = 0x00,
26 .bInterfaceProtocol = 0x00,
27 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
28 USB_DEVICE_ID_MATCH_INT_CLASS |
29 USB_DEVICE_ID_MATCH_INT_SUBCLASS |
30 USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
31 {},
32};
33MODULE_DEVICE_TABLE(usb, id_table);
34
35MODULE_LICENSE("GPL");
36
37static int udl_usb_probe(struct usb_interface *interface,
38 const struct usb_device_id *id)
39{ 15{
40 return drm_get_usb_dev(interface, id, &driver); 16 return 0;
41}
42
43static void udl_usb_disconnect(struct usb_interface *interface)
44{
45 struct drm_device *dev = usb_get_intfdata(interface);
46
47 drm_kms_helper_poll_disable(dev);
48 drm_connector_unplug_all(dev);
49 udl_fbdev_unplug(dev);
50 udl_drop_usb(dev);
51 drm_unplug_dev(dev);
52} 17}
53 18
54static const struct vm_operations_struct udl_gem_vm_ops = { 19static const struct vm_operations_struct udl_gem_vm_ops = {
@@ -75,6 +40,7 @@ static struct drm_driver driver = {
75 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 40 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
76 .load = udl_driver_load, 41 .load = udl_driver_load,
77 .unload = udl_driver_unload, 42 .unload = udl_driver_unload,
43 .set_busid = udl_driver_set_busid,
78 44
79 /* gem hooks */ 45 /* gem hooks */
80 .gem_free_object = udl_gem_free_object, 46 .gem_free_object = udl_gem_free_object,
@@ -96,6 +62,61 @@ static struct drm_driver driver = {
96 .patchlevel = DRIVER_PATCHLEVEL, 62 .patchlevel = DRIVER_PATCHLEVEL,
97}; 63};
98 64
65static int udl_usb_probe(struct usb_interface *interface,
66 const struct usb_device_id *id)
67{
68 struct usb_device *udev = interface_to_usbdev(interface);
69 struct drm_device *dev;
70 int r;
71
72 dev = drm_dev_alloc(&driver, &interface->dev);
73 if (!dev)
74 return -ENOMEM;
75
76 r = drm_dev_register(dev, (unsigned long)udev);
77 if (r)
78 goto err_free;
79
80 usb_set_intfdata(interface, dev);
81 DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
82
83 return 0;
84
85err_free:
86 drm_dev_unref(dev);
87 return r;
88}
89
90static void udl_usb_disconnect(struct usb_interface *interface)
91{
92 struct drm_device *dev = usb_get_intfdata(interface);
93
94 drm_kms_helper_poll_disable(dev);
95 drm_connector_unplug_all(dev);
96 udl_fbdev_unplug(dev);
97 udl_drop_usb(dev);
98 drm_unplug_dev(dev);
99}
100
101/*
102 * There are many DisplayLink-based graphics products, all with unique PIDs.
103 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
104 * We also require a match on SubClass (0x00) and Protocol (0x00),
105 * which is compatible with all known USB 2.0 era graphics chips and firmware,
106 * but allows DisplayLink to increment those for any future incompatible chips
107 */
108static struct usb_device_id id_table[] = {
109 {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
110 .bInterfaceSubClass = 0x00,
111 .bInterfaceProtocol = 0x00,
112 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
113 USB_DEVICE_ID_MATCH_INT_CLASS |
114 USB_DEVICE_ID_MATCH_INT_SUBCLASS |
115 USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
116 {},
117};
118MODULE_DEVICE_TABLE(usb, id_table);
119
99static struct usb_driver udl_driver = { 120static struct usb_driver udl_driver = {
100 .name = "udl", 121 .name = "udl",
101 .probe = udl_usb_probe, 122 .probe = udl_usb_probe,
@@ -105,13 +126,14 @@ static struct usb_driver udl_driver = {
105 126
106static int __init udl_init(void) 127static int __init udl_init(void)
107{ 128{
108 return drm_usb_init(&driver, &udl_driver); 129 return usb_register(&udl_driver);
109} 130}
110 131
111static void __exit udl_exit(void) 132static void __exit udl_exit(void)
112{ 133{
113 drm_usb_exit(&driver, &udl_driver); 134 usb_deregister(&udl_driver);
114} 135}
115 136
116module_init(udl_init); 137module_init(udl_init);
117module_exit(udl_exit); 138module_exit(udl_exit);
139MODULE_LICENSE("GPL");
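udl was the last drm_usb user, so the midlayer goes: the driver now registers a plain usb_driver, and probe creates and registers the DRM device itself. The hunk above, condensed to its skeleton:

    static int udl_usb_probe(struct usb_interface *interface,
                             const struct usb_device_id *id)
    {
            struct drm_device *dev;
            int r;

            dev = drm_dev_alloc(&driver, &interface->dev);
            if (!dev)
                    return -ENOMEM;

            /* The usb_device travels through the 'flags' argument and
             * is recovered in udl_driver_load(). */
            r = drm_dev_register(dev,
                        (unsigned long)interface_to_usbdev(interface));
            if (r) {
                    drm_dev_unref(dev);
                    return r;
            }

            usb_set_intfdata(interface, dev);
            return 0;
    }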
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1fbf7b357f16..c7490a2489a7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -15,6 +15,7 @@
15#define UDL_DRV_H 15#define UDL_DRV_H
16 16
17#include <linux/usb.h> 17#include <linux/usb.h>
18#include <drm/drm_gem.h>
18 19
19#define DRIVER_NAME "udl" 20#define DRIVER_NAME "udl"
20#define DRIVER_DESC "DisplayLink" 21#define DRIVER_DESC "DisplayLink"
@@ -47,6 +48,7 @@ struct udl_fbdev;
47struct udl_device { 48struct udl_device {
48 struct device *dev; 49 struct device *dev;
49 struct drm_device *ddev; 50 struct drm_device *ddev;
51 struct usb_device *udev;
50 52
51 int sku_pixel_limit; 53 int sku_pixel_limit;
52 54
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d1da339843ca..8cbcb4589bd3 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -472,7 +472,8 @@ udl_framebuffer_init(struct drm_device *dev,
472static int udlfb_create(struct drm_fb_helper *helper, 472static int udlfb_create(struct drm_fb_helper *helper,
473 struct drm_fb_helper_surface_size *sizes) 473 struct drm_fb_helper_surface_size *sizes)
474{ 474{
475 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper; 475 struct udl_fbdev *ufbdev =
476 container_of(helper, struct udl_fbdev, helper);
476 struct drm_device *dev = ufbdev->helper.dev; 477 struct drm_device *dev = ufbdev->helper.dev;
477 struct fb_info *info; 478 struct fb_info *info;
478 struct device *device = dev->dev; 479 struct device *device = dev->dev;
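The udlfb_create() change replaces a pointer cast with container_of(). The cast is only correct while helper stays the first member of struct udl_fbdev; container_of() works for any member offset:

    struct udl_fbdev {
            struct drm_fb_helper helper;   /* need not remain first */
            /* ... */
    };

    struct udl_fbdev *ufbdev =
            container_of(helper, struct udl_fbdev, helper);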
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 42795674bc07..33dbfb2c4748 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -202,7 +202,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
202 } 202 }
203 unode->urb = urb; 203 unode->urb = urb;
204 204
205 buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL, 205 buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
206 &urb->transfer_dma); 206 &urb->transfer_dma);
207 if (!buf) { 207 if (!buf) {
208 kfree(unode); 208 kfree(unode);
@@ -211,7 +211,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
211 } 211 }
212 212
213 /* urb->transfer_buffer_length set to actual before submit */ 213 /* urb->transfer_buffer_length set to actual before submit */
214 usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1), 214 usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
215 buf, size, udl_urb_completion, unode); 215 buf, size, udl_urb_completion, unode);
216 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 216 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
217 217
@@ -282,6 +282,7 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
282 282
283int udl_driver_load(struct drm_device *dev, unsigned long flags) 283int udl_driver_load(struct drm_device *dev, unsigned long flags)
284{ 284{
285 struct usb_device *udev = (void*)flags;
285 struct udl_device *udl; 286 struct udl_device *udl;
286 int ret = -ENOMEM; 287 int ret = -ENOMEM;
287 288
@@ -290,10 +291,11 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
290 if (!udl) 291 if (!udl)
291 return -ENOMEM; 292 return -ENOMEM;
292 293
294 udl->udev = udev;
293 udl->ddev = dev; 295 udl->ddev = dev;
294 dev->dev_private = udl; 296 dev->dev_private = udl;
295 297
296 if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) { 298 if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
297 ret = -ENODEV; 299 ret = -ENODEV;
298 DRM_ERROR("firmware not recognized. Assume incompatible device\n"); 300 DRM_ERROR("firmware not recognized. Assume incompatible device\n");
299 goto err; 301 goto err;
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 6fc0648dd37f..d17d8f245c1a 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -161,7 +161,7 @@ int via_dma_cleanup(struct drm_device *dev)
161 if (dev_priv->ring.virtual_start) { 161 if (dev_priv->ring.virtual_start) {
162 via_cmdbuf_reset(dev_priv); 162 via_cmdbuf_reset(dev_priv);
163 163
164 drm_core_ioremapfree(&dev_priv->ring.map, dev); 164 drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
165 dev_priv->ring.virtual_start = NULL; 165 dev_priv->ring.virtual_start = NULL;
166 } 166 }
167 167
@@ -200,7 +200,7 @@ static int via_initialize(struct drm_device *dev,
200 dev_priv->ring.map.flags = 0; 200 dev_priv->ring.map.flags = 0;
201 dev_priv->ring.map.mtrr = 0; 201 dev_priv->ring.map.mtrr = 0;
202 202
203 drm_core_ioremap(&dev_priv->ring.map, dev); 203 drm_legacy_ioremap(&dev_priv->ring.map, dev);
204 204
205 if (dev_priv->ring.map.handle == NULL) { 205 if (dev_priv->ring.map.handle == NULL) {
206 via_dma_cleanup(dev); 206 via_dma_cleanup(dev);
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 50abc2adfaee..ed8aa8ff861a 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -62,7 +62,7 @@ static const struct file_operations via_driver_fops = {
62 .open = drm_open, 62 .open = drm_open,
63 .release = drm_release, 63 .release = drm_release,
64 .unlocked_ioctl = drm_ioctl, 64 .unlocked_ioctl = drm_ioctl,
65 .mmap = drm_mmap, 65 .mmap = drm_legacy_mmap,
66 .poll = drm_poll, 66 .poll = drm_poll,
67#ifdef CONFIG_COMPAT 67#ifdef CONFIG_COMPAT
68 .compat_ioctl = drm_compat_ioctl, 68 .compat_ioctl = drm_compat_ioctl,
@@ -79,6 +79,7 @@ static struct drm_driver driver = {
79 .open = via_driver_open, 79 .open = via_driver_open,
80 .preclose = via_reclaim_buffers_locked, 80 .preclose = via_reclaim_buffers_locked,
81 .postclose = via_driver_postclose, 81 .postclose = via_driver_postclose,
82 .set_busid = drm_pci_set_busid,
82 .context_dtor = via_final_context, 83 .context_dtor = via_final_context,
83 .get_vblank_counter = via_get_vblank_counter, 84 .get_vblank_counter = via_get_vblank_counter,
84 .enable_vblank = via_enable_vblank, 85 .enable_vblank = via_enable_vblank,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index ad0273256beb..ef8c500b4a00 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -25,6 +25,8 @@
25#define _VIA_DRV_H_ 25#define _VIA_DRV_H_
26 26
27#include <drm/drm_mm.h> 27#include <drm/drm_mm.h>
28#include <drm/drm_legacy.h>
29
28#define DRIVER_AUTHOR "Various" 30#define DRIVER_AUTHOR "Various"
29 31
30#define DRIVER_NAME "via" 32#define DRIVER_NAME "via"
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index d0ab3fb32acd..0b3522dba6e8 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -31,7 +31,7 @@ static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
31 31
32 DRM_DEBUG("\n"); 32 DRM_DEBUG("\n");
33 33
34 dev_priv->sarea = drm_getsarea(dev); 34 dev_priv->sarea = drm_legacy_getsarea(dev);
35 if (!dev_priv->sarea) { 35 if (!dev_priv->sarea) {
36 DRM_ERROR("could not find sarea!\n"); 36 DRM_ERROR("could not find sarea!\n");
37 dev->dev_private = (void *)dev_priv; 37 dev->dev_private = (void *)dev_priv;
@@ -39,14 +39,14 @@ static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
39 return -EINVAL; 39 return -EINVAL;
40 } 40 }
41 41
42 dev_priv->fb = drm_core_findmap(dev, init->fb_offset); 42 dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
43 if (!dev_priv->fb) { 43 if (!dev_priv->fb) {
44 DRM_ERROR("could not find framebuffer!\n"); 44 DRM_ERROR("could not find framebuffer!\n");
45 dev->dev_private = (void *)dev_priv; 45 dev->dev_private = (void *)dev_priv;
46 via_do_cleanup_map(dev); 46 via_do_cleanup_map(dev);
47 return -EINVAL; 47 return -EINVAL;
48 } 48 }
49 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 49 dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
50 if (!dev_priv->mmio) { 50 if (!dev_priv->mmio) {
51 DRM_ERROR("could not find mmio region!\n"); 51 DRM_ERROR("could not find mmio region!\n");
52 dev->dev_private = (void *)dev_priv; 52 dev->dev_private = (void *)dev_priv;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index d70b1e1544bf..4f20742e7788 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -211,12 +211,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
211 if (!(file->minor->master && file->master->lock.hw_lock)) 211 if (!(file->minor->master && file->master->lock.hw_lock))
212 return; 212 return;
213 213
214 drm_idlelock_take(&file->master->lock); 214 drm_legacy_idlelock_take(&file->master->lock);
215 215
216 mutex_lock(&dev->struct_mutex); 216 mutex_lock(&dev->struct_mutex);
217 if (list_empty(&file_priv->obj_list)) { 217 if (list_empty(&file_priv->obj_list)) {
218 mutex_unlock(&dev->struct_mutex); 218 mutex_unlock(&dev->struct_mutex);
219 drm_idlelock_release(&file->master->lock); 219 drm_legacy_idlelock_release(&file->master->lock);
220 220
221 return; 221 return;
222 } 222 }
@@ -231,7 +231,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
231 } 231 }
232 mutex_unlock(&dev->struct_mutex); 232 mutex_unlock(&dev->struct_mutex);
233 233
234 drm_idlelock_release(&file->master->lock); 234 drm_legacy_idlelock_release(&file->master->lock);
235 235
236 return; 236 return;
237} 237}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 9dbc92bd1512..0677bbf4ec7e 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -31,6 +31,7 @@
31#include "via_3d_reg.h" 31#include "via_3d_reg.h"
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/via_drm.h> 33#include <drm/via_drm.h>
34#include <drm/drm_legacy.h>
34#include "via_verifier.h" 35#include "via_verifier.h"
35#include "via_drv.h" 36#include "via_drv.h"
36 37
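The via hunks are mechanical renames: the UMS-era map and lock helpers move out of the DRM core namespace into <drm/drm_legacy.h>, which is why via_drv.h and via_verifier.c gain that include. The mapping, as visible in these hunks:

    drm_core_ioremap()      -> drm_legacy_ioremap()
    drm_core_ioremapfree()  -> drm_legacy_ioremapfree()
    drm_core_findmap()      -> drm_legacy_findmap()
    drm_getsarea()          -> drm_legacy_getsarea()
    drm_idlelock_take()     -> drm_legacy_idlelock_take()
    drm_idlelock_release()  -> drm_legacy_idlelock_release()
    drm_mmap()              -> drm_legacy_mmap()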
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 6327cfc36805..cff2bf9db9d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,66 +30,101 @@
30#include <drm/ttm/ttm_placement.h> 30#include <drm/ttm/ttm_placement.h>
31#include <drm/ttm/ttm_page_alloc.h> 31#include <drm/ttm/ttm_page_alloc.h>
32 32
33static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | 33static struct ttm_place vram_placement_flags = {
34 TTM_PL_FLAG_CACHED; 34 .fpfn = 0,
35 35 .lpfn = 0,
36static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | 36 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
37 TTM_PL_FLAG_CACHED | 37};
38 TTM_PL_FLAG_NO_EVICT;
39 38
40static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | 39static struct ttm_place vram_ne_placement_flags = {
41 TTM_PL_FLAG_CACHED; 40 .fpfn = 0,
41 .lpfn = 0,
42 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
43};
42 44
43static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | 45static struct ttm_place sys_placement_flags = {
44 TTM_PL_FLAG_CACHED | 46 .fpfn = 0,
45 TTM_PL_FLAG_NO_EVICT; 47 .lpfn = 0,
48 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
49};
46 50
47static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | 51static struct ttm_place sys_ne_placement_flags = {
48 TTM_PL_FLAG_CACHED; 52 .fpfn = 0,
53 .lpfn = 0,
54 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
55};
49 56
50static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | 57static struct ttm_place gmr_placement_flags = {
51 TTM_PL_FLAG_CACHED | 58 .fpfn = 0,
52 TTM_PL_FLAG_NO_EVICT; 59 .lpfn = 0,
60 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
61};
53 62
54static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | 63static struct ttm_place gmr_ne_placement_flags = {
55 TTM_PL_FLAG_CACHED; 64 .fpfn = 0,
65 .lpfn = 0,
66 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
67};
56 68
57struct ttm_placement vmw_vram_placement = { 69static struct ttm_place mob_placement_flags = {
58 .fpfn = 0, 70 .fpfn = 0,
59 .lpfn = 0, 71 .lpfn = 0,
72 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
73};
74
75struct ttm_placement vmw_vram_placement = {
60 .num_placement = 1, 76 .num_placement = 1,
61 .placement = &vram_placement_flags, 77 .placement = &vram_placement_flags,
62 .num_busy_placement = 1, 78 .num_busy_placement = 1,
63 .busy_placement = &vram_placement_flags 79 .busy_placement = &vram_placement_flags
64}; 80};
65 81
66static uint32_t vram_gmr_placement_flags[] = { 82static struct ttm_place vram_gmr_placement_flags[] = {
67 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, 83 {
68 VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED 84 .fpfn = 0,
85 .lpfn = 0,
86 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
87 }, {
88 .fpfn = 0,
89 .lpfn = 0,
90 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
91 }
69}; 92};
70 93
71static uint32_t gmr_vram_placement_flags[] = { 94static struct ttm_place gmr_vram_placement_flags[] = {
72 VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, 95 {
73 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED 96 .fpfn = 0,
97 .lpfn = 0,
98 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
99 }, {
100 .fpfn = 0,
101 .lpfn = 0,
102 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
103 }
74}; 104};
75 105
76struct ttm_placement vmw_vram_gmr_placement = { 106struct ttm_placement vmw_vram_gmr_placement = {
77 .fpfn = 0,
78 .lpfn = 0,
79 .num_placement = 2, 107 .num_placement = 2,
80 .placement = vram_gmr_placement_flags, 108 .placement = vram_gmr_placement_flags,
81 .num_busy_placement = 1, 109 .num_busy_placement = 1,
82 .busy_placement = &gmr_placement_flags 110 .busy_placement = &gmr_placement_flags
83}; 111};
84 112
85static uint32_t vram_gmr_ne_placement_flags[] = { 113static struct ttm_place vram_gmr_ne_placement_flags[] = {
86 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT, 114 {
87 VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 115 .fpfn = 0,
116 .lpfn = 0,
117 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
118 TTM_PL_FLAG_NO_EVICT
119 }, {
120 .fpfn = 0,
121 .lpfn = 0,
122 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
123 TTM_PL_FLAG_NO_EVICT
124 }
88}; 125};
89 126
90struct ttm_placement vmw_vram_gmr_ne_placement = { 127struct ttm_placement vmw_vram_gmr_ne_placement = {
91 .fpfn = 0,
92 .lpfn = 0,
93 .num_placement = 2, 128 .num_placement = 2,
94 .placement = vram_gmr_ne_placement_flags, 129 .placement = vram_gmr_ne_placement_flags,
95 .num_busy_placement = 1, 130 .num_busy_placement = 1,
@@ -97,8 +132,6 @@ struct ttm_placement vmw_vram_gmr_ne_placement = {
97}; 132};
98 133
99struct ttm_placement vmw_vram_sys_placement = { 134struct ttm_placement vmw_vram_sys_placement = {
100 .fpfn = 0,
101 .lpfn = 0,
102 .num_placement = 1, 135 .num_placement = 1,
103 .placement = &vram_placement_flags, 136 .placement = &vram_placement_flags,
104 .num_busy_placement = 1, 137 .num_busy_placement = 1,
@@ -106,8 +139,6 @@ struct ttm_placement vmw_vram_sys_placement = {
106}; 139};
107 140
108struct ttm_placement vmw_vram_ne_placement = { 141struct ttm_placement vmw_vram_ne_placement = {
109 .fpfn = 0,
110 .lpfn = 0,
111 .num_placement = 1, 142 .num_placement = 1,
112 .placement = &vram_ne_placement_flags, 143 .placement = &vram_ne_placement_flags,
113 .num_busy_placement = 1, 144 .num_busy_placement = 1,
@@ -115,8 +146,6 @@ struct ttm_placement vmw_vram_ne_placement = {
115}; 146};
116 147
117struct ttm_placement vmw_sys_placement = { 148struct ttm_placement vmw_sys_placement = {
118 .fpfn = 0,
119 .lpfn = 0,
120 .num_placement = 1, 149 .num_placement = 1,
121 .placement = &sys_placement_flags, 150 .placement = &sys_placement_flags,
122 .num_busy_placement = 1, 151 .num_busy_placement = 1,
@@ -124,24 +153,33 @@ struct ttm_placement vmw_sys_placement = {
124}; 153};
125 154
126struct ttm_placement vmw_sys_ne_placement = { 155struct ttm_placement vmw_sys_ne_placement = {
127 .fpfn = 0,
128 .lpfn = 0,
129 .num_placement = 1, 156 .num_placement = 1,
130 .placement = &sys_ne_placement_flags, 157 .placement = &sys_ne_placement_flags,
131 .num_busy_placement = 1, 158 .num_busy_placement = 1,
132 .busy_placement = &sys_ne_placement_flags 159 .busy_placement = &sys_ne_placement_flags
133}; 160};
134 161
135static uint32_t evictable_placement_flags[] = { 162static struct ttm_place evictable_placement_flags[] = {
136 TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, 163 {
137 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, 164 .fpfn = 0,
138 VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, 165 .lpfn = 0,
139 VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED 166 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
167 }, {
168 .fpfn = 0,
169 .lpfn = 0,
170 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
171 }, {
172 .fpfn = 0,
173 .lpfn = 0,
174 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
175 }, {
176 .fpfn = 0,
177 .lpfn = 0,
178 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
179 }
140}; 180};
141 181
142struct ttm_placement vmw_evictable_placement = { 182struct ttm_placement vmw_evictable_placement = {
143 .fpfn = 0,
144 .lpfn = 0,
145 .num_placement = 4, 183 .num_placement = 4,
146 .placement = evictable_placement_flags, 184 .placement = evictable_placement_flags,
147 .num_busy_placement = 1, 185 .num_busy_placement = 1,
@@ -149,8 +187,6 @@ struct ttm_placement vmw_evictable_placement = {
149}; 187};
150 188
151struct ttm_placement vmw_srf_placement = { 189struct ttm_placement vmw_srf_placement = {
152 .fpfn = 0,
153 .lpfn = 0,
154 .num_placement = 1, 190 .num_placement = 1,
155 .num_busy_placement = 2, 191 .num_busy_placement = 2,
156 .placement = &gmr_placement_flags, 192 .placement = &gmr_placement_flags,
@@ -158,8 +194,6 @@ struct ttm_placement vmw_srf_placement = {
158}; 194};
159 195
160struct ttm_placement vmw_mob_placement = { 196struct ttm_placement vmw_mob_placement = {
161 .fpfn = 0,
162 .lpfn = 0,
163 .num_placement = 1, 197 .num_placement = 1,
164 .num_busy_placement = 1, 198 .num_busy_placement = 1,
165 .placement = &mob_placement_flags, 199 .placement = &mob_placement_flags,
@@ -768,44 +802,6 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
768} 802}
769 803
770/** 804/**
771 * FIXME: We're using the old vmware polling method to sync.
772 * Do this with fences instead.
773 */
774
775static void *vmw_sync_obj_ref(void *sync_obj)
776{
777
778 return (void *)
779 vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
780}
781
782static void vmw_sync_obj_unref(void **sync_obj)
783{
784 vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
785}
786
787static int vmw_sync_obj_flush(void *sync_obj)
788{
789 vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
790 return 0;
791}
792
793static bool vmw_sync_obj_signaled(void *sync_obj)
794{
795 return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
796 DRM_VMW_FENCE_FLAG_EXEC);
797
798}
799
800static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
801{
802 return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
803 DRM_VMW_FENCE_FLAG_EXEC,
804 lazy, interruptible,
805 VMW_FENCE_WAIT_TIMEOUT);
806}
807
808/**
809 * vmw_move_notify - TTM move_notify_callback 805 * vmw_move_notify - TTM move_notify_callback
810 * 806 *
811 * @bo: The TTM buffer object about to move. 807 * @bo: The TTM buffer object about to move.
@@ -829,11 +825,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
829 */ 825 */
830static void vmw_swap_notify(struct ttm_buffer_object *bo) 826static void vmw_swap_notify(struct ttm_buffer_object *bo)
831{ 827{
832 struct ttm_bo_device *bdev = bo->bdev;
833
834 spin_lock(&bdev->fence_lock);
835 ttm_bo_wait(bo, false, false, false); 828 ttm_bo_wait(bo, false, false, false);
836 spin_unlock(&bdev->fence_lock);
837} 829}
838 830
839 831
@@ -846,11 +838,6 @@ struct ttm_bo_driver vmw_bo_driver = {
846 .evict_flags = vmw_evict_flags, 838 .evict_flags = vmw_evict_flags,
847 .move = NULL, 839 .move = NULL,
848 .verify_access = vmw_verify_access, 840 .verify_access = vmw_verify_access,
849 .sync_obj_signaled = vmw_sync_obj_signaled,
850 .sync_obj_wait = vmw_sync_obj_wait,
851 .sync_obj_flush = vmw_sync_obj_flush,
852 .sync_obj_unref = vmw_sync_obj_unref,
853 .sync_obj_ref = vmw_sync_obj_ref,
854 .move_notify = vmw_move_notify, 841 .move_notify = vmw_move_notify,
855 .swap_notify = vmw_swap_notify, 842 .swap_notify = vmw_swap_notify,
856 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, 843 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
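The vmwgfx placement tables get the same conversion as the TTM range manager earlier in this series: bare uint32_t flag words become struct ttm_place entries, and fpfn/lpfn leave struct ttm_placement. In miniature, with values taken from the hunk:

    /* before */
    static uint32_t vram_placement_flags =
            TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;

    /* after */
    static struct ttm_place vram_placement_flags = {
            .fpfn = 0,
            .lpfn = 0,
            .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    };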
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index ed1d51006ab1..914b375763dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -198,13 +198,19 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
198{ 198{
199 struct ttm_buffer_object *bo = &buf->base; 199 struct ttm_buffer_object *bo = &buf->base;
200 struct ttm_placement placement; 200 struct ttm_placement placement;
201 struct ttm_place place;
201 int ret = 0; 202 int ret = 0;
202 203
203 if (pin) 204 if (pin)
204 placement = vmw_vram_ne_placement; 205 place = vmw_vram_ne_placement.placement[0];
205 else 206 else
206 placement = vmw_vram_placement; 207 place = vmw_vram_placement.placement[0];
207 placement.lpfn = bo->num_pages; 208 place.lpfn = bo->num_pages;
209
210 placement.num_placement = 1;
211 placement.placement = &place;
212 placement.num_busy_placement = 1;
213 placement.busy_placement = &place;
208 214
209 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); 215 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
210 if (unlikely(ret != 0)) 216 if (unlikely(ret != 0))
@@ -293,21 +299,23 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
293 */ 299 */
294void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) 300void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
295{ 301{
296 uint32_t pl_flags; 302 struct ttm_place pl;
297 struct ttm_placement placement; 303 struct ttm_placement placement;
298 uint32_t old_mem_type = bo->mem.mem_type; 304 uint32_t old_mem_type = bo->mem.mem_type;
299 int ret; 305 int ret;
300 306
301 lockdep_assert_held(&bo->resv->lock.base); 307 lockdep_assert_held(&bo->resv->lock.base);
302 308
303 pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB 309 pl.fpfn = 0;
310 pl.lpfn = 0;
311 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
304 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; 312 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
305 if (pin) 313 if (pin)
306 pl_flags |= TTM_PL_FLAG_NO_EVICT; 314 pl.flags |= TTM_PL_FLAG_NO_EVICT;
307 315
308 memset(&placement, 0, sizeof(placement)); 316 memset(&placement, 0, sizeof(placement));
309 placement.num_placement = 1; 317 placement.num_placement = 1;
310 placement.placement = &pl_flags; 318 placement.placement = &pl;
311 319
312 ret = ttm_bo_validate(bo, &placement, false, true); 320 ret = ttm_bo_validate(bo, &placement, false, true);
313 321
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 18b54acacfbb..7197af157313 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1418,6 +1418,7 @@ static struct drm_driver driver = {
1418 .open = vmw_driver_open, 1418 .open = vmw_driver_open,
1419 .preclose = vmw_preclose, 1419 .preclose = vmw_preclose,
1420 .postclose = vmw_postclose, 1420 .postclose = vmw_postclose,
1421 .set_busid = drm_pci_set_busid,
1421 1422
1422 .dumb_create = vmw_dumb_create, 1423 .dumb_create = vmw_dumb_create,
1423 .dumb_map_offset = vmw_dumb_map_offset, 1424 .dumb_map_offset = vmw_dumb_map_offset,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 99f731757c4b..4ee799b43d5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -342,7 +342,6 @@ struct vmw_sw_context{
342 uint32_t *cmd_bounce; 342 uint32_t *cmd_bounce;
343 uint32_t cmd_bounce_size; 343 uint32_t cmd_bounce_size;
344 struct list_head resource_list; 344 struct list_head resource_list;
345 uint32_t fence_flags;
346 struct ttm_buffer_object *cur_query_bo; 345 struct ttm_buffer_object *cur_query_bo;
347 struct list_head res_relocations; 346 struct list_head res_relocations;
348 uint32_t *buf_start; 347 uint32_t *buf_start;
@@ -704,6 +703,7 @@ extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
704extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); 703extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
705extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, 704extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
706 uint32_t *seqno); 705 uint32_t *seqno);
706extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
707extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); 707extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
708extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); 708extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
709extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); 709extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 36b871686d3c..596cd6dafd33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -346,13 +346,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
346 ++sw_context->cur_val_buf; 346 ++sw_context->cur_val_buf;
347 val_buf = &vval_buf->base; 347 val_buf = &vval_buf->base;
348 val_buf->bo = ttm_bo_reference(bo); 348 val_buf->bo = ttm_bo_reference(bo);
349 val_buf->reserved = false; 349 val_buf->shared = false;
350 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 350 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351 vval_buf->validate_as_mob = validate_as_mob; 351 vval_buf->validate_as_mob = validate_as_mob;
352 } 352 }
353 353
354 sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
355
356 if (p_val_node) 354 if (p_val_node)
357 *p_val_node = val_node; 355 *p_val_node = val_node;
358 356
@@ -2337,13 +2335,9 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2337 2335
2338 if (p_handle != NULL) 2336 if (p_handle != NULL)
2339 ret = vmw_user_fence_create(file_priv, dev_priv->fman, 2337 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2340 sequence, 2338 sequence, p_fence, p_handle);
2341 DRM_VMW_FENCE_FLAG_EXEC,
2342 p_fence, p_handle);
2343 else 2339 else
2344 ret = vmw_fence_create(dev_priv->fman, sequence, 2340 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2345 DRM_VMW_FENCE_FLAG_EXEC,
2346 p_fence);
2347 2341
2348 if (unlikely(ret != 0 && !synced)) { 2342 if (unlikely(ret != 0 && !synced)) {
2349 (void) vmw_fallback_wait(dev_priv, false, false, 2343 (void) vmw_fallback_wait(dev_priv, false, false,
@@ -2395,7 +2389,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2395 BUG_ON(fence == NULL); 2389 BUG_ON(fence == NULL);
2396 2390
2397 fence_rep.handle = fence_handle; 2391 fence_rep.handle = fence_handle;
2398 fence_rep.seqno = fence->seqno; 2392 fence_rep.seqno = fence->base.seqno;
2399 vmw_update_seqno(dev_priv, &dev_priv->fifo); 2393 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2400 fence_rep.passed_seqno = dev_priv->last_read_seqno; 2394 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2401 } 2395 }
@@ -2416,8 +2410,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2416 ttm_ref_object_base_unref(vmw_fp->tfile, 2410 ttm_ref_object_base_unref(vmw_fp->tfile,
2417 fence_handle, TTM_REF_USAGE); 2411 fence_handle, TTM_REF_USAGE);
2418 DRM_ERROR("Fence copy error. Syncing.\n"); 2412 DRM_ERROR("Fence copy error. Syncing.\n");
2419 (void) vmw_fence_obj_wait(fence, fence->signal_mask, 2413 (void) vmw_fence_obj_wait(fence, false, false,
2420 false, false,
2421 VMW_FENCE_WAIT_TIMEOUT); 2414 VMW_FENCE_WAIT_TIMEOUT);
2422 } 2415 }
2423} 2416}
@@ -2469,7 +2462,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2469 sw_context->fp = vmw_fpriv(file_priv); 2462 sw_context->fp = vmw_fpriv(file_priv);
2470 sw_context->cur_reloc = 0; 2463 sw_context->cur_reloc = 0;
2471 sw_context->cur_val_buf = 0; 2464 sw_context->cur_val_buf = 0;
2472 sw_context->fence_flags = 0;
2473 INIT_LIST_HEAD(&sw_context->resource_list); 2465 INIT_LIST_HEAD(&sw_context->resource_list);
2474 sw_context->cur_query_bo = dev_priv->pinned_bo; 2466 sw_context->cur_query_bo = dev_priv->pinned_bo;
2475 sw_context->last_query_ctx = NULL; 2467 sw_context->last_query_ctx = NULL;
@@ -2495,7 +2487,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2495 if (unlikely(ret != 0)) 2487 if (unlikely(ret != 0))
2496 goto out_err_nores; 2488 goto out_err_nores;
2497 2489
2498 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); 2490 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
2499 if (unlikely(ret != 0)) 2491 if (unlikely(ret != 0))
2500 goto out_err; 2492 goto out_err;
2501 2493
@@ -2678,15 +2670,14 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2678 INIT_LIST_HEAD(&validate_list); 2670 INIT_LIST_HEAD(&validate_list);
2679 2671
2680 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); 2672 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2673 pinned_val.shared = false;
2681 list_add_tail(&pinned_val.head, &validate_list); 2674 list_add_tail(&pinned_val.head, &validate_list);
2682 2675
2683 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); 2676 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2677 query_val.shared = false;
2684 list_add_tail(&query_val.head, &validate_list); 2678 list_add_tail(&query_val.head, &validate_list);
2685 2679
2686 do { 2680 ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
2687 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2688 } while (ret == -ERESTARTSYS);
2689
2690 if (unlikely(ret != 0)) { 2681 if (unlikely(ret != 0)) {
2691 vmw_execbuf_unpin_panic(dev_priv); 2682 vmw_execbuf_unpin_panic(dev_priv);
2692 goto out_no_reserve; 2683 goto out_no_reserve;
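Because ttm_eu_reserve_buffers() now takes the interruptibility flag itself, and a non-interruptible reserve can no longer fail with -ERESTARTSYS, the open-coded retry loop disappears:

    /* before */
    do {
            ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
    } while (ret == -ERESTARTSYS);

    /* after: intr is a parameter; false cannot return -ERESTARTSYS */
    ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);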
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b031b48dbb3c..0a474f391fad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -374,10 +374,16 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out) 374 size_t size, struct vmw_dma_buffer **out)
375{ 375{
376 struct vmw_dma_buffer *vmw_bo; 376 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_placement ne_placement = vmw_vram_ne_placement; 377 struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
378 struct ttm_placement ne_placement;
378 int ret; 379 int ret;
379 380
380 ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 381 ne_placement.num_placement = 1;
382 ne_placement.placement = &ne_place;
383 ne_placement.num_busy_placement = 1;
384 ne_placement.busy_placement = &ne_place;
385
386 ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
381 387
382 (void) ttm_write_lock(&vmw_priv->reservation_sem, false); 388 (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
383 389
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 436b013b4231..197164fd7803 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
35 struct vmw_private *dev_priv; 35 struct vmw_private *dev_priv;
36 spinlock_t lock; 36 spinlock_t lock;
37 struct list_head fence_list; 37 struct list_head fence_list;
38 struct work_struct work; 38 struct work_struct work, ping_work;
39 u32 user_fence_size; 39 u32 user_fence_size;
40 u32 fence_size; 40 u32 fence_size;
41 u32 event_fence_action_size; 41 u32 event_fence_action_size;
@@ -46,6 +46,7 @@ struct vmw_fence_manager {
46 bool goal_irq_on; /* Protected by @goal_irq_mutex */ 46 bool goal_irq_on; /* Protected by @goal_irq_mutex */
47 bool seqno_valid; /* Protected by @lock, and may not be set to true 47 bool seqno_valid; /* Protected by @lock, and may not be set to true
48 without the @goal_irq_mutex held. */ 48 without the @goal_irq_mutex held. */
49 unsigned ctx;
49}; 50};
50 51
51struct vmw_user_fence { 52struct vmw_user_fence {
@@ -80,6 +81,12 @@ struct vmw_event_fence_action {
80 uint32_t *tv_usec; 81 uint32_t *tv_usec;
81}; 82};
82 83
84static struct vmw_fence_manager *
85fman_from_fence(struct vmw_fence_obj *fence)
86{
87 return container_of(fence->base.lock, struct vmw_fence_manager, lock);
88}
89
83/** 90/**
84 * Note on fencing subsystem usage of irqs: 91 * Note on fencing subsystem usage of irqs:
85 * Typically the vmw_fences_update function is called 92 * Typically the vmw_fences_update function is called
@@ -102,25 +109,143 @@ struct vmw_event_fence_action {
102 * objects with actions attached to them. 109 * objects with actions attached to them.
103 */ 110 */
104 111
105static void vmw_fence_obj_destroy_locked(struct kref *kref) 112static void vmw_fence_obj_destroy(struct fence *f)
106{ 113{
107 struct vmw_fence_obj *fence = 114 struct vmw_fence_obj *fence =
108 container_of(kref, struct vmw_fence_obj, kref); 115 container_of(f, struct vmw_fence_obj, base);
109 116
110 struct vmw_fence_manager *fman = fence->fman; 117 struct vmw_fence_manager *fman = fman_from_fence(fence);
111 unsigned int num_fences; 118 unsigned long irq_flags;
112 119
120 spin_lock_irqsave(&fman->lock, irq_flags);
113 list_del_init(&fence->head); 121 list_del_init(&fence->head);
114 num_fences = --fman->num_fence_objects; 122 --fman->num_fence_objects;
115 spin_unlock_irq(&fman->lock); 123 spin_unlock_irqrestore(&fman->lock, irq_flags);
116 if (fence->destroy) 124 fence->destroy(fence);
117 fence->destroy(fence); 125}
118 else
119 kfree(fence);
120 126
121 spin_lock_irq(&fman->lock); 127static const char *vmw_fence_get_driver_name(struct fence *f)
128{
129 return "vmwgfx";
130}
131
132static const char *vmw_fence_get_timeline_name(struct fence *f)
133{
134 return "svga";
135}
136
137static void vmw_fence_ping_func(struct work_struct *work)
138{
139 struct vmw_fence_manager *fman =
140 container_of(work, struct vmw_fence_manager, ping_work);
141
142 vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
143}
144
145static bool vmw_fence_enable_signaling(struct fence *f)
146{
147 struct vmw_fence_obj *fence =
148 container_of(f, struct vmw_fence_obj, base);
149
150 struct vmw_fence_manager *fman = fman_from_fence(fence);
151 struct vmw_private *dev_priv = fman->dev_priv;
152
153 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
154 u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
155 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
156 return false;
157
158 if (mutex_trylock(&dev_priv->hw_mutex)) {
159 vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
160 mutex_unlock(&dev_priv->hw_mutex);
161 } else
162 schedule_work(&fman->ping_work);
163
164 return true;
165}
166
167struct vmwgfx_wait_cb {
168 struct fence_cb base;
169 struct task_struct *task;
170};
171
172static void
173vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
174{
175 struct vmwgfx_wait_cb *wait =
176 container_of(cb, struct vmwgfx_wait_cb, base);
177
178 wake_up_process(wait->task);
179}
180
181static void __vmw_fences_update(struct vmw_fence_manager *fman);
182
183static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
184{
185 struct vmw_fence_obj *fence =
186 container_of(f, struct vmw_fence_obj, base);
187
188 struct vmw_fence_manager *fman = fman_from_fence(fence);
189 struct vmw_private *dev_priv = fman->dev_priv;
190 struct vmwgfx_wait_cb cb;
191 long ret = timeout;
192 unsigned long irq_flags;
193
194 if (likely(vmw_fence_obj_signaled(fence)))
195 return timeout;
196
197 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
198 vmw_seqno_waiter_add(dev_priv);
199
200 spin_lock_irqsave(f->lock, irq_flags);
201
202 if (intr && signal_pending(current)) {
203 ret = -ERESTARTSYS;
204 goto out;
205 }
206
207 cb.base.func = vmwgfx_wait_cb;
208 cb.task = current;
209 list_add(&cb.base.node, &f->cb_list);
210
211 while (ret > 0) {
212 __vmw_fences_update(fman);
213 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
214 break;
215
216 if (intr)
217 __set_current_state(TASK_INTERRUPTIBLE);
218 else
219 __set_current_state(TASK_UNINTERRUPTIBLE);
220 spin_unlock_irqrestore(f->lock, irq_flags);
221
222 ret = schedule_timeout(ret);
223
224 spin_lock_irqsave(f->lock, irq_flags);
225 if (ret > 0 && intr && signal_pending(current))
226 ret = -ERESTARTSYS;
227 }
228
229 if (!list_empty(&cb.base.node))
230 list_del(&cb.base.node);
231 __set_current_state(TASK_RUNNING);
232
233out:
234 spin_unlock_irqrestore(f->lock, irq_flags);
235
236 vmw_seqno_waiter_remove(dev_priv);
237
238 return ret;
122} 239}
123 240
241static struct fence_ops vmw_fence_ops = {
242 .get_driver_name = vmw_fence_get_driver_name,
243 .get_timeline_name = vmw_fence_get_timeline_name,
244 .enable_signaling = vmw_fence_enable_signaling,
245 .wait = vmw_fence_wait,
246 .release = vmw_fence_obj_destroy,
247};
248
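For reference, a minimal sketch of a driver hooking into the 3.17-era linux/fence.h API that vmwgfx adopts above; all demo_* names are invented, and fence_default_wait() is the generic wait path a driver may use instead of a custom .wait like vmw_fence_wait():

#include <linux/fence.h>

struct demo_fence {
	struct fence base;
};

static const char *demo_get_driver_name(struct fence *f)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct fence *f)
{
	return "demo-timeline";
}

static bool demo_enable_signaling(struct fence *f)
{
	/* arm whatever irq will later call fence_signal(); returning
	 * false means "already signaled, no irq needed" */
	return true;
}

static const struct fence_ops demo_fence_ops = {
	.get_driver_name	= demo_get_driver_name,
	.get_timeline_name	= demo_get_timeline_name,
	.enable_signaling	= demo_enable_signaling,
	.wait			= fence_default_wait,	/* generic sleep/wake */
};

static void demo_fence_init(struct demo_fence *df, spinlock_t *lock,
			    unsigned context, u32 seqno)
{
	/* the lock protects the fence's callback list; the context comes
	 * from fence_context_alloc(), as in vmw_fence_manager_init() */
	fence_init(&df->base, &demo_fence_ops, lock, context, seqno);
}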
124 249
125/** 250/**
126 * Execute signal actions on fences recently signaled. 251 * Execute signal actions on fences recently signaled.
@@ -180,12 +305,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
180 INIT_LIST_HEAD(&fman->fence_list); 305 INIT_LIST_HEAD(&fman->fence_list);
181 INIT_LIST_HEAD(&fman->cleanup_list); 306 INIT_LIST_HEAD(&fman->cleanup_list);
182 INIT_WORK(&fman->work, &vmw_fence_work_func); 307 INIT_WORK(&fman->work, &vmw_fence_work_func);
308 INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
183 fman->fifo_down = true; 309 fman->fifo_down = true;
184 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); 310 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
185 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); 311 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
186 fman->event_fence_action_size = 312 fman->event_fence_action_size =
187 ttm_round_pot(sizeof(struct vmw_event_fence_action)); 313 ttm_round_pot(sizeof(struct vmw_event_fence_action));
188 mutex_init(&fman->goal_irq_mutex); 314 mutex_init(&fman->goal_irq_mutex);
315 fman->ctx = fence_context_alloc(1);
189 316
190 return fman; 317 return fman;
191} 318}
@@ -196,6 +323,7 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
196 bool lists_empty; 323 bool lists_empty;
197 324
198 (void) cancel_work_sync(&fman->work); 325 (void) cancel_work_sync(&fman->work);
326 (void) cancel_work_sync(&fman->ping_work);
199 327
200 spin_lock_irqsave(&fman->lock, irq_flags); 328 spin_lock_irqsave(&fman->lock, irq_flags);
201 lists_empty = list_empty(&fman->fence_list) && 329 lists_empty = list_empty(&fman->fence_list) &&
@@ -207,23 +335,16 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
207} 335}
208 336
209static int vmw_fence_obj_init(struct vmw_fence_manager *fman, 337static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
210 struct vmw_fence_obj *fence, 338 struct vmw_fence_obj *fence, u32 seqno,
211 u32 seqno,
212 uint32_t mask,
213 void (*destroy) (struct vmw_fence_obj *fence)) 339 void (*destroy) (struct vmw_fence_obj *fence))
214{ 340{
215 unsigned long irq_flags; 341 unsigned long irq_flags;
216 unsigned int num_fences;
217 int ret = 0; 342 int ret = 0;
218 343
219 fence->seqno = seqno; 344 fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
345 fman->ctx, seqno);
220 INIT_LIST_HEAD(&fence->seq_passed_actions); 346 INIT_LIST_HEAD(&fence->seq_passed_actions);
221 fence->fman = fman;
222 fence->signaled = 0;
223 fence->signal_mask = mask;
224 kref_init(&fence->kref);
225 fence->destroy = destroy; 347 fence->destroy = destroy;
226 init_waitqueue_head(&fence->queue);
227 348
228 spin_lock_irqsave(&fman->lock, irq_flags); 349 spin_lock_irqsave(&fman->lock, irq_flags);
229 if (unlikely(fman->fifo_down)) { 350 if (unlikely(fman->fifo_down)) {
@@ -231,7 +352,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
231 goto out_unlock; 352 goto out_unlock;
232 } 353 }
233 list_add_tail(&fence->head, &fman->fence_list); 354 list_add_tail(&fence->head, &fman->fence_list);
234 num_fences = ++fman->num_fence_objects; 355 ++fman->num_fence_objects;
235 356
236out_unlock: 357out_unlock:
237 spin_unlock_irqrestore(&fman->lock, irq_flags); 358 spin_unlock_irqrestore(&fman->lock, irq_flags);
@@ -239,38 +360,6 @@ out_unlock:
239 360
240} 361}
241 362
242struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
243{
244 if (unlikely(fence == NULL))
245 return NULL;
246
247 kref_get(&fence->kref);
248 return fence;
249}
250
251/**
252 * vmw_fence_obj_unreference
253 *
254 * Note that this function may not be entered with disabled irqs since
255 * it may re-enable them in the destroy function.
256 *
257 */
258void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
259{
260 struct vmw_fence_obj *fence = *fence_p;
261 struct vmw_fence_manager *fman;
262
263 if (unlikely(fence == NULL))
264 return;
265
266 fman = fence->fman;
267 *fence_p = NULL;
268 spin_lock_irq(&fman->lock);
269 BUG_ON(atomic_read(&fence->kref.refcount) == 0);
270 kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
271 spin_unlock_irq(&fman->lock);
272}
273
274static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, 363static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
275 struct list_head *list) 364 struct list_head *list)
276{ 365{
@@ -326,7 +415,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
326 list_for_each_entry(fence, &fman->fence_list, head) { 415 list_for_each_entry(fence, &fman->fence_list, head) {
327 if (!list_empty(&fence->seq_passed_actions)) { 416 if (!list_empty(&fence->seq_passed_actions)) {
328 fman->seqno_valid = true; 417 fman->seqno_valid = true;
329 iowrite32(fence->seqno, 418 iowrite32(fence->base.seqno,
330 fifo_mem + SVGA_FIFO_FENCE_GOAL); 419 fifo_mem + SVGA_FIFO_FENCE_GOAL);
331 break; 420 break;
332 } 421 }
@@ -353,27 +442,27 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
353 */ 442 */
354static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) 443static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
355{ 444{
445 struct vmw_fence_manager *fman = fman_from_fence(fence);
356 u32 goal_seqno; 446 u32 goal_seqno;
357 __le32 __iomem *fifo_mem; 447 __le32 __iomem *fifo_mem;
358 448
359 if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) 449 if (fence_is_signaled_locked(&fence->base))
360 return false; 450 return false;
361 451
362 fifo_mem = fence->fman->dev_priv->mmio_virt; 452 fifo_mem = fman->dev_priv->mmio_virt;
363 goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL); 453 goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
364 if (likely(fence->fman->seqno_valid && 454 if (likely(fman->seqno_valid &&
365 goal_seqno - fence->seqno < VMW_FENCE_WRAP)) 455 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
366 return false; 456 return false;
367 457
368 iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL); 458 iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
369 fence->fman->seqno_valid = true; 459 fman->seqno_valid = true;
370 460
371 return true; 461 return true;
372} 462}
373 463
374void vmw_fences_update(struct vmw_fence_manager *fman) 464static void __vmw_fences_update(struct vmw_fence_manager *fman)
375{ 465{
376 unsigned long flags;
377 struct vmw_fence_obj *fence, *next_fence; 466 struct vmw_fence_obj *fence, *next_fence;
378 struct list_head action_list; 467 struct list_head action_list;
379 bool needs_rerun; 468 bool needs_rerun;
@@ -382,32 +471,25 @@ void vmw_fences_update(struct vmw_fence_manager *fman)
382 471
383 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 472 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
384rerun: 473rerun:
385 spin_lock_irqsave(&fman->lock, flags);
386 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { 474 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
387 if (seqno - fence->seqno < VMW_FENCE_WRAP) { 475 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
388 list_del_init(&fence->head); 476 list_del_init(&fence->head);
389 fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC; 477 fence_signal_locked(&fence->base);
390 INIT_LIST_HEAD(&action_list); 478 INIT_LIST_HEAD(&action_list);
391 list_splice_init(&fence->seq_passed_actions, 479 list_splice_init(&fence->seq_passed_actions,
392 &action_list); 480 &action_list);
393 vmw_fences_perform_actions(fman, &action_list); 481 vmw_fences_perform_actions(fman, &action_list);
394 wake_up_all(&fence->queue);
395 } else 482 } else
396 break; 483 break;
397 } 484 }
398 485
399 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
400
401 if (!list_empty(&fman->cleanup_list))
402 (void) schedule_work(&fman->work);
403 spin_unlock_irqrestore(&fman->lock, flags);
404
405 /* 486 /*
406 * Rerun if the fence goal seqno was updated, and the 487 * Rerun if the fence goal seqno was updated, and the
407 * hardware might have raced with that update, so that 488 * hardware might have raced with that update, so that
408 * we missed a fence_goal irq. 489 * we missed a fence_goal irq.
409 */ 490 */
410 491
492 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
411 if (unlikely(needs_rerun)) { 493 if (unlikely(needs_rerun)) {
412 new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 494 new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
413 if (new_seqno != seqno) { 495 if (new_seqno != seqno) {
@@ -415,79 +497,58 @@ rerun:
415 goto rerun; 497 goto rerun;
416 } 498 }
417 } 499 }
500
501 if (!list_empty(&fman->cleanup_list))
502 (void) schedule_work(&fman->work);
418} 503}
419 504
420bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, 505void vmw_fences_update(struct vmw_fence_manager *fman)
421 uint32_t flags)
422{ 506{
423 struct vmw_fence_manager *fman = fence->fman;
424 unsigned long irq_flags; 507 unsigned long irq_flags;
425 uint32_t signaled;
426 508
427 spin_lock_irqsave(&fman->lock, irq_flags); 509 spin_lock_irqsave(&fman->lock, irq_flags);
428 signaled = fence->signaled; 510 __vmw_fences_update(fman);
429 spin_unlock_irqrestore(&fman->lock, irq_flags); 511 spin_unlock_irqrestore(&fman->lock, irq_flags);
512}
430 513
431 flags &= fence->signal_mask; 514bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
432 if ((signaled & flags) == flags) 515{
433 return 1; 516 struct vmw_fence_manager *fman = fman_from_fence(fence);
434 517
435 if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) 518 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
436 vmw_fences_update(fman); 519 return 1;
437 520
438 spin_lock_irqsave(&fman->lock, irq_flags); 521 vmw_fences_update(fman);
439 signaled = fence->signaled;
440 spin_unlock_irqrestore(&fman->lock, irq_flags);
441 522
442 return ((signaled & flags) == flags); 523 return fence_is_signaled(&fence->base);
443} 524}
444 525
445int vmw_fence_obj_wait(struct vmw_fence_obj *fence, 526int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
446 uint32_t flags, bool lazy,
447 bool interruptible, unsigned long timeout) 527 bool interruptible, unsigned long timeout)
448{ 528{
449 struct vmw_private *dev_priv = fence->fman->dev_priv; 529 long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
450 long ret;
451 530
452 if (likely(vmw_fence_obj_signaled(fence, flags))) 531 if (likely(ret > 0))
453 return 0; 532 return 0;
454 533 else if (ret == 0)
455 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 534 return -EBUSY;
456 vmw_seqno_waiter_add(dev_priv);
457
458 if (interruptible)
459 ret = wait_event_interruptible_timeout
460 (fence->queue,
461 vmw_fence_obj_signaled(fence, flags),
462 timeout);
463 else 535 else
464 ret = wait_event_timeout 536 return ret;
465 (fence->queue,
466 vmw_fence_obj_signaled(fence, flags),
467 timeout);
468
469 vmw_seqno_waiter_remove(dev_priv);
470
471 if (unlikely(ret == 0))
472 ret = -EBUSY;
473 else if (likely(ret > 0))
474 ret = 0;
475
476 return ret;
477} 537}
478 538
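The mapping above follows fence_wait_timeout()'s return convention — remaining jiffies on success, 0 on timeout, negative errno on interruption — while preserving the driver's old int convention; a sketch of that translation with an invented helper name:

static int demo_map_wait_result(long lret)
{
	if (lret > 0)
		return 0;		/* fence signaled within the timeout */
	if (lret == 0)
		return -EBUSY;		/* timed out, the old convention kept */
	return (int)lret;		/* pass through -ERESTARTSYS and friends */
}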
479void vmw_fence_obj_flush(struct vmw_fence_obj *fence) 539void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
480{ 540{
481 struct vmw_private *dev_priv = fence->fman->dev_priv; 541 struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
482 542
483 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 543 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
484} 544}
485 545
486static void vmw_fence_destroy(struct vmw_fence_obj *fence) 546static void vmw_fence_destroy(struct vmw_fence_obj *fence)
487{ 547{
488 struct vmw_fence_manager *fman = fence->fman; 548 struct vmw_fence_manager *fman = fman_from_fence(fence);
549
550 fence_free(&fence->base);
489 551
490 kfree(fence);
491 /* 552 /*
492 * Free kernel space accounting. 553 * Free kernel space accounting.
493 */ 554 */
@@ -497,7 +558,6 @@ static void vmw_fence_destroy(struct vmw_fence_obj *fence)
497 558
498int vmw_fence_create(struct vmw_fence_manager *fman, 559int vmw_fence_create(struct vmw_fence_manager *fman,
499 uint32_t seqno, 560 uint32_t seqno,
500 uint32_t mask,
501 struct vmw_fence_obj **p_fence) 561 struct vmw_fence_obj **p_fence)
502{ 562{
503 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); 563 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
@@ -515,7 +575,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
515 goto out_no_object; 575 goto out_no_object;
516 } 576 }
517 577
518 ret = vmw_fence_obj_init(fman, fence, seqno, mask, 578 ret = vmw_fence_obj_init(fman, fence, seqno,
519 vmw_fence_destroy); 579 vmw_fence_destroy);
520 if (unlikely(ret != 0)) 580 if (unlikely(ret != 0))
521 goto out_err_init; 581 goto out_err_init;
@@ -535,7 +595,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
535{ 595{
536 struct vmw_user_fence *ufence = 596 struct vmw_user_fence *ufence =
537 container_of(fence, struct vmw_user_fence, fence); 597 container_of(fence, struct vmw_user_fence, fence);
538 struct vmw_fence_manager *fman = fence->fman; 598 struct vmw_fence_manager *fman = fman_from_fence(fence);
539 599
540 ttm_base_object_kfree(ufence, base); 600 ttm_base_object_kfree(ufence, base);
541 /* 601 /*
@@ -559,7 +619,6 @@ static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
559int vmw_user_fence_create(struct drm_file *file_priv, 619int vmw_user_fence_create(struct drm_file *file_priv,
560 struct vmw_fence_manager *fman, 620 struct vmw_fence_manager *fman,
561 uint32_t seqno, 621 uint32_t seqno,
562 uint32_t mask,
563 struct vmw_fence_obj **p_fence, 622 struct vmw_fence_obj **p_fence,
564 uint32_t *p_handle) 623 uint32_t *p_handle)
565{ 624{
@@ -586,7 +645,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
586 } 645 }
587 646
588 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, 647 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
589 mask, vmw_user_fence_destroy); 648 vmw_user_fence_destroy);
590 if (unlikely(ret != 0)) { 649 if (unlikely(ret != 0)) {
591 kfree(ufence); 650 kfree(ufence);
592 goto out_no_object; 651 goto out_no_object;
@@ -629,7 +688,6 @@ out_no_object:
629 688
630void vmw_fence_fifo_down(struct vmw_fence_manager *fman) 689void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
631{ 690{
632 unsigned long irq_flags;
633 struct list_head action_list; 691 struct list_head action_list;
634 int ret; 692 int ret;
635 693
@@ -638,35 +696,32 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
638 * restart when we've released the fman->lock. 696 * restart when we've released the fman->lock.
639 */ 697 */
640 698
641 spin_lock_irqsave(&fman->lock, irq_flags); 699 spin_lock_irq(&fman->lock);
642 fman->fifo_down = true; 700 fman->fifo_down = true;
643 while (!list_empty(&fman->fence_list)) { 701 while (!list_empty(&fman->fence_list)) {
644 struct vmw_fence_obj *fence = 702 struct vmw_fence_obj *fence =
645 list_entry(fman->fence_list.prev, struct vmw_fence_obj, 703 list_entry(fman->fence_list.prev, struct vmw_fence_obj,
646 head); 704 head);
647 kref_get(&fence->kref); 705 fence_get(&fence->base);
648 spin_unlock_irq(&fman->lock); 706 spin_unlock_irq(&fman->lock);
649 707
650 ret = vmw_fence_obj_wait(fence, fence->signal_mask, 708 ret = vmw_fence_obj_wait(fence, false, false,
651 false, false,
652 VMW_FENCE_WAIT_TIMEOUT); 709 VMW_FENCE_WAIT_TIMEOUT);
653 710
654 if (unlikely(ret != 0)) { 711 if (unlikely(ret != 0)) {
655 list_del_init(&fence->head); 712 list_del_init(&fence->head);
656 fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC; 713 fence_signal(&fence->base);
657 INIT_LIST_HEAD(&action_list); 714 INIT_LIST_HEAD(&action_list);
658 list_splice_init(&fence->seq_passed_actions, 715 list_splice_init(&fence->seq_passed_actions,
659 &action_list); 716 &action_list);
660 vmw_fences_perform_actions(fman, &action_list); 717 vmw_fences_perform_actions(fman, &action_list);
661 wake_up_all(&fence->queue);
662 } 718 }
663 719
664 spin_lock_irq(&fman->lock);
665
666 BUG_ON(!list_empty(&fence->head)); 720 BUG_ON(!list_empty(&fence->head));
667 kref_put(&fence->kref, vmw_fence_obj_destroy_locked); 721 fence_put(&fence->base);
722 spin_lock_irq(&fman->lock);
668 } 723 }
669 spin_unlock_irqrestore(&fman->lock, irq_flags); 724 spin_unlock_irq(&fman->lock);
670} 725}
671 726
672void vmw_fence_fifo_up(struct vmw_fence_manager *fman) 727void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
@@ -716,14 +771,14 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
716 771
717 timeout = jiffies; 772 timeout = jiffies;
718 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { 773 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
719 ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ? 774 ret = ((vmw_fence_obj_signaled(fence)) ?
720 0 : -EBUSY); 775 0 : -EBUSY);
721 goto out; 776 goto out;
722 } 777 }
723 778
724 timeout = (unsigned long)arg->kernel_cookie - timeout; 779 timeout = (unsigned long)arg->kernel_cookie - timeout;
725 780
726 ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout); 781 ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
727 782
728out: 783out:
729 ttm_base_object_unref(&base); 784 ttm_base_object_unref(&base);
@@ -758,12 +813,12 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
758 } 813 }
759 814
760 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 815 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
761 fman = fence->fman; 816 fman = fman_from_fence(fence);
762 817
763 arg->signaled = vmw_fence_obj_signaled(fence, arg->flags); 818 arg->signaled = vmw_fence_obj_signaled(fence);
764 spin_lock_irq(&fman->lock);
765 819
766 arg->signaled_flags = fence->signaled; 820 arg->signaled_flags = arg->flags;
821 spin_lock_irq(&fman->lock);
767 arg->passed_seqno = dev_priv->last_read_seqno; 822 arg->passed_seqno = dev_priv->last_read_seqno;
768 spin_unlock_irq(&fman->lock); 823 spin_unlock_irq(&fman->lock);
769 824
@@ -876,7 +931,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
876{ 931{
877 struct vmw_event_fence_action *eaction = 932 struct vmw_event_fence_action *eaction =
878 container_of(action, struct vmw_event_fence_action, action); 933 container_of(action, struct vmw_event_fence_action, action);
879 struct vmw_fence_manager *fman = eaction->fence->fman; 934 struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
880 unsigned long irq_flags; 935 unsigned long irq_flags;
881 936
882 spin_lock_irqsave(&fman->lock, irq_flags); 937 spin_lock_irqsave(&fman->lock, irq_flags);
@@ -900,7 +955,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
900static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, 955static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
901 struct vmw_fence_action *action) 956 struct vmw_fence_action *action)
902{ 957{
903 struct vmw_fence_manager *fman = fence->fman; 958 struct vmw_fence_manager *fman = fman_from_fence(fence);
904 unsigned long irq_flags; 959 unsigned long irq_flags;
905 bool run_update = false; 960 bool run_update = false;
906 961
@@ -908,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
908 spin_lock_irqsave(&fman->lock, irq_flags); 963 spin_lock_irqsave(&fman->lock, irq_flags);
909 964
910 fman->pending_actions[action->type]++; 965 fman->pending_actions[action->type]++;
911 if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) { 966 if (fence_is_signaled_locked(&fence->base)) {
912 struct list_head action_list; 967 struct list_head action_list;
913 968
914 INIT_LIST_HEAD(&action_list); 969 INIT_LIST_HEAD(&action_list);
@@ -960,7 +1015,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
960 bool interruptible) 1015 bool interruptible)
961{ 1016{
962 struct vmw_event_fence_action *eaction; 1017 struct vmw_event_fence_action *eaction;
963 struct vmw_fence_manager *fman = fence->fman; 1018 struct vmw_fence_manager *fman = fman_from_fence(fence);
964 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1019 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
965 unsigned long irq_flags; 1020 unsigned long irq_flags;
966 1021
@@ -1000,7 +1055,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
1000 bool interruptible) 1055 bool interruptible)
1001{ 1056{
1002 struct vmw_event_fence_pending *event; 1057 struct vmw_event_fence_pending *event;
1003 struct drm_device *dev = fence->fman->dev_priv->dev; 1058 struct vmw_fence_manager *fman = fman_from_fence(fence);
1059 struct drm_device *dev = fman->dev_priv->dev;
1004 unsigned long irq_flags; 1060 unsigned long irq_flags;
1005 int ret; 1061 int ret;
1006 1062
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index faf2e7873860..26a4add39208 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,6 +27,8 @@
27 27
28#ifndef _VMWGFX_FENCE_H_ 28#ifndef _VMWGFX_FENCE_H_
29 29
30#include <linux/fence.h>
31
30#define VMW_FENCE_WAIT_TIMEOUT (5*HZ) 32#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
31 33
32struct vmw_private; 34struct vmw_private;
@@ -50,16 +52,11 @@ struct vmw_fence_action {
50}; 52};
51 53
52struct vmw_fence_obj { 54struct vmw_fence_obj {
53 struct kref kref; 55 struct fence base;
54 u32 seqno;
55 56
56 struct vmw_fence_manager *fman;
57 struct list_head head; 57 struct list_head head;
58 uint32_t signaled;
59 uint32_t signal_mask;
60 struct list_head seq_passed_actions; 58 struct list_head seq_passed_actions;
61 void (*destroy)(struct vmw_fence_obj *fence); 59 void (*destroy)(struct vmw_fence_obj *fence);
62 wait_queue_head_t queue;
63}; 60};
64 61
65extern struct vmw_fence_manager * 62extern struct vmw_fence_manager *
@@ -67,17 +64,29 @@ vmw_fence_manager_init(struct vmw_private *dev_priv);
67 64
68extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman); 65extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
69 66
70extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p); 67static inline void
68vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
69{
70 struct vmw_fence_obj *fence = *fence_p;
71
72 *fence_p = NULL;
73 if (fence)
74 fence_put(&fence->base);
75}
71 76
72extern struct vmw_fence_obj * 77static inline struct vmw_fence_obj *
73vmw_fence_obj_reference(struct vmw_fence_obj *fence); 78vmw_fence_obj_reference(struct vmw_fence_obj *fence)
79{
80 if (fence)
81 fence_get(&fence->base);
82 return fence;
83}
74 84
75extern void vmw_fences_update(struct vmw_fence_manager *fman); 85extern void vmw_fences_update(struct vmw_fence_manager *fman);
76 86
77extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence, 87extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
78 uint32_t flags);
79 88
80extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags, 89extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
81 bool lazy, 90 bool lazy,
82 bool interruptible, unsigned long timeout); 91 bool interruptible, unsigned long timeout);
83 92
@@ -85,13 +94,11 @@ extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
85 94
86extern int vmw_fence_create(struct vmw_fence_manager *fman, 95extern int vmw_fence_create(struct vmw_fence_manager *fman,
87 uint32_t seqno, 96 uint32_t seqno,
88 uint32_t mask,
89 struct vmw_fence_obj **p_fence); 97 struct vmw_fence_obj **p_fence);
90 98
91extern int vmw_user_fence_create(struct drm_file *file_priv, 99extern int vmw_user_fence_create(struct drm_file *file_priv,
92 struct vmw_fence_manager *fman, 100 struct vmw_fence_manager *fman,
93 uint32_t sequence, 101 uint32_t sequence,
94 uint32_t mask,
95 struct vmw_fence_obj **p_fence, 102 struct vmw_fence_obj **p_fence,
96 uint32_t *p_handle); 103 uint32_t *p_handle);
97 104
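With the kref gone, refcounting goes through the embedded struct fence; a hypothetical caller of the new inline helpers:

static void demo_use_fence(struct vmw_fence_obj *fence)
{
	struct vmw_fence_obj *keep = vmw_fence_obj_reference(fence);

	/* ... the fence cannot be destroyed while we hold the ref ... */

	vmw_fence_obj_unreference(&keep);	/* drops the ref, NULLs keep */
}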
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 6eae14d2a3f7..09e10aefcd8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -160,16 +160,21 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
160 return vmw_fifo_send_fence(dev_priv, &dummy); 160 return vmw_fifo_send_fence(dev_priv, &dummy);
161} 161}
162 162
163void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 163void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
164{ 164{
165 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 165 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
166 166
167 mutex_lock(&dev_priv->hw_mutex);
168
169 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { 167 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
170 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); 168 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
171 vmw_write(dev_priv, SVGA_REG_SYNC, reason); 169 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
172 } 170 }
171}
172
173void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
174{
175 mutex_lock(&dev_priv->hw_mutex);
176
177 vmw_fifo_ping_host_locked(dev_priv, reason);
173 178
174 mutex_unlock(&dev_priv->hw_mutex); 179 mutex_unlock(&dev_priv->hw_mutex);
175} 180}
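The locked/unlocked split exists because vmw_fence_enable_signaling() runs under the fence spinlock and must not sleep on hw_mutex; a generic sketch of the resulting trylock-or-defer pattern, with all demo_* names invented:

struct demo_dev {
	struct mutex hw_mutex;
	struct work_struct ping_work;	/* worker calls demo_ping_locked() */
};

static void demo_ping_locked(struct demo_dev *dev)
{
	/* touch the hardware; the caller holds dev->hw_mutex */
}

static void demo_ping(struct demo_dev *dev)
{
	if (mutex_trylock(&dev->hw_mutex)) {
		demo_ping_locked(dev);		/* fast path: mutex was free */
		mutex_unlock(&dev->hw_mutex);
	} else {
		/* may run in atomic context: defer instead of sleeping */
		schedule_work(&dev->ping_work);
	}
}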
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 26f8bdde3529..170b61be1e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -46,8 +46,7 @@ struct vmwgfx_gmrid_man {
46 46
47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, 47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
48 struct ttm_buffer_object *bo, 48 struct ttm_buffer_object *bo,
49 struct ttm_placement *placement, 49 const struct ttm_place *place,
50 uint32_t flags,
51 struct ttm_mem_reg *mem) 50 struct ttm_mem_reg *mem)
52{ 51{
53 struct vmwgfx_gmrid_man *gman = 52 struct vmwgfx_gmrid_man *gman =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a432c0db257c..026de7cea0f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
133 struct ttm_validate_buffer val_buf; 133 struct ttm_validate_buffer val_buf;
134 134
135 val_buf.bo = bo; 135 val_buf.bo = bo;
136 val_buf.shared = false;
136 res->func->unbind(res, false, &val_buf); 137 res->func->unbind(res, false, &val_buf);
137 } 138 }
138 res->backup_dirty = false; 139 res->backup_dirty = false;
@@ -429,7 +430,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
429 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 430 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
430 ttm_bo_type_device, placement, 431 ttm_bo_type_device, placement,
431 0, interruptible, 432 0, interruptible,
432 NULL, acc_size, NULL, bo_free); 433 NULL, acc_size, NULL, NULL, bo_free);
433 return ret; 434 return ret;
434} 435}
435 436
@@ -567,13 +568,18 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
567 int ret; 568 int ret;
568 569
569 if (flags & drm_vmw_synccpu_allow_cs) { 570 if (flags & drm_vmw_synccpu_allow_cs) {
570 struct ttm_bo_device *bdev = bo->bdev; 571 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
572 long lret;
571 573
572 spin_lock(&bdev->fence_lock); 574 if (nonblock)
573 ret = ttm_bo_wait(bo, false, true, 575 return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
574 !!(flags & drm_vmw_synccpu_dontblock)); 576
575 spin_unlock(&bdev->fence_lock); 577 lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
576 return ret; 578 if (!lret)
579 return -EBUSY;
580 else if (lret < 0)
581 return lret;
582 return 0;
577 } 583 }
578 584
579 ret = ttm_bo_synccpu_write_grab 585 ret = ttm_bo_synccpu_write_grab
@@ -1214,8 +1220,9 @@ vmw_resource_check_buffer(struct vmw_resource *res,
1214 1220
1215 INIT_LIST_HEAD(&val_list); 1221 INIT_LIST_HEAD(&val_list);
1216 val_buf->bo = ttm_bo_reference(&res->backup->base); 1222 val_buf->bo = ttm_bo_reference(&res->backup->base);
1223 val_buf->shared = false;
1217 list_add_tail(&val_buf->head, &val_list); 1224 list_add_tail(&val_buf->head, &val_list);
1218 ret = ttm_eu_reserve_buffers(NULL, &val_list); 1225 ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
1219 if (unlikely(ret != 0)) 1226 if (unlikely(ret != 0))
1220 goto out_no_reserve; 1227 goto out_no_reserve;
1221 1228
@@ -1307,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1307 BUG_ON(!func->may_evict); 1314 BUG_ON(!func->may_evict);
1308 1315
1309 val_buf.bo = NULL; 1316 val_buf.bo = NULL;
1317 val_buf.shared = false;
1310 ret = vmw_resource_check_buffer(res, interruptible, &val_buf); 1318 ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1311 if (unlikely(ret != 0)) 1319 if (unlikely(ret != 0))
1312 return ret; 1320 return ret;
@@ -1352,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1352 return 0; 1360 return 0;
1353 1361
1354 val_buf.bo = NULL; 1362 val_buf.bo = NULL;
1363 val_buf.shared = false;
1355 if (res->backup) 1364 if (res->backup)
1356 val_buf.bo = &res->backup->base; 1365 val_buf.bo = &res->backup->base;
1357 do { 1366 do {
@@ -1419,25 +1428,16 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1419 struct vmw_fence_obj *fence) 1428 struct vmw_fence_obj *fence)
1420{ 1429{
1421 struct ttm_bo_device *bdev = bo->bdev; 1430 struct ttm_bo_device *bdev = bo->bdev;
1422 struct ttm_bo_driver *driver = bdev->driver; 1431
1423 struct vmw_fence_obj *old_fence_obj;
1424 struct vmw_private *dev_priv = 1432 struct vmw_private *dev_priv =
1425 container_of(bdev, struct vmw_private, bdev); 1433 container_of(bdev, struct vmw_private, bdev);
1426 1434
1427 if (fence == NULL) 1435 if (fence == NULL) {
1428 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 1436 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1429 else 1437 reservation_object_add_excl_fence(bo->resv, &fence->base);
1430 driver->sync_obj_ref(fence); 1438 fence_put(&fence->base);
1431 1439 } else
1432 spin_lock(&bdev->fence_lock); 1440 reservation_object_add_excl_fence(bo->resv, &fence->base);
1433
1434 old_fence_obj = bo->sync_obj;
1435 bo->sync_obj = fence;
1436
1437 spin_unlock(&bdev->fence_lock);
1438
1439 if (old_fence_obj)
1440 vmw_fence_obj_unreference(&old_fence_obj);
1441} 1441}
1442 1442
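Fencing a BO now means publishing the fence on the buffer's reservation object instead of swapping bo->sync_obj by hand; a sketch of the ownership rule, assuming the 3.17 reservation API takes its own reference on the fence:

/* publish as the exclusive fence; any driver sharing the reservation
 * object can now wait on it */
reservation_object_add_excl_fence(bo->resv, &fence->base);

/* add_excl_fence() takes its own reference, so the creation reference
 * from the fence == NULL branch above can be dropped immediately */
fence_put(&fence->base);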
1443/** 1443/**
@@ -1475,10 +1475,10 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1475 1475
1476 if (mem->mem_type != VMW_PL_MOB) { 1476 if (mem->mem_type != VMW_PL_MOB) {
1477 struct vmw_resource *res, *n; 1477 struct vmw_resource *res, *n;
1478 struct ttm_bo_device *bdev = bo->bdev;
1479 struct ttm_validate_buffer val_buf; 1478 struct ttm_validate_buffer val_buf;
1480 1479
1481 val_buf.bo = bo; 1480 val_buf.bo = bo;
1481 val_buf.shared = false;
1482 1482
1483 list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { 1483 list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1484 1484
@@ -1491,9 +1491,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1491 list_del_init(&res->mob_head); 1491 list_del_init(&res->mob_head);
1492 } 1492 }
1493 1493
1494 spin_lock(&bdev->fence_lock);
1495 (void) ttm_bo_wait(bo, false, false, false); 1494 (void) ttm_bo_wait(bo, false, false, false);
1496 spin_unlock(&bdev->fence_lock);
1497 } 1495 }
1498} 1496}
1499 1497
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 2f228a2f2a48..aefdff95356d 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,8 @@
1config IMX_IPUV3_CORE 1config IMX_IPUV3_CORE
2 tristate "IPUv3 core support" 2 tristate "IPUv3 core support"
3 depends on SOC_IMX5 || SOC_IMX6Q || SOC_IMX6SL || ARCH_MULTIPLATFORM 3 depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
4 depends on RESET_CONTROLLER 4 depends on RESET_CONTROLLER
5 select GENERIC_IRQ_CHIP
5 help 6 help
6 Choose this if you have an i.MX5/6 system and want to use the Image 7 Choose this if you have an i.MX5/6 system and want to use the Image
7 Processing Unit. This option only enables IPU base support. 8 Processing Unit. This option only enables IPU base support.
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 0b42836caae1..107ec236a4a6 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o 1obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
2 2
3imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-dc.o ipu-di.o \ 3imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
4 ipu-dp.o ipu-dmfc.o ipu-smfc.o 4 ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 5978e7aab8ed..f707d25ae78f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -74,6 +74,12 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
74 case DRM_FORMAT_UYVY: 74 case DRM_FORMAT_UYVY:
75 case DRM_FORMAT_YUV420: 75 case DRM_FORMAT_YUV420:
76 case DRM_FORMAT_YVU420: 76 case DRM_FORMAT_YVU420:
77 case DRM_FORMAT_YUV422:
78 case DRM_FORMAT_YVU422:
79 case DRM_FORMAT_NV12:
80 case DRM_FORMAT_NV21:
81 case DRM_FORMAT_NV16:
82 case DRM_FORMAT_NV61:
77 return IPUV3_COLORSPACE_YUV; 83 return IPUV3_COLORSPACE_YUV;
78 default: 84 default:
79 return IPUV3_COLORSPACE_UNKNOWN; 85 return IPUV3_COLORSPACE_UNKNOWN;
@@ -86,8 +92,13 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
86 switch (pixelformat) { 92 switch (pixelformat) {
87 case V4L2_PIX_FMT_YUV420: 93 case V4L2_PIX_FMT_YUV420:
88 case V4L2_PIX_FMT_YVU420: 94 case V4L2_PIX_FMT_YVU420:
95 case V4L2_PIX_FMT_YUV422P:
89 case V4L2_PIX_FMT_UYVY: 96 case V4L2_PIX_FMT_UYVY:
90 case V4L2_PIX_FMT_YUYV: 97 case V4L2_PIX_FMT_YUYV:
98 case V4L2_PIX_FMT_NV12:
99 case V4L2_PIX_FMT_NV21:
100 case V4L2_PIX_FMT_NV16:
101 case V4L2_PIX_FMT_NV61:
91 return IPUV3_COLORSPACE_YUV; 102 return IPUV3_COLORSPACE_YUV;
92 case V4L2_PIX_FMT_RGB32: 103 case V4L2_PIX_FMT_RGB32:
93 case V4L2_PIX_FMT_BGR32: 104 case V4L2_PIX_FMT_BGR32:
@@ -101,6 +112,135 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
101} 112}
102EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace); 113EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
103 114
115bool ipu_pixelformat_is_planar(u32 pixelformat)
116{
117 switch (pixelformat) {
118 case V4L2_PIX_FMT_YUV420:
119 case V4L2_PIX_FMT_YVU420:
120 case V4L2_PIX_FMT_YUV422P:
121 case V4L2_PIX_FMT_NV12:
122 case V4L2_PIX_FMT_NV21:
123 case V4L2_PIX_FMT_NV16:
124 case V4L2_PIX_FMT_NV61:
125 return true;
126 }
127
128 return false;
129}
130EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
131
132enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
133{
134 switch (mbus_code & 0xf000) {
135 case 0x1000:
136 return IPUV3_COLORSPACE_RGB;
137 case 0x2000:
138 return IPUV3_COLORSPACE_YUV;
139 default:
140 return IPUV3_COLORSPACE_UNKNOWN;
141 }
142}
143EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
144
145int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
146{
147 switch (pixelformat) {
148 case V4L2_PIX_FMT_YUV420:
149 case V4L2_PIX_FMT_YVU420:
150 case V4L2_PIX_FMT_YUV422P:
151 case V4L2_PIX_FMT_NV12:
152 case V4L2_PIX_FMT_NV21:
153 case V4L2_PIX_FMT_NV16:
154 case V4L2_PIX_FMT_NV61:
155 /*
156 * For the planar YUV formats, the stride passed to
157 * cpmem must be the stride in bytes of the Y plane,
158 * and all the planar YUV formats have an 8-bit
159 * Y component.
160 */
161 return (8 * pixel_stride) >> 3;
162 case V4L2_PIX_FMT_RGB565:
163 case V4L2_PIX_FMT_YUYV:
164 case V4L2_PIX_FMT_UYVY:
165 return (16 * pixel_stride) >> 3;
166 case V4L2_PIX_FMT_BGR24:
167 case V4L2_PIX_FMT_RGB24:
168 return (24 * pixel_stride) >> 3;
169 case V4L2_PIX_FMT_BGR32:
170 case V4L2_PIX_FMT_RGB32:
171 return (32 * pixel_stride) >> 3;
172 default:
173 break;
174 }
175
176 return -EINVAL;
177}
178EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
179
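For example, assuming a hypothetical 640-pixel-wide line:

/* RGB24 at 640 pixels per line: (24 * 640) >> 3 = 1920 bytes */
int stride = ipu_stride_to_bytes(640, V4L2_PIX_FMT_RGB24);

/* NV12: the Y plane is 8 bpp, so bytes equal pixels:
 * (8 * 640) >> 3 = 640 */
int y_stride = ipu_stride_to_bytes(640, V4L2_PIX_FMT_NV12);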
180int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
181 bool hflip, bool vflip)
182{
183 u32 r90, vf, hf;
184
185 switch (degrees) {
186 case 0:
187 vf = hf = r90 = 0;
188 break;
189 case 90:
190 vf = hf = 0;
191 r90 = 1;
192 break;
193 case 180:
194 vf = hf = 1;
195 r90 = 0;
196 break;
197 case 270:
198 vf = hf = r90 = 1;
199 break;
200 default:
201 return -EINVAL;
202 }
203
204 hf ^= (u32)hflip;
205 vf ^= (u32)vflip;
206
207 *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
208 return 0;
209}
210EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
211
212int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
213 bool hflip, bool vflip)
214{
215 u32 r90, vf, hf;
216
217 r90 = ((u32)mode >> 2) & 0x1;
218 hf = ((u32)mode >> 1) & 0x1;
219 vf = ((u32)mode >> 0) & 0x1;
220 hf ^= (u32)hflip;
221 vf ^= (u32)vflip;
222
223 switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
224 case IPU_ROTATE_NONE:
225 *degrees = 0;
226 break;
227 case IPU_ROTATE_90_RIGHT:
228 *degrees = 90;
229 break;
230 case IPU_ROTATE_180:
231 *degrees = 180;
232 break;
233 case IPU_ROTATE_90_LEFT:
234 *degrees = 270;
235 break;
236 default:
237 return -EINVAL;
238 }
239
240 return 0;
241}
242EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
243
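A worked example of the (r90 << 2) | (hf << 1) | vf packing used by the two helpers above; IPU_ROTATE_90_RIGHT_HFLIP is assumed to name the 0b110 mode in the existing enum ipu_rotate_mode:

enum ipu_rotate_mode mode;
int ret;

/* 90 degrees plus a caller-requested hflip: r90 = 1, hf = 0 ^ 1 = 1,
 * vf = 0, so mode = (1 << 2) | (1 << 1) | 0 = 0b110 */
ret = ipu_degrees_to_rot_mode(&mode, 90, true, false);
/* mode == IPU_ROTATE_90_RIGHT_HFLIP; the inverse helper recovers
 * degrees = 90 when called with the same hflip/vflip arguments */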
104struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num) 244struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
105{ 245{
106 struct ipuv3_channel *channel; 246 struct ipuv3_channel *channel;
@@ -143,7 +283,26 @@ void ipu_idmac_put(struct ipuv3_channel *channel)
143} 283}
144EXPORT_SYMBOL_GPL(ipu_idmac_put); 284EXPORT_SYMBOL_GPL(ipu_idmac_put);
145 285
146#define idma_mask(ch) (1 << (ch & 0x1f)) 286#define idma_mask(ch) (1 << ((ch) & 0x1f))
287
288/*
289 * This is an undocumented feature: writing a one to a channel's bit in
290 * IPU_CHA_CUR_BUF or IPU_CHA_TRIPLE_CUR_BUF resets the channel's
291 * internal current buffer pointer so that transfers start from buffer
292 * 0 on the next channel enable (that is the theory, anyway; the imx6 TRM
293 * only says these are read-only registers). This operation is required
294 * for channel linking to work correctly; for instance, video capture
295 * pipelines that carry out image rotations will fail after the first
296 * streaming run unless this function is called for each channel before
297 * the channels are re-enabled.
298 */
299static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
300{
301 struct ipu_soc *ipu = channel->ipu;
302 unsigned int chno = channel->num;
303
304 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
305}
147 306
148void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel, 307void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
149 bool doublebuffer) 308 bool doublebuffer)
@@ -161,10 +320,81 @@ void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
161 reg &= ~idma_mask(channel->num); 320 reg &= ~idma_mask(channel->num);
162 ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num)); 321 ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
163 322
323 __ipu_idmac_reset_current_buffer(channel);
324
164 spin_unlock_irqrestore(&ipu->lock, flags); 325 spin_unlock_irqrestore(&ipu->lock, flags);
165} 326}
166EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer); 327EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
167 328
329static const struct {
330 int chnum;
331 u32 reg;
332 int shift;
333} idmac_lock_en_info[] = {
334 { .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
335 { .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
336 { .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
337 { .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
338 { .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
339 { .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
340 { .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
341 { .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
342 { .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
343 { .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
344 { .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
345 { .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
346 { .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
347 { .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
348 { .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
349 { .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
350 { .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
351};
352
353int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
354{
355 struct ipu_soc *ipu = channel->ipu;
356 unsigned long flags;
357 u32 bursts, regval;
358 int i;
359
360 switch (num_bursts) {
361 case 0:
362 case 1:
363 bursts = 0x00; /* locking disabled */
364 break;
365 case 2:
366 bursts = 0x01;
367 break;
368 case 4:
369 bursts = 0x02;
370 break;
371 case 8:
372 bursts = 0x03;
373 break;
374 default:
375 return -EINVAL;
376 }
377
378 for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
379 if (channel->num == idmac_lock_en_info[i].chnum)
380 break;
381 }
382 if (i >= ARRAY_SIZE(idmac_lock_en_info))
383 return -EINVAL;
384
385 spin_lock_irqsave(&ipu->lock, flags);
386
387 regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
388 regval &= ~(0x03 << idmac_lock_en_info[i].shift);
389 regval |= (bursts << idmac_lock_en_info[i].shift);
390 ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
391
392 spin_unlock_irqrestore(&ipu->lock, flags);
393
394 return 0;
395}
396EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
397
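A hypothetical caller; only the channels listed in idmac_lock_en_info have a lock-enable field, and the burst count must be 0 or 1 (both meaning off), 2, 4 or 8:

int ret;

/* lock runs of eight bursts on, say, an SMFC capture channel */
ret = ipu_idmac_lock_enable(channel, 8);
if (ret)
	dev_warn(dev, "burst locking unavailable: %d\n", ret);	/* dev is hypothetical */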
168int ipu_module_enable(struct ipu_soc *ipu, u32 mask) 398int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
169{ 399{
170 unsigned long lock_flags; 400 unsigned long lock_flags;
@@ -217,30 +447,6 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
217} 447}
218EXPORT_SYMBOL_GPL(ipu_module_disable); 448EXPORT_SYMBOL_GPL(ipu_module_disable);
219 449
220int ipu_csi_enable(struct ipu_soc *ipu, int csi)
221{
222 return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
223}
224EXPORT_SYMBOL_GPL(ipu_csi_enable);
225
226int ipu_csi_disable(struct ipu_soc *ipu, int csi)
227{
228 return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
229}
230EXPORT_SYMBOL_GPL(ipu_csi_disable);
231
232int ipu_smfc_enable(struct ipu_soc *ipu)
233{
234 return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
235}
236EXPORT_SYMBOL_GPL(ipu_smfc_enable);
237
238int ipu_smfc_disable(struct ipu_soc *ipu)
239{
240 return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
241}
242EXPORT_SYMBOL_GPL(ipu_smfc_disable);
243
244int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel) 450int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
245{ 451{
246 struct ipu_soc *ipu = channel->ipu; 452 struct ipu_soc *ipu = channel->ipu;
@@ -250,6 +456,30 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
250} 456}
251EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer); 457EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
252 458
459bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
460{
461 struct ipu_soc *ipu = channel->ipu;
462 unsigned long flags;
463 u32 reg = 0;
464
465 spin_lock_irqsave(&ipu->lock, flags);
466 switch (buf_num) {
467 case 0:
468 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
469 break;
470 case 1:
471 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
472 break;
473 case 2:
474 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
475 break;
476 }
477 spin_unlock_irqrestore(&ipu->lock, flags);
478
479 return ((reg & idma_mask(channel->num)) != 0);
480}
481EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
482
253void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num) 483void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
254{ 484{
255 struct ipu_soc *ipu = channel->ipu; 485 struct ipu_soc *ipu = channel->ipu;
@@ -268,6 +498,34 @@ void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
268} 498}
269EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer); 499EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
270 500
501void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
502{
503 struct ipu_soc *ipu = channel->ipu;
504 unsigned int chno = channel->num;
505 unsigned long flags;
506
507 spin_lock_irqsave(&ipu->lock, flags);
508
509 ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
510 switch (buf_num) {
511 case 0:
512 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
513 break;
514 case 1:
515 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
516 break;
517 case 2:
518 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
519 break;
520 default:
521 break;
522 }
523 ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
524
525 spin_unlock_irqrestore(&ipu->lock, flags);
526}
527EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
528
271int ipu_idmac_enable_channel(struct ipuv3_channel *channel) 529int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
272{ 530{
273 struct ipu_soc *ipu = channel->ipu; 531 struct ipu_soc *ipu = channel->ipu;
@@ -338,6 +596,8 @@ int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
338 val &= ~idma_mask(channel->num); 596 val &= ~idma_mask(channel->num);
339 ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num)); 597 ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
340 598
599 __ipu_idmac_reset_current_buffer(channel);
600
341 /* Set channel buffers NOT to be ready */ 601 /* Set channel buffers NOT to be ready */
342 ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */ 602 ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
343 603
@@ -366,6 +626,31 @@ int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
366} 626}
367EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel); 627EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
368 628
629/*
630 * The imx6 rev. D TRM says that enabling the WM feature will increase
631 * a channel's priority. Refer to Table 36-8, "Calculated priority value".
632 * The sub-module that is the sink or source for the channel must enable
633 * the watermark signal for this to take effect (SMFC_WM, for instance).
634 */
635void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
636{
637 struct ipu_soc *ipu = channel->ipu;
638 unsigned long flags;
639 u32 val;
640
641 spin_lock_irqsave(&ipu->lock, flags);
642
643 val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
644 if (enable)
645 val |= 1 << (channel->num % 32);
646 else
647 val &= ~(1 << (channel->num % 32));
648 ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
649
650 spin_unlock_irqrestore(&ipu->lock, flags);
651}
652EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
653
369static int ipu_memory_reset(struct ipu_soc *ipu) 654static int ipu_memory_reset(struct ipu_soc *ipu)
370{ 655{
371 unsigned long timeout; 656 unsigned long timeout;
@@ -382,12 +667,66 @@ static int ipu_memory_reset(struct ipu_soc *ipu)
382 return 0; 667 return 0;
383} 668}
384 669
670/*
671 * Set the source mux for the given CSI. Selects either parallel or
672 * MIPI CSI2 sources.
673 */
674void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
675{
676 unsigned long flags;
677 u32 val, mask;
678
679 mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
680 IPU_CONF_CSI0_DATA_SOURCE;
681
682 spin_lock_irqsave(&ipu->lock, flags);
683
684 val = ipu_cm_read(ipu, IPU_CONF);
685 if (mipi_csi2)
686 val |= mask;
687 else
688 val &= ~mask;
689 ipu_cm_write(ipu, val, IPU_CONF);
690
691 spin_unlock_irqrestore(&ipu->lock, flags);
692}
693EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
694
695/*
696 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
697 */
698void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
699{
700 unsigned long flags;
701 u32 val;
702
703 spin_lock_irqsave(&ipu->lock, flags);
704
705 val = ipu_cm_read(ipu, IPU_CONF);
706 if (vdi) {
707 val |= IPU_CONF_IC_INPUT;
708 } else {
709 val &= ~IPU_CONF_IC_INPUT;
710 if (csi_id == 1)
711 val |= IPU_CONF_CSI_SEL;
712 else
713 val &= ~IPU_CONF_CSI_SEL;
714 }
715 ipu_cm_write(ipu, val, IPU_CONF);
716
717 spin_unlock_irqrestore(&ipu->lock, flags);
718}
719EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
720
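A hypothetical capture-pipeline setup combining the two muxes (the ipu pointer is assumed):

/* route CSI1 from the MIPI CSI-2 receiver, then feed it to the IC */
ipu_set_csi_src_mux(ipu, 1, true);	/* CSI1 <- MIPI CSI-2 */
ipu_set_ic_src_mux(ipu, 1, false);	/* IC   <- CSI1, not the VDI */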
385struct ipu_devtype { 721struct ipu_devtype {
386 const char *name; 722 const char *name;
387 unsigned long cm_ofs; 723 unsigned long cm_ofs;
388 unsigned long cpmem_ofs; 724 unsigned long cpmem_ofs;
389 unsigned long srm_ofs; 725 unsigned long srm_ofs;
390 unsigned long tpm_ofs; 726 unsigned long tpm_ofs;
727 unsigned long csi0_ofs;
728 unsigned long csi1_ofs;
729 unsigned long ic_ofs;
391 unsigned long disp0_ofs; 730 unsigned long disp0_ofs;
392 unsigned long disp1_ofs; 731 unsigned long disp1_ofs;
393 unsigned long dc_tmpl_ofs; 732 unsigned long dc_tmpl_ofs;
@@ -401,6 +740,9 @@ static struct ipu_devtype ipu_type_imx51 = {
401 .cpmem_ofs = 0x1f000000, 740 .cpmem_ofs = 0x1f000000,
402 .srm_ofs = 0x1f040000, 741 .srm_ofs = 0x1f040000,
403 .tpm_ofs = 0x1f060000, 742 .tpm_ofs = 0x1f060000,
743 .csi0_ofs = 0x1f030000,
744 .csi1_ofs = 0x1f038000,
745 .ic_ofs = 0x1f020000,
404 .disp0_ofs = 0x1e040000, 746 .disp0_ofs = 0x1e040000,
405 .disp1_ofs = 0x1e048000, 747 .disp1_ofs = 0x1e048000,
406 .dc_tmpl_ofs = 0x1f080000, 748 .dc_tmpl_ofs = 0x1f080000,
@@ -414,6 +756,9 @@ static struct ipu_devtype ipu_type_imx53 = {
414 .cpmem_ofs = 0x07000000, 756 .cpmem_ofs = 0x07000000,
415 .srm_ofs = 0x07040000, 757 .srm_ofs = 0x07040000,
416 .tpm_ofs = 0x07060000, 758 .tpm_ofs = 0x07060000,
759 .csi0_ofs = 0x07030000,
760 .csi1_ofs = 0x07038000,
761 .ic_ofs = 0x07020000,
417 .disp0_ofs = 0x06040000, 762 .disp0_ofs = 0x06040000,
418 .disp1_ofs = 0x06048000, 763 .disp1_ofs = 0x06048000,
419 .dc_tmpl_ofs = 0x07080000, 764 .dc_tmpl_ofs = 0x07080000,
@@ -427,6 +772,9 @@ static struct ipu_devtype ipu_type_imx6q = {
427 .cpmem_ofs = 0x00300000, 772 .cpmem_ofs = 0x00300000,
428 .srm_ofs = 0x00340000, 773 .srm_ofs = 0x00340000,
429 .tpm_ofs = 0x00360000, 774 .tpm_ofs = 0x00360000,
775 .csi0_ofs = 0x00230000,
776 .csi1_ofs = 0x00238000,
777 .ic_ofs = 0x00220000,
430 .disp0_ofs = 0x00240000, 778 .disp0_ofs = 0x00240000,
431 .disp1_ofs = 0x00248000, 779 .disp1_ofs = 0x00248000,
432 .dc_tmpl_ofs = 0x00380000, 780 .dc_tmpl_ofs = 0x00380000,
@@ -457,8 +805,30 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
457 goto err_cpmem; 805 goto err_cpmem;
458 } 806 }
459 807
808 ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
809 IPU_CONF_CSI0_EN, ipu_clk);
810 if (ret) {
811 unit = "csi0";
812 goto err_csi_0;
813 }
814
815 ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
816 IPU_CONF_CSI1_EN, ipu_clk);
817 if (ret) {
818 unit = "csi1";
819 goto err_csi_1;
820 }
821
822 ret = ipu_ic_init(ipu, dev,
823 ipu_base + devtype->ic_ofs,
824 ipu_base + devtype->tpm_ofs);
825 if (ret) {
826 unit = "ic";
827 goto err_ic;
828 }
829
460 ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs, 830 ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
461 IPU_CONF_DI0_EN, ipu_clk); 831 IPU_CONF_DI0_EN, ipu_clk);
462 if (ret) { 832 if (ret) {
463 unit = "di0"; 833 unit = "di0";
464 goto err_di_0; 834 goto err_di_0;
@@ -511,6 +881,12 @@ err_dc:
511err_di_1: 881err_di_1:
512 ipu_di_exit(ipu, 0); 882 ipu_di_exit(ipu, 0);
513err_di_0: 883err_di_0:
884 ipu_ic_exit(ipu);
885err_ic:
886 ipu_csi_exit(ipu, 1);
887err_csi_1:
888 ipu_csi_exit(ipu, 0);
889err_csi_0:
514 ipu_cpmem_exit(ipu); 890 ipu_cpmem_exit(ipu);
515err_cpmem: 891err_cpmem:
516 dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret); 892 dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
@@ -589,6 +965,9 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
589 ipu_dc_exit(ipu); 965 ipu_dc_exit(ipu);
590 ipu_di_exit(ipu, 1); 966 ipu_di_exit(ipu, 1);
591 ipu_di_exit(ipu, 0); 967 ipu_di_exit(ipu, 0);
968 ipu_ic_exit(ipu);
969 ipu_csi_exit(ipu, 1);
970 ipu_csi_exit(ipu, 0);
592 ipu_cpmem_exit(ipu); 971 ipu_cpmem_exit(ipu);
593} 972}
594 973
@@ -681,8 +1060,10 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
681 id++, &reg->pdata, sizeof(reg->pdata)); 1060 id++, &reg->pdata, sizeof(reg->pdata));
682 } 1061 }
683 1062
684 if (IS_ERR(pdev)) 1063 if (IS_ERR(pdev)) {
1064 ret = PTR_ERR(pdev);
685 goto err_register; 1065 goto err_register;
1066 }
686 } 1067 }
687 1068
688 return 0; 1069 return 0;
@@ -766,6 +1147,44 @@ static void ipu_irq_exit(struct ipu_soc *ipu)
766 irq_domain_remove(ipu->domain); 1147 irq_domain_remove(ipu->domain);
767} 1148}
768 1149
1150void ipu_dump(struct ipu_soc *ipu)
1151{
1152 int i;
1153
1154 dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
1155 ipu_cm_read(ipu, IPU_CONF));
1156 dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
1157 ipu_idmac_read(ipu, IDMAC_CONF));
1158 dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
1159 ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
1160 dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
1161 ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
1162 dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
1163 ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
1164 dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
1165 ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
1166 dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
1167 ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
1168 dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
1169 ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
1170 dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
1171 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
1172 dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
1173 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
1174 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
1175 ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
1176 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
1177 ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
1178 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
1179 ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
1180 dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
1181 ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
1182 for (i = 0; i < 15; i++)
1183 dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
1184 ipu_cm_read(ipu, IPU_INT_CTRL(i)));
1185}
1186EXPORT_SYMBOL_GPL(ipu_dump);
1187
769static int ipu_probe(struct platform_device *pdev) 1188static int ipu_probe(struct platform_device *pdev)
770{ 1189{
771 const struct of_device_id *of_id = 1190 const struct of_device_id *of_id =
@@ -808,6 +1227,12 @@ static int ipu_probe(struct platform_device *pdev)
808 ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS); 1227 ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
809 dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n", 1228 dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
810 ipu_base + devtype->cpmem_ofs); 1229 ipu_base + devtype->cpmem_ofs);
1230 dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
1231 ipu_base + devtype->csi0_ofs);
1232 dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
1233 ipu_base + devtype->csi1_ofs);
1234 dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
1235 ipu_base + devtype->ic_ofs);
811 dev_dbg(&pdev->dev, "disp0: 0x%08lx\n", 1236 dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
812 ipu_base + devtype->disp0_ofs); 1237 ipu_base + devtype->disp0_ofs);
813 dev_dbg(&pdev->dev, "disp1: 0x%08lx\n", 1238 dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 7adfa78a48bc..3bf05bc4ab67 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -64,6 +64,7 @@ struct ipu_cpmem {
64#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3) 64#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
65#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2) 65#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
66#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1) 66#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
67#define IPU_FIELD_ROT_HF_VF IPU_CPMEM_WORD(0, 119, 3)
67#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1) 68#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
68#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1) 69#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
69#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1) 70#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
@@ -192,8 +193,14 @@ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
192 return DRM_FORMAT_YUYV; 193 return DRM_FORMAT_YUYV;
193 case V4L2_PIX_FMT_YUV420: 194 case V4L2_PIX_FMT_YUV420:
194 return DRM_FORMAT_YUV420; 195 return DRM_FORMAT_YUV420;
196 case V4L2_PIX_FMT_YUV422P:
197 return DRM_FORMAT_YUV422;
195 case V4L2_PIX_FMT_YVU420: 198 case V4L2_PIX_FMT_YVU420:
196 return DRM_FORMAT_YVU420; 199 return DRM_FORMAT_YVU420;
200 case V4L2_PIX_FMT_NV12:
201 return DRM_FORMAT_NV12;
202 case V4L2_PIX_FMT_NV16:
203 return DRM_FORMAT_NV16;
197 } 204 }
198 205
199 return -EINVAL; 206 return -EINVAL;
@@ -254,12 +261,34 @@ void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
254}; 261};
255EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan); 262EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
256 263
264void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
265{
266 id &= 0x3;
267 ipu_ch_param_write_field(ch, IPU_FIELD_ID, id);
268}
269EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
270
257void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize) 271void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
258{ 272{
259 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1); 273 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
260}; 274};
261EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize); 275EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize);
262 276
277void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch)
278{
279 ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1);
280}
281EXPORT_SYMBOL_GPL(ipu_cpmem_set_block_mode);
282
283void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
284 enum ipu_rotate_mode rot)
285{
286 u32 temp_rot = bitrev8(rot) >> 5;
287
288 ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot);
289}
290EXPORT_SYMBOL_GPL(ipu_cpmem_set_rotation);
291
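The bitrev8(rot) >> 5 above mirrors the low three bits of the rotate mode.
A minimal sketch of the same transform, assuming enum ipu_rotate_mode packs
bit0=VF, bit1=HF, bit2=90-degree-rotate while the CPMEM ROT_HF_VF field
expects the reverse order (my reading of the field definitions above, not
stated by the patch):

	/* Mirror the low 3 bits: equivalent to bitrev8(mode) >> 5. */
	static unsigned int rot_mode_to_hw(unsigned int mode)
	{
		return ((mode & 0x1) << 2) | (mode & 0x2) | ((mode >> 2) & 0x1);
	}
	/* e.g. IPU_ROTATE_90_RIGHT (0b100) -> 0b001, only the ROT bit set */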
263int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch, 292int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
264 const struct ipu_rgb *rgb) 293 const struct ipu_rgb *rgb)
265{ 294{
@@ -371,6 +400,7 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
371{ 400{
372 switch (pixel_format) { 401 switch (pixel_format) {
373 case V4L2_PIX_FMT_YUV420: 402 case V4L2_PIX_FMT_YUV420:
403 case V4L2_PIX_FMT_YUV422P:
374 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1); 404 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
375 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8); 405 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
376 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8); 406 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
@@ -380,6 +410,12 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
380 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8); 410 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
381 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8); 411 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
382 break; 412 break;
413 case V4L2_PIX_FMT_NV12:
414 case V4L2_PIX_FMT_NV16:
415 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
416 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
417 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
418 break;
383 } 419 }
384} 420}
385EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full); 421EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
@@ -399,6 +435,19 @@ void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
399 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, 435 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
400 u_offset, v_offset); 436 u_offset, v_offset);
401 break; 437 break;
438 case V4L2_PIX_FMT_YUV422P:
439 uv_stride = stride / 2;
440 u_offset = stride * height;
441 v_offset = u_offset + (uv_stride * height);
442 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
443 u_offset, v_offset);
444 break;
445 case V4L2_PIX_FMT_NV12:
446 case V4L2_PIX_FMT_NV16:
447 u_offset = stride * height;
448 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
449 u_offset, 0);
450 break;
402 } 451 }
403} 452}
404EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar); 453EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
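For reference, the default offsets chosen above work out as follows for a
640x480 frame (stride 640); the arithmetic is mine, the cases are as added
above:

	/*
	 * YUV422P:   uv_stride = 320
	 *            u_offset  = 640 * 480          = 307200
	 *            v_offset  = 307200 + 320 * 480 = 460800
	 * NV12/NV16: one interleaved chroma plane, so
	 *            u_offset  = 640 * 480          = 307200, v_offset unused
	 */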
@@ -452,11 +501,20 @@ static const struct ipu_rgb def_bgr_16 = {
452}; 501};
453 502
454#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y)) 503#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
455#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \ 504#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
456 (pix->width * (y) / 4) + (x) / 2) 505 (pix->width * (y) / 4) + (x) / 2)
457#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \ 506#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
458 (pix->width * pix->height / 4) + \ 507 (pix->width * pix->height / 4) + \
459 (pix->width * (y) / 4) + (x) / 2) 508 (pix->width * (y) / 4) + (x) / 2)
509#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
510 (pix->width * (y) / 2) + (x) / 2)
511#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
512 (pix->width * pix->height / 2) + \
513 (pix->width * (y) / 2) + (x) / 2)
514#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
515 (pix->width * (y) / 2) + (x))
516#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
517 (pix->width * y) + (x))
460 518
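A spot check of the new offset macros, again hand-computed, for a 640x480
frame with the crop window at (16, 8):

	struct v4l2_pix_format fmt = { .width = 640, .height = 480 };
	struct v4l2_pix_format *pix = &fmt;

	int y  = Y_OFFSET(pix, 16, 8);	/* 16 + 640*8         =   5136 */
	int u  = U_OFFSET(pix, 16, 8);	/* 307200 + 1280 + 8  = 308488 */
	int uv = UV_OFFSET(pix, 16, 8);	/* 307200 + 2560 + 16 = 309776 */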
461int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc) 519int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
462{ 520{
@@ -468,6 +526,25 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
468 /* burst size */ 526 /* burst size */
469 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); 527 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
470 break; 528 break;
529 case DRM_FORMAT_YUV422:
530 case DRM_FORMAT_YVU422:
531 /* pix format */
532 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1);
533 /* burst size */
534 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
535 break;
536 case DRM_FORMAT_NV12:
537 /* pix format */
538 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
539 /* burst size */
540 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
541 break;
542 case DRM_FORMAT_NV16:
543 /* pix format */
544 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3);
545 /* burst size */
546 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
547 break;
471 case DRM_FORMAT_UYVY: 548 case DRM_FORMAT_UYVY:
472 /* bits/pixel */ 549 /* bits/pixel */
473 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); 550 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
@@ -515,7 +592,7 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
515int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) 592int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
516{ 593{
517 struct v4l2_pix_format *pix = &image->pix; 594 struct v4l2_pix_format *pix = &image->pix;
518 int y_offset, u_offset, v_offset; 595 int offset, u_offset, v_offset;
519 596
520 pr_debug("%s: resolution: %dx%d stride: %d\n", 597 pr_debug("%s: resolution: %dx%d stride: %d\n",
521 __func__, pix->width, pix->height, 598 __func__, pix->width, pix->height,
@@ -529,47 +606,137 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
529 switch (pix->pixelformat) { 606 switch (pix->pixelformat) {
530 case V4L2_PIX_FMT_YUV420: 607 case V4L2_PIX_FMT_YUV420:
531 case V4L2_PIX_FMT_YVU420: 608 case V4L2_PIX_FMT_YVU420:
532 y_offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 609 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
533 u_offset = U_OFFSET(pix, image->rect.left, 610 u_offset = U_OFFSET(pix, image->rect.left,
534 image->rect.top) - y_offset; 611 image->rect.top) - offset;
535 v_offset = V_OFFSET(pix, image->rect.left, 612 v_offset = V_OFFSET(pix, image->rect.left,
536 image->rect.top) - y_offset; 613 image->rect.top) - offset;
614
615 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
616 pix->bytesperline,
617 u_offset, v_offset);
618 break;
619 case V4L2_PIX_FMT_YUV422P:
620 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
621 u_offset = U2_OFFSET(pix, image->rect.left,
622 image->rect.top) - offset;
623 v_offset = V2_OFFSET(pix, image->rect.left,
624 image->rect.top) - offset;
537 625
538 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 626 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
539 pix->bytesperline, u_offset, v_offset); 627 pix->bytesperline,
540 ipu_cpmem_set_buffer(ch, 0, image->phys + y_offset); 628 u_offset, v_offset);
629 break;
630 case V4L2_PIX_FMT_NV12:
631 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
632 u_offset = UV_OFFSET(pix, image->rect.left,
633 image->rect.top) - offset;
634 v_offset = 0;
635
636 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
637 pix->bytesperline,
638 u_offset, v_offset);
639 break;
640 case V4L2_PIX_FMT_NV16:
641 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
642 u_offset = UV2_OFFSET(pix, image->rect.left,
643 image->rect.top) - offset;
644 v_offset = 0;
645
646 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
647 pix->bytesperline,
648 u_offset, v_offset);
541 break; 649 break;
542 case V4L2_PIX_FMT_UYVY: 650 case V4L2_PIX_FMT_UYVY:
543 case V4L2_PIX_FMT_YUYV: 651 case V4L2_PIX_FMT_YUYV:
544 ipu_cpmem_set_buffer(ch, 0, image->phys + 652 case V4L2_PIX_FMT_RGB565:
545 image->rect.left * 2 + 653 offset = image->rect.left * 2 +
546 image->rect.top * image->pix.bytesperline); 654 image->rect.top * pix->bytesperline;
547 break; 655 break;
548 case V4L2_PIX_FMT_RGB32: 656 case V4L2_PIX_FMT_RGB32:
549 case V4L2_PIX_FMT_BGR32: 657 case V4L2_PIX_FMT_BGR32:
550 ipu_cpmem_set_buffer(ch, 0, image->phys + 658 offset = image->rect.left * 4 +
551 image->rect.left * 4 + 659 image->rect.top * pix->bytesperline;
552 image->rect.top * image->pix.bytesperline);
553 break;
554 case V4L2_PIX_FMT_RGB565:
555 ipu_cpmem_set_buffer(ch, 0, image->phys +
556 image->rect.left * 2 +
557 image->rect.top * image->pix.bytesperline);
558 break; 660 break;
559 case V4L2_PIX_FMT_RGB24: 661 case V4L2_PIX_FMT_RGB24:
560 case V4L2_PIX_FMT_BGR24: 662 case V4L2_PIX_FMT_BGR24:
561 ipu_cpmem_set_buffer(ch, 0, image->phys + 663 offset = image->rect.left * 3 +
562 image->rect.left * 3 + 664 image->rect.top * pix->bytesperline;
563 image->rect.top * image->pix.bytesperline);
564 break; 665 break;
565 default: 666 default:
566 return -EINVAL; 667 return -EINVAL;
567 } 668 }
568 669
670 ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
671 ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
672
569 return 0; 673 return 0;
570} 674}
571EXPORT_SYMBOL_GPL(ipu_cpmem_set_image); 675EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
572 676
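A hedged usage sketch of the reworked ipu_cpmem_set_image(): the series
moves struct ipu_image from a single phys to phys0/phys1, so a caller
without double buffering passes the same address twice (buf_phys and ch
are assumed to exist in the caller):

	/* Sketch only: full-frame 640x480 NV12 buffer on channel ch. */
	struct ipu_image img = {
		.pix = {
			.pixelformat  = V4L2_PIX_FMT_NV12,
			.width        = 640,
			.height       = 480,
			.bytesperline = 640,
		},
		.rect  = { .left = 0, .top = 0, .width = 640, .height = 480 },
		.phys0 = buf_phys,	/* buffer 0 DMA address (assumed) */
		.phys1 = buf_phys,	/* no double buffering: reuse it  */
	};
	int ret = ipu_cpmem_set_image(ch, &img);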
677void ipu_cpmem_dump(struct ipuv3_channel *ch)
678{
679 struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
680 struct ipu_soc *ipu = ch->ipu;
681 int chno = ch->num;
682
683 dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno,
684 readl(&p->word[0].data[0]),
685 readl(&p->word[0].data[1]),
686 readl(&p->word[0].data[2]),
687 readl(&p->word[0].data[3]),
688 readl(&p->word[0].data[4]));
689 dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno,
690 readl(&p->word[1].data[0]),
691 readl(&p->word[1].data[1]),
692 readl(&p->word[1].data[2]),
693 readl(&p->word[1].data[3]),
694 readl(&p->word[1].data[4]));
695 dev_dbg(ipu->dev, "PFS 0x%x, ",
696 ipu_ch_param_read_field(ch, IPU_FIELD_PFS));
697 dev_dbg(ipu->dev, "BPP 0x%x, ",
698 ipu_ch_param_read_field(ch, IPU_FIELD_BPP));
699 dev_dbg(ipu->dev, "NPB 0x%x\n",
700 ipu_ch_param_read_field(ch, IPU_FIELD_NPB));
701
702 dev_dbg(ipu->dev, "FW %d, ",
703 ipu_ch_param_read_field(ch, IPU_FIELD_FW));
704 dev_dbg(ipu->dev, "FH %d, ",
705 ipu_ch_param_read_field(ch, IPU_FIELD_FH));
706 dev_dbg(ipu->dev, "EBA0 0x%x\n",
707 ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3);
708 dev_dbg(ipu->dev, "EBA1 0x%x\n",
709 ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3);
710 dev_dbg(ipu->dev, "Stride %d\n",
711 ipu_ch_param_read_field(ch, IPU_FIELD_SL));
712 dev_dbg(ipu->dev, "scan_order %d\n",
713 ipu_ch_param_read_field(ch, IPU_FIELD_SO));
714 dev_dbg(ipu->dev, "uv_stride %d\n",
715 ipu_ch_param_read_field(ch, IPU_FIELD_SLUV));
716 dev_dbg(ipu->dev, "u_offset 0x%x\n",
717 ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3);
718 dev_dbg(ipu->dev, "v_offset 0x%x\n",
719 ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3);
720
721 dev_dbg(ipu->dev, "Width0 %d+1, ",
722 ipu_ch_param_read_field(ch, IPU_FIELD_WID0));
723 dev_dbg(ipu->dev, "Width1 %d+1, ",
724 ipu_ch_param_read_field(ch, IPU_FIELD_WID1));
725 dev_dbg(ipu->dev, "Width2 %d+1, ",
726 ipu_ch_param_read_field(ch, IPU_FIELD_WID2));
727 dev_dbg(ipu->dev, "Width3 %d+1, ",
728 ipu_ch_param_read_field(ch, IPU_FIELD_WID3));
729 dev_dbg(ipu->dev, "Offset0 %d, ",
730 ipu_ch_param_read_field(ch, IPU_FIELD_OFS0));
731 dev_dbg(ipu->dev, "Offset1 %d, ",
732 ipu_ch_param_read_field(ch, IPU_FIELD_OFS1));
733 dev_dbg(ipu->dev, "Offset2 %d, ",
734 ipu_ch_param_read_field(ch, IPU_FIELD_OFS2));
735 dev_dbg(ipu->dev, "Offset3 %d\n",
736 ipu_ch_param_read_field(ch, IPU_FIELD_OFS3));
737}
738EXPORT_SYMBOL_GPL(ipu_cpmem_dump);
739
573int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base) 740int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
574{ 741{
575 struct ipu_cpmem *cpmem; 742 struct ipu_cpmem *cpmem;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
new file mode 100644
index 000000000000..d6f56471bd2a
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -0,0 +1,741 @@
1/*
2 * Copyright (C) 2012-2014 Mentor Graphics Inc.
3 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15#include <linux/export.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/delay.h>
20#include <linux/io.h>
21#include <linux/err.h>
22#include <linux/platform_device.h>
23#include <linux/videodev2.h>
24#include <uapi/linux/v4l2-mediabus.h>
25#include <linux/clk.h>
26#include <linux/clk-provider.h>
27#include <linux/clkdev.h>
28
29#include "ipu-prv.h"
30
31struct ipu_csi {
32 void __iomem *base;
33 int id;
34 u32 module;
35 struct clk *clk_ipu; /* IPU bus clock */
36 spinlock_t lock;
37 bool inuse;
38 struct ipu_soc *ipu;
39};
40
41/* CSI Register Offsets */
42#define CSI_SENS_CONF 0x0000
43#define CSI_SENS_FRM_SIZE 0x0004
44#define CSI_ACT_FRM_SIZE 0x0008
45#define CSI_OUT_FRM_CTRL 0x000c
46#define CSI_TST_CTRL 0x0010
47#define CSI_CCIR_CODE_1 0x0014
48#define CSI_CCIR_CODE_2 0x0018
49#define CSI_CCIR_CODE_3 0x001c
50#define CSI_MIPI_DI 0x0020
51#define CSI_SKIP 0x0024
52#define CSI_CPD_CTRL 0x0028
53#define CSI_CPD_RC(n) (0x002c + ((n)*4))
54#define CSI_CPD_RS(n) (0x004c + ((n)*4))
55#define CSI_CPD_GRC(n) (0x005c + ((n)*4))
56#define CSI_CPD_GRS(n) (0x007c + ((n)*4))
57#define CSI_CPD_GBC(n) (0x008c + ((n)*4))
58#define CSI_CPD_GBS(n) (0x00ac + ((n)*4))
59#define CSI_CPD_BC(n) (0x00bc + ((n)*4))
60#define CSI_CPD_BS(n) (0x00dc + ((n)*4))
61#define CSI_CPD_OFFSET1 0x00ec
62#define CSI_CPD_OFFSET2 0x00f0
63
64/* CSI Register Fields */
65#define CSI_SENS_CONF_DATA_FMT_SHIFT 8
66#define CSI_SENS_CONF_DATA_FMT_MASK 0x00000700
67#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444 0L
68#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV 1L
69#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY 2L
70#define CSI_SENS_CONF_DATA_FMT_BAYER 3L
71#define CSI_SENS_CONF_DATA_FMT_RGB565 4L
72#define CSI_SENS_CONF_DATA_FMT_RGB555 5L
73#define CSI_SENS_CONF_DATA_FMT_RGB444 6L
74#define CSI_SENS_CONF_DATA_FMT_JPEG 7L
75
76#define CSI_SENS_CONF_VSYNC_POL_SHIFT 0
77#define CSI_SENS_CONF_HSYNC_POL_SHIFT 1
78#define CSI_SENS_CONF_DATA_POL_SHIFT 2
79#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT 3
80#define CSI_SENS_CONF_SENS_PRTCL_MASK 0x00000070
81#define CSI_SENS_CONF_SENS_PRTCL_SHIFT 4
82#define CSI_SENS_CONF_PACK_TIGHT_SHIFT 7
83#define CSI_SENS_CONF_DATA_WIDTH_SHIFT 11
84#define CSI_SENS_CONF_EXT_VSYNC_SHIFT 15
85#define CSI_SENS_CONF_DIVRATIO_SHIFT 16
86
87#define CSI_SENS_CONF_DIVRATIO_MASK 0x00ff0000
88#define CSI_SENS_CONF_DATA_DEST_SHIFT 24
89#define CSI_SENS_CONF_DATA_DEST_MASK 0x07000000
90#define CSI_SENS_CONF_JPEG8_EN_SHIFT 27
91#define CSI_SENS_CONF_JPEG_EN_SHIFT 28
92#define CSI_SENS_CONF_FORCE_EOF_SHIFT 29
93#define CSI_SENS_CONF_DATA_EN_POL_SHIFT 31
94
95#define CSI_DATA_DEST_IC 2
96#define CSI_DATA_DEST_IDMAC 4
97
98#define CSI_CCIR_ERR_DET_EN 0x01000000
99#define CSI_HORI_DOWNSIZE_EN 0x80000000
100#define CSI_VERT_DOWNSIZE_EN 0x40000000
101#define CSI_TEST_GEN_MODE_EN 0x01000000
102
103#define CSI_HSC_MASK 0x1fff0000
104#define CSI_HSC_SHIFT 16
105#define CSI_VSC_MASK 0x00000fff
106#define CSI_VSC_SHIFT 0
107
108#define CSI_TEST_GEN_R_MASK 0x000000ff
109#define CSI_TEST_GEN_R_SHIFT 0
110#define CSI_TEST_GEN_G_MASK 0x0000ff00
111#define CSI_TEST_GEN_G_SHIFT 8
112#define CSI_TEST_GEN_B_MASK 0x00ff0000
113#define CSI_TEST_GEN_B_SHIFT 16
114
115#define CSI_MAX_RATIO_SKIP_SMFC_MASK 0x00000007
116#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT 0
117#define CSI_SKIP_SMFC_MASK 0x000000f8
118#define CSI_SKIP_SMFC_SHIFT 3
119#define CSI_ID_2_SKIP_MASK 0x00000300
120#define CSI_ID_2_SKIP_SHIFT 8
121
122#define CSI_COLOR_FIRST_ROW_MASK 0x00000002
123#define CSI_COLOR_FIRST_COMP_MASK 0x00000001
124
125/* MIPI CSI-2 data types */
126#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
127#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
128#define MIPI_DT_YUV422 0x1e /* UYVY... */
129#define MIPI_DT_RGB444 0x20
130#define MIPI_DT_RGB555 0x21
131#define MIPI_DT_RGB565 0x22
132#define MIPI_DT_RGB666 0x23
133#define MIPI_DT_RGB888 0x24
134#define MIPI_DT_RAW6 0x28
135#define MIPI_DT_RAW7 0x29
136#define MIPI_DT_RAW8 0x2a
137#define MIPI_DT_RAW10 0x2b
138#define MIPI_DT_RAW12 0x2c
139#define MIPI_DT_RAW14 0x2d
140
141/*
142 * Bitfield of CSI bus signal polarities and modes.
143 */
144struct ipu_csi_bus_config {
145 unsigned data_width:4;
146 unsigned clk_mode:3;
147 unsigned ext_vsync:1;
148 unsigned vsync_pol:1;
149 unsigned hsync_pol:1;
150 unsigned pixclk_pol:1;
151 unsigned data_pol:1;
152 unsigned sens_clksrc:1;
153 unsigned pack_tight:1;
154 unsigned force_eof:1;
155 unsigned data_en_pol:1;
156
157 unsigned data_fmt;
158 unsigned mipi_dt;
159};
160
161/*
162 * Enumeration of CSI data bus widths.
163 */
164enum ipu_csi_data_width {
165 IPU_CSI_DATA_WIDTH_4 = 0,
166 IPU_CSI_DATA_WIDTH_8 = 1,
167 IPU_CSI_DATA_WIDTH_10 = 3,
168 IPU_CSI_DATA_WIDTH_12 = 5,
169 IPU_CSI_DATA_WIDTH_16 = 9,
170};
171
172/*
173 * Enumeration of CSI clock modes.
174 */
175enum ipu_csi_clk_mode {
176 IPU_CSI_CLK_MODE_GATED_CLK,
177 IPU_CSI_CLK_MODE_NONGATED_CLK,
178 IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
179 IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
180 IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
181 IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
182 IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
183 IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
184};
185
186static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset)
187{
188 return readl(csi->base + offset);
189}
190
191static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
192 unsigned offset)
193{
194 writel(value, csi->base + offset);
195}
196
197/*
198 * Set the mclk division ratio used to generate the test-pattern mclk.
199 * Used only by the test generator.
200 */
201static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
202 u32 ipu_clk)
203{
204 u32 temp;
205 u32 div_ratio;
206
207 div_ratio = (ipu_clk / pixel_clk) - 1;
208
209 if (div_ratio > 0xFF) {
210 dev_err(csi->ipu->dev,
211 "value of pixel_clk exceeds normal range\n");
212 return -EINVAL;
213 }
214
215 temp = ipu_csi_read(csi, CSI_SENS_CONF);
216 temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
217 ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
218 CSI_SENS_CONF);
219
220 return 0;
221}
222
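The divider math above, worked through once with illustrative numbers:

	/* e.g. ipu_clk = 264 MHz, requested test-pattern clock = 24 MHz */
	u32 div_ratio = (264000000 / 24000000) - 1;	/* = 10, fits in 8 bits */
	/* the generator then runs at ipu_clk / (div_ratio + 1) = 24 MHz */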
223/*
224 * Find the CSI data format and data width for the given V4L2 media
225 * bus pixel format code.
226 */
227static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
228{
229 switch (mbus_code) {
230 case V4L2_MBUS_FMT_BGR565_2X8_BE:
231 case V4L2_MBUS_FMT_BGR565_2X8_LE:
232 case V4L2_MBUS_FMT_RGB565_2X8_BE:
233 case V4L2_MBUS_FMT_RGB565_2X8_LE:
234 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
235 cfg->mipi_dt = MIPI_DT_RGB565;
236 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
237 break;
238 case V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE:
239 case V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE:
240 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
241 cfg->mipi_dt = MIPI_DT_RGB444;
242 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
243 break;
244 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
245 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
246 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
247 cfg->mipi_dt = MIPI_DT_RGB555;
248 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
249 break;
250 case V4L2_MBUS_FMT_UYVY8_2X8:
251 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
252 cfg->mipi_dt = MIPI_DT_YUV422;
253 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
254 break;
255 case V4L2_MBUS_FMT_YUYV8_2X8:
256 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
257 cfg->mipi_dt = MIPI_DT_YUV422;
258 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
259 break;
260 case V4L2_MBUS_FMT_UYVY8_1X16:
261 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
262 cfg->mipi_dt = MIPI_DT_YUV422;
263 cfg->data_width = IPU_CSI_DATA_WIDTH_16;
264 break;
265 case V4L2_MBUS_FMT_YUYV8_1X16:
266 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
267 cfg->mipi_dt = MIPI_DT_YUV422;
268 cfg->data_width = IPU_CSI_DATA_WIDTH_16;
269 break;
270 case V4L2_MBUS_FMT_SBGGR8_1X8:
271 case V4L2_MBUS_FMT_SGBRG8_1X8:
272 case V4L2_MBUS_FMT_SGRBG8_1X8:
273 case V4L2_MBUS_FMT_SRGGB8_1X8:
274 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
275 cfg->mipi_dt = MIPI_DT_RAW8;
276 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
277 break;
278 case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
279 case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
280 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
281 case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
282 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
283 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
284 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
285 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
286 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
287 cfg->mipi_dt = MIPI_DT_RAW10;
288 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
289 break;
290 case V4L2_MBUS_FMT_SBGGR10_1X10:
291 case V4L2_MBUS_FMT_SGBRG10_1X10:
292 case V4L2_MBUS_FMT_SGRBG10_1X10:
293 case V4L2_MBUS_FMT_SRGGB10_1X10:
294 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
295 cfg->mipi_dt = MIPI_DT_RAW10;
296 cfg->data_width = IPU_CSI_DATA_WIDTH_10;
297 break;
298 case V4L2_MBUS_FMT_SBGGR12_1X12:
299 case V4L2_MBUS_FMT_SGBRG12_1X12:
300 case V4L2_MBUS_FMT_SGRBG12_1X12:
301 case V4L2_MBUS_FMT_SRGGB12_1X12:
302 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
303 cfg->mipi_dt = MIPI_DT_RAW12;
304 cfg->data_width = IPU_CSI_DATA_WIDTH_12;
305 break;
306 case V4L2_MBUS_FMT_JPEG_1X8:
307 /* TODO */
308 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
309 cfg->mipi_dt = MIPI_DT_RAW8;
310 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
311 break;
312 default:
313 return -EINVAL;
314 }
315
316 return 0;
317}
318
319/*
320 * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
321 */
322static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
323 struct v4l2_mbus_config *mbus_cfg,
324 struct v4l2_mbus_framefmt *mbus_fmt)
325{
326 memset(csicfg, 0, sizeof(*csicfg));
327
328 mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
329
330 switch (mbus_cfg->type) {
331 case V4L2_MBUS_PARALLEL:
332 csicfg->ext_vsync = 1;
333 csicfg->vsync_pol = (mbus_cfg->flags &
334 V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
335 csicfg->hsync_pol = (mbus_cfg->flags &
336 V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
337 csicfg->pixclk_pol = (mbus_cfg->flags &
338 V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
339 csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
340 break;
341 case V4L2_MBUS_BT656:
342 csicfg->ext_vsync = 0;
343 if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
344 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
345 else
346 csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
347 break;
348 case V4L2_MBUS_CSI2:
349 /*
350 * MIPI CSI-2 requires non-gated clock mode; all other
351 * parameters are not applicable to the MIPI CSI-2 bus.
352 */
353 csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK;
354 break;
355 default:
356 /* will never get here, keep compiler quiet */
357 break;
358 }
359}
360
361int ipu_csi_init_interface(struct ipu_csi *csi,
362 struct v4l2_mbus_config *mbus_cfg,
363 struct v4l2_mbus_framefmt *mbus_fmt)
364{
365 struct ipu_csi_bus_config cfg;
366 unsigned long flags;
367 u32 data = 0;
368
369 fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
370
371 /* Set the CSI_SENS_CONF register remaining fields */
372 data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
373 cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
374 cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
375 cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
376 cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
377 cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
378 cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
379 cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
380 cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
381 cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
382 cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
383
384 spin_lock_irqsave(&csi->lock, flags);
385
386 ipu_csi_write(csi, data, CSI_SENS_CONF);
387
388 /* Setup sensor frame size */
389 ipu_csi_write(csi,
390 (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16),
391 CSI_SENS_FRM_SIZE);
392
393 /* Set CCIR registers */
394
395 switch (cfg.clk_mode) {
396 case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
397 ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1);
398 ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
399 break;
400 case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
401 if (mbus_fmt->width == 720 && mbus_fmt->height == 576) {
402 /*
403 * PAL case
404 *
405 * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
406 * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
407 * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
408 * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
409 */
410 ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
411 CSI_CCIR_CODE_1);
412 ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
413 ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
414
415 } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
416 /*
417 * NTSC case
418 *
419 * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
420 * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
421 * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
422 * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
423 */
424 ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
425 CSI_CCIR_CODE_1);
426 ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
427 ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
428 } else {
429 dev_err(csi->ipu->dev,
430 "Unsupported CCIR656 interlaced video mode\n");
431 spin_unlock_irqrestore(&csi->lock, flags);
432 return -EINVAL;
433 }
434 break;
435 case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
436 case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
437 case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
438 case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
439 ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN,
440 CSI_CCIR_CODE_1);
441 ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
442 break;
443 case IPU_CSI_CLK_MODE_GATED_CLK:
444 case IPU_CSI_CLK_MODE_NONGATED_CLK:
445 ipu_csi_write(csi, 0, CSI_CCIR_CODE_1);
446 break;
447 }
448
449 dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
450 ipu_csi_read(csi, CSI_SENS_CONF));
451 dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
452 ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
453
454 spin_unlock_irqrestore(&csi->lock, flags);
455
456 return 0;
457}
458EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
459
460bool ipu_csi_is_interlaced(struct ipu_csi *csi)
461{
462 unsigned long flags;
463 u32 sensor_protocol;
464
465 spin_lock_irqsave(&csi->lock, flags);
466 sensor_protocol =
467 (ipu_csi_read(csi, CSI_SENS_CONF) &
468 CSI_SENS_CONF_SENS_PRTCL_MASK) >>
469 CSI_SENS_CONF_SENS_PRTCL_SHIFT;
470 spin_unlock_irqrestore(&csi->lock, flags);
471
472 switch (sensor_protocol) {
473 case IPU_CSI_CLK_MODE_GATED_CLK:
474 case IPU_CSI_CLK_MODE_NONGATED_CLK:
475 case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
476 case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
477 case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
478 return false;
479 case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
480 case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
481 case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
482 return true;
483 default:
484 dev_err(csi->ipu->dev,
485 "CSI %d sensor protocol unsupported\n", csi->id);
486 return false;
487 }
488}
489EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
490
491void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
492{
493 unsigned long flags;
494 u32 reg;
495
496 spin_lock_irqsave(&csi->lock, flags);
497
498 reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
499 w->width = (reg & 0xFFFF) + 1;
500 w->height = (reg >> 16 & 0xFFFF) + 1;
501
502 reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
503 w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
504 w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
505
506 spin_unlock_irqrestore(&csi->lock, flags);
507}
508EXPORT_SYMBOL_GPL(ipu_csi_get_window);
509
510void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
511{
512 unsigned long flags;
513 u32 reg;
514
515 spin_lock_irqsave(&csi->lock, flags);
516
517 ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16),
518 CSI_ACT_FRM_SIZE);
519
520 reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
521 reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
522 reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT));
523 ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
524
525 spin_unlock_irqrestore(&csi->lock, flags);
526}
527EXPORT_SYMBOL_GPL(ipu_csi_set_window);
528
529void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
530 u32 r_value, u32 g_value, u32 b_value,
531 u32 pix_clk)
532{
533 unsigned long flags;
534 u32 ipu_clk = clk_get_rate(csi->clk_ipu);
535 u32 temp;
536
537 spin_lock_irqsave(&csi->lock, flags);
538
539 temp = ipu_csi_read(csi, CSI_TST_CTRL);
540
541 if (!active) {
542 temp &= ~CSI_TEST_GEN_MODE_EN;
543 ipu_csi_write(csi, temp, CSI_TST_CTRL);
544 } else {
545 /* Set sensb_mclk div_ratio */
546 ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
547
548 temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
549 CSI_TEST_GEN_B_MASK);
550 temp |= CSI_TEST_GEN_MODE_EN;
551 temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
552 (g_value << CSI_TEST_GEN_G_SHIFT) |
553 (b_value << CSI_TEST_GEN_B_SHIFT);
554 ipu_csi_write(csi, temp, CSI_TST_CTRL);
555 }
556
557 spin_unlock_irqrestore(&csi->lock, flags);
558}
559EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
560
561int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
562 struct v4l2_mbus_framefmt *mbus_fmt)
563{
564 struct ipu_csi_bus_config cfg;
565 unsigned long flags;
566 u32 temp;
567
568 if (vc > 3)
569 return -EINVAL;
570
571 mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
572
573 spin_lock_irqsave(&csi->lock, flags);
574
575 temp = ipu_csi_read(csi, CSI_MIPI_DI);
576 temp &= ~(0xff << (vc * 8));
577 temp |= (cfg.mipi_dt << (vc * 8));
578 ipu_csi_write(csi, temp, CSI_MIPI_DI);
579
580 spin_unlock_irqrestore(&csi->lock, flags);
581
582 return 0;
583}
584EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype);
585
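CSI_MIPI_DI holds one data-type byte per virtual channel (vc 0..3 in bits
7:0 through 31:24), which the read-modify-write above updates. A standalone
illustration:

	u32 vc = 2, mipi_di = 0;
	mipi_di &= ~(0xff << (vc * 8));
	mipi_di |= (0x1e << (vc * 8));	/* MIPI_DT_YUV422 on vc 2: 0x001e0000 */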
586int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
587 u32 max_ratio, u32 id)
588{
589 unsigned long flags;
590 u32 temp;
591
592 if (max_ratio > 5 || id > 3)
593 return -EINVAL;
594
595 spin_lock_irqsave(&csi->lock, flags);
596
597 temp = ipu_csi_read(csi, CSI_SKIP);
598 temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
599 CSI_SKIP_SMFC_MASK);
600 temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
601 (id << CSI_ID_2_SKIP_SHIFT) |
602 (skip << CSI_SKIP_SMFC_SHIFT);
603 ipu_csi_write(csi, temp, CSI_SKIP);
604
605 spin_unlock_irqrestore(&csi->lock, flags);
606
607 return 0;
608}
609EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc);
610
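A hedged usage sketch of SMFC frame skipping, reading the masks above as
one skip bit per frame in a group of max_ratio + 1 frames (the patch does
not spell this out):

	/* Drop every second frame for SMFC id 0: 2-frame group, pattern 0b10,
	 * assuming csi was obtained via ipu_csi_get(). */
	int ret = ipu_csi_set_skip_smfc(csi, 0x2, 1, 0);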
611int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest)
612{
613 unsigned long flags;
614 u32 csi_sens_conf, dest;
615
616 if (csi_dest == IPU_CSI_DEST_IDMAC)
617 dest = CSI_DATA_DEST_IDMAC;
618 else
619 dest = CSI_DATA_DEST_IC; /* IC or VDIC */
620
621 spin_lock_irqsave(&csi->lock, flags);
622
623 csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF);
624 csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
625 csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT);
626 ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF);
627
628 spin_unlock_irqrestore(&csi->lock, flags);
629
630 return 0;
631}
632EXPORT_SYMBOL_GPL(ipu_csi_set_dest);
633
634int ipu_csi_enable(struct ipu_csi *csi)
635{
636 ipu_module_enable(csi->ipu, csi->module);
637
638 return 0;
639}
640EXPORT_SYMBOL_GPL(ipu_csi_enable);
641
642int ipu_csi_disable(struct ipu_csi *csi)
643{
644 ipu_module_disable(csi->ipu, csi->module);
645
646 return 0;
647}
648EXPORT_SYMBOL_GPL(ipu_csi_disable);
649
650struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id)
651{
652 unsigned long flags;
653 struct ipu_csi *csi, *ret;
654
655 if (id > 1)
656 return ERR_PTR(-EINVAL);
657
658 csi = ipu->csi_priv[id];
659 ret = csi;
660
661 spin_lock_irqsave(&csi->lock, flags);
662
663 if (csi->inuse) {
664 ret = ERR_PTR(-EBUSY);
665 goto unlock;
666 }
667
668 csi->inuse = true;
669unlock:
670 spin_unlock_irqrestore(&csi->lock, flags);
671 return ret;
672}
673EXPORT_SYMBOL_GPL(ipu_csi_get);
674
675void ipu_csi_put(struct ipu_csi *csi)
676{
677 unsigned long flags;
678
679 spin_lock_irqsave(&csi->lock, flags);
680 csi->inuse = false;
681 spin_unlock_irqrestore(&csi->lock, flags);
682}
683EXPORT_SYMBOL_GPL(ipu_csi_put);
684
685int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
686 unsigned long base, u32 module, struct clk *clk_ipu)
687{
688 struct ipu_csi *csi;
689
690 if (id > 1)
691 return -ENODEV;
692
693 csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
694 if (!csi)
695 return -ENOMEM;
696
697 ipu->csi_priv[id] = csi;
698
699 spin_lock_init(&csi->lock);
700 csi->module = module;
701 csi->id = id;
702 csi->clk_ipu = clk_ipu;
703 csi->base = devm_ioremap(dev, base, PAGE_SIZE);
704 if (!csi->base)
705 return -ENOMEM;
706
707 dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n",
708 id, base, csi->base);
709 csi->ipu = ipu;
710
711 return 0;
712}
713
714void ipu_csi_exit(struct ipu_soc *ipu, int id)
715{
716}
717
718void ipu_csi_dump(struct ipu_csi *csi)
719{
720 dev_dbg(csi->ipu->dev, "CSI_SENS_CONF: %08x\n",
721 ipu_csi_read(csi, CSI_SENS_CONF));
722 dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n",
723 ipu_csi_read(csi, CSI_SENS_FRM_SIZE));
724 dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE: %08x\n",
725 ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
726 dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL: %08x\n",
727 ipu_csi_read(csi, CSI_OUT_FRM_CTRL));
728 dev_dbg(csi->ipu->dev, "CSI_TST_CTRL: %08x\n",
729 ipu_csi_read(csi, CSI_TST_CTRL));
730 dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1: %08x\n",
731 ipu_csi_read(csi, CSI_CCIR_CODE_1));
732 dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2: %08x\n",
733 ipu_csi_read(csi, CSI_CCIR_CODE_2));
734 dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3: %08x\n",
735 ipu_csi_read(csi, CSI_CCIR_CODE_3));
736 dev_dbg(csi->ipu->dev, "CSI_MIPI_DI: %08x\n",
737 ipu_csi_read(csi, CSI_MIPI_DI));
738 dev_dbg(csi->ipu->dev, "CSI_SKIP: %08x\n",
739 ipu_csi_read(csi, CSI_SKIP));
740}
741EXPORT_SYMBOL_GPL(ipu_csi_dump);
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
new file mode 100644
index 000000000000..ad75588e1629
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -0,0 +1,778 @@
1/*
2 * Copyright (C) 2012-2014 Mentor Graphics Inc.
3 * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/spinlock.h>
17#include <linux/bitrev.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include "ipu-prv.h"
21
22/* IC Register Offsets */
23#define IC_CONF 0x0000
24#define IC_PRP_ENC_RSC 0x0004
25#define IC_PRP_VF_RSC 0x0008
26#define IC_PP_RSC 0x000C
27#define IC_CMBP_1 0x0010
28#define IC_CMBP_2 0x0014
29#define IC_IDMAC_1 0x0018
30#define IC_IDMAC_2 0x001C
31#define IC_IDMAC_3 0x0020
32#define IC_IDMAC_4 0x0024
33
34/* IC Register Fields */
35#define IC_CONF_PRPENC_EN (1 << 0)
36#define IC_CONF_PRPENC_CSC1 (1 << 1)
37#define IC_CONF_PRPENC_ROT_EN (1 << 2)
38#define IC_CONF_PRPVF_EN (1 << 8)
39#define IC_CONF_PRPVF_CSC1 (1 << 9)
40#define IC_CONF_PRPVF_CSC2 (1 << 10)
41#define IC_CONF_PRPVF_CMB (1 << 11)
42#define IC_CONF_PRPVF_ROT_EN (1 << 12)
43#define IC_CONF_PP_EN (1 << 16)
44#define IC_CONF_PP_CSC1 (1 << 17)
45#define IC_CONF_PP_CSC2 (1 << 18)
46#define IC_CONF_PP_CMB (1 << 19)
47#define IC_CONF_PP_ROT_EN (1 << 20)
48#define IC_CONF_IC_GLB_LOC_A (1 << 28)
49#define IC_CONF_KEY_COLOR_EN (1 << 29)
50#define IC_CONF_RWS_EN (1 << 30)
51#define IC_CONF_CSI_MEM_WR_EN (1 << 31)
52
53#define IC_IDMAC_1_CB0_BURST_16 (1 << 0)
54#define IC_IDMAC_1_CB1_BURST_16 (1 << 1)
55#define IC_IDMAC_1_CB2_BURST_16 (1 << 2)
56#define IC_IDMAC_1_CB3_BURST_16 (1 << 3)
57#define IC_IDMAC_1_CB4_BURST_16 (1 << 4)
58#define IC_IDMAC_1_CB5_BURST_16 (1 << 5)
59#define IC_IDMAC_1_CB6_BURST_16 (1 << 6)
60#define IC_IDMAC_1_CB7_BURST_16 (1 << 7)
61#define IC_IDMAC_1_PRPENC_ROT_MASK (0x7 << 11)
62#define IC_IDMAC_1_PRPENC_ROT_OFFSET 11
63#define IC_IDMAC_1_PRPVF_ROT_MASK (0x7 << 14)
64#define IC_IDMAC_1_PRPVF_ROT_OFFSET 14
65#define IC_IDMAC_1_PP_ROT_MASK (0x7 << 17)
66#define IC_IDMAC_1_PP_ROT_OFFSET 17
67#define IC_IDMAC_1_PP_FLIP_RS (1 << 22)
68#define IC_IDMAC_1_PRPVF_FLIP_RS (1 << 21)
69#define IC_IDMAC_1_PRPENC_FLIP_RS (1 << 20)
70
71#define IC_IDMAC_2_PRPENC_HEIGHT_MASK (0x3ff << 0)
72#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0
73#define IC_IDMAC_2_PRPVF_HEIGHT_MASK (0x3ff << 10)
74#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET 10
75#define IC_IDMAC_2_PP_HEIGHT_MASK (0x3ff << 20)
76#define IC_IDMAC_2_PP_HEIGHT_OFFSET 20
77
78#define IC_IDMAC_3_PRPENC_WIDTH_MASK (0x3ff << 0)
79#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET 0
80#define IC_IDMAC_3_PRPVF_WIDTH_MASK (0x3ff << 10)
81#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET 10
82#define IC_IDMAC_3_PP_WIDTH_MASK (0x3ff << 20)
83#define IC_IDMAC_3_PP_WIDTH_OFFSET 20
84
85struct ic_task_regoffs {
86 u32 rsc;
87 u32 tpmem_csc[2];
88};
89
90struct ic_task_bitfields {
91 u32 ic_conf_en;
92 u32 ic_conf_rot_en;
93 u32 ic_conf_cmb_en;
94 u32 ic_conf_csc1_en;
95 u32 ic_conf_csc2_en;
96 u32 ic_cmb_galpha_bit;
97};
98
99static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = {
100 [IC_TASK_ENCODER] = {
101 .rsc = IC_PRP_ENC_RSC,
102 .tpmem_csc = {0x2008, 0},
103 },
104 [IC_TASK_VIEWFINDER] = {
105 .rsc = IC_PRP_VF_RSC,
106 .tpmem_csc = {0x4028, 0x4040},
107 },
108 [IC_TASK_POST_PROCESSOR] = {
109 .rsc = IC_PP_RSC,
110 .tpmem_csc = {0x6060, 0x6078},
111 },
112};
113
114static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = {
115 [IC_TASK_ENCODER] = {
116 .ic_conf_en = IC_CONF_PRPENC_EN,
117 .ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN,
118 .ic_conf_cmb_en = 0, /* NA */
119 .ic_conf_csc1_en = IC_CONF_PRPENC_CSC1,
120 .ic_conf_csc2_en = 0, /* NA */
121 .ic_cmb_galpha_bit = 0, /* NA */
122 },
123 [IC_TASK_VIEWFINDER] = {
124 .ic_conf_en = IC_CONF_PRPVF_EN,
125 .ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN,
126 .ic_conf_cmb_en = IC_CONF_PRPVF_CMB,
127 .ic_conf_csc1_en = IC_CONF_PRPVF_CSC1,
128 .ic_conf_csc2_en = IC_CONF_PRPVF_CSC2,
129 .ic_cmb_galpha_bit = 0,
130 },
131 [IC_TASK_POST_PROCESSOR] = {
132 .ic_conf_en = IC_CONF_PP_EN,
133 .ic_conf_rot_en = IC_CONF_PP_ROT_EN,
134 .ic_conf_cmb_en = IC_CONF_PP_CMB,
135 .ic_conf_csc1_en = IC_CONF_PP_CSC1,
136 .ic_conf_csc2_en = IC_CONF_PP_CSC2,
137 .ic_cmb_galpha_bit = 8,
138 },
139};
140
141struct ipu_ic_priv;
142
143struct ipu_ic {
144 enum ipu_ic_task task;
145 const struct ic_task_regoffs *reg;
146 const struct ic_task_bitfields *bit;
147
148 enum ipu_color_space in_cs, g_in_cs;
149 enum ipu_color_space out_cs;
150 bool graphics;
151 bool rotation;
152 bool in_use;
153
154 struct ipu_ic_priv *priv;
155};
156
157struct ipu_ic_priv {
158 void __iomem *base;
159 void __iomem *tpmem_base;
160 spinlock_t lock;
161 struct ipu_soc *ipu;
162 int use_count;
163 struct ipu_ic task[IC_NUM_TASKS];
164};
165
166static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
167{
168 return readl(ic->priv->base + offset);
169}
170
171static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
172{
173 writel(value, ic->priv->base + offset);
174}
175
176struct ic_csc_params {
177 s16 coeff[3][3]; /* signed 9-bit integer coefficients */
178 s16 offset[3]; /* signed 11+2-bit fixed point offset */
179 u8 scale:2; /* scale coefficients * 2^(scale-1) */
180 bool sat:1; /* saturate Y to 16..235, U/V to 16..240 */
181};
182
183/*
184 * Y = R * .299 + G * .587 + B * .114;
185 * U = R * -.169 + G * -.332 + B * .500 + 128.;
186 * V = R * .500 + G * -.419 + B * -.0813 + 128.;
187 */
188static const struct ic_csc_params ic_csc_rgb2ycbcr = {
189 .coeff = {
190 { 77, 150, 29 },
191 { 469, 427, 128 },
192 { 128, 405, 491 },
193 },
194 .offset = { 0, 512, 512 },
195 .scale = 1,
196};
197
198/* transparent RGB->RGB matrix for graphics combining */
199static const struct ic_csc_params ic_csc_rgb2rgb = {
200 .coeff = {
201 { 128, 0, 0 },
202 { 0, 128, 0 },
203 { 0, 0, 128 },
204 },
205 .scale = 2,
206};
207
208/*
209 * R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
210 * G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
211 * B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128));
212 */
213static const struct ic_csc_params ic_csc_ycbcr2rgb = {
214 .coeff = {
215 { 149, 0, 204 },
216 { 149, 462, 408 },
217 { 149, 255, 0 },
218 },
219 .offset = { -446, 266, -554 },
220 .scale = 2,
221};
222
223static int init_csc(struct ipu_ic *ic,
224 enum ipu_color_space inf,
225 enum ipu_color_space outf,
226 int csc_index)
227{
228 struct ipu_ic_priv *priv = ic->priv;
229 const struct ic_csc_params *params;
230 u32 __iomem *base;
231 const u16 (*c)[3];
232 const u16 *a;
233 u32 param;
234
235 base = (u32 __iomem *)
236 (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
237
238 if (inf == IPUV3_COLORSPACE_YUV && outf == IPUV3_COLORSPACE_RGB)
239 params = &ic_csc_ycbcr2rgb;
240 else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_YUV)
241 params = &ic_csc_rgb2ycbcr;
242 else if (inf == IPUV3_COLORSPACE_RGB && outf == IPUV3_COLORSPACE_RGB)
243 params = &ic_csc_rgb2rgb;
244 else {
245 dev_err(priv->ipu->dev, "Unsupported color space conversion\n");
246 return -EINVAL;
247 }
248
249 /* Cast to unsigned */
250 c = (const u16 (*)[3])params->coeff;
251 a = (const u16 *)params->offset;
252
253 param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) |
254 ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff);
255 writel(param, base++);
256
257 param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
258 (params->sat << 9);
259 writel(param, base++);
260
261 param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
262 ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff);
263 writel(param, base++);
264
265 param = ((a[1] & 0x1fe0) >> 5);
266 writel(param, base++);
267
268 param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) |
269 ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff);
270 writel(param, base++);
271
272 param = ((a[2] & 0x1fe0) >> 5);
273 writel(param, base++);
274
275 return 0;
276}
277
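The table values above map back to the textbook coefficients once the
9-bit two's-complement encoding and the scale field are unpacked. A
host-side decoding helper, mine and for illustration only:

	/* value = c/256 scaled by 2^(scale-1); c is the raw 9-bit field */
	static double ic_coeff_to_double(unsigned int c, unsigned int scale)
	{
		int v = (c & 0x100) ? (int)c - 512 : (int)c;	/* sign-extend */
		return (double)v / 256.0 * (double)(1 << (scale - 1));
	}
	/* ic_coeff_to_double(77, 1)  ~  0.301  (rgb2ycbcr:  0.299) */
	/* ic_coeff_to_double(469, 1) ~ -0.168  (rgb2ycbcr: -0.169) */
	/* ic_coeff_to_double(149, 2) ~  1.164  (ycbcr2rgb:  1.164) */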
278static int calc_resize_coeffs(struct ipu_ic *ic,
279 u32 in_size, u32 out_size,
280 u32 *resize_coeff,
281 u32 *downsize_coeff)
282{
283 struct ipu_ic_priv *priv = ic->priv;
284 struct ipu_soc *ipu = priv->ipu;
285 u32 temp_size, temp_downsize;
286
287 /*
288 * Input size cannot be more than 4096, and output size cannot
289 * be more than 1024
290 */
291 if (in_size > 4096) {
292 dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n");
293 return -EINVAL;
294 }
295 if (out_size > 1024) {
296 dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n");
297 return -EINVAL;
298 }
299
300 /* Cannot downsize more than 8:1 */
301 if ((out_size << 3) < in_size) {
302 dev_err(ipu->dev, "Unsupported downsize\n");
303 return -EINVAL;
304 }
305
306 /* Compute downsizing coefficient */
307 temp_downsize = 0;
308 temp_size = in_size;
309 while (((temp_size > 1024) || (temp_size >= out_size * 2)) &&
310 (temp_downsize < 2)) {
311 temp_size >>= 1;
312 temp_downsize++;
313 }
314 *downsize_coeff = temp_downsize;
315
316 /*
317 * compute resizing coefficient using the following equation:
318 * resize_coeff = M * (SI - 1) / (SO - 1)
319 * where M = 2^13, SI = input size, SO = output size
320 */
321 *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
322 if (*resize_coeff >= 16384L) {
323 dev_warn(ipu->dev, "resize coefficient overflow, clamping\n");
324 *resize_coeff = 0x3FFF;
325 }
326
327 return 0;
328}
329
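Worked numbers for calc_resize_coeffs(), downscaling 1920 to 720 pixels
(my arithmetic); ipu_ic_task_init() below then packs the vertical pair
into bits 31:30/29:16 and the horizontal pair into bits 15:14/13:0 of the
task's rsc register:

	/*
	 * pre-decimation: 1920 > 1024, halve once -> temp_size = 960,
	 *                 downsize_coeff = 1
	 * resize_coeff  = 8192 * (960 - 1) / (720 - 1) = 10926  (< 0x3FFF)
	 */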
330void ipu_ic_task_enable(struct ipu_ic *ic)
331{
332 struct ipu_ic_priv *priv = ic->priv;
333 unsigned long flags;
334 u32 ic_conf;
335
336 spin_lock_irqsave(&priv->lock, flags);
337
338 ic_conf = ipu_ic_read(ic, IC_CONF);
339
340 ic_conf |= ic->bit->ic_conf_en;
341
342 if (ic->rotation)
343 ic_conf |= ic->bit->ic_conf_rot_en;
344
345 if (ic->in_cs != ic->out_cs)
346 ic_conf |= ic->bit->ic_conf_csc1_en;
347
348 if (ic->graphics) {
349 ic_conf |= ic->bit->ic_conf_cmb_en;
350 ic_conf |= ic->bit->ic_conf_csc1_en;
351
352 if (ic->g_in_cs != ic->out_cs)
353 ic_conf |= ic->bit->ic_conf_csc2_en;
354 }
355
356 ipu_ic_write(ic, ic_conf, IC_CONF);
357
358 spin_unlock_irqrestore(&priv->lock, flags);
359}
360EXPORT_SYMBOL_GPL(ipu_ic_task_enable);
361
362void ipu_ic_task_disable(struct ipu_ic *ic)
363{
364 struct ipu_ic_priv *priv = ic->priv;
365 unsigned long flags;
366 u32 ic_conf;
367
368 spin_lock_irqsave(&priv->lock, flags);
369
370 ic_conf = ipu_ic_read(ic, IC_CONF);
371
372 ic_conf &= ~(ic->bit->ic_conf_en |
373 ic->bit->ic_conf_csc1_en |
374 ic->bit->ic_conf_rot_en);
375 if (ic->bit->ic_conf_csc2_en)
376 ic_conf &= ~ic->bit->ic_conf_csc2_en;
377 if (ic->bit->ic_conf_cmb_en)
378 ic_conf &= ~ic->bit->ic_conf_cmb_en;
379
380 ipu_ic_write(ic, ic_conf, IC_CONF);
381
382 ic->rotation = ic->graphics = false;
383
384 spin_unlock_irqrestore(&priv->lock, flags);
385}
386EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
387
388int ipu_ic_task_graphics_init(struct ipu_ic *ic,
389 enum ipu_color_space in_g_cs,
390 bool galpha_en, u32 galpha,
391 bool colorkey_en, u32 colorkey)
392{
393 struct ipu_ic_priv *priv = ic->priv;
394 unsigned long flags;
395 u32 reg, ic_conf;
396 int ret = 0;
397
398 if (ic->task == IC_TASK_ENCODER)
399 return -EINVAL;
400
401 spin_lock_irqsave(&priv->lock, flags);
402
403 ic_conf = ipu_ic_read(ic, IC_CONF);
404
405 if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
406 /* need transparent CSC1 conversion */
407 ret = init_csc(ic, IPUV3_COLORSPACE_RGB,
408 IPUV3_COLORSPACE_RGB, 0);
409 if (ret)
410 goto unlock;
411 }
412
413 ic->g_in_cs = in_g_cs;
414
415 if (ic->g_in_cs != ic->out_cs) {
416 ret = init_csc(ic, ic->g_in_cs, ic->out_cs, 1);
417 if (ret)
418 goto unlock;
419 }
420
421 if (galpha_en) {
422 ic_conf |= IC_CONF_IC_GLB_LOC_A;
423 reg = ipu_ic_read(ic, IC_CMBP_1);
424 reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
425 reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
426 ipu_ic_write(ic, reg, IC_CMBP_1);
427 } else
428 ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
429
430 if (colorkey_en) {
431 ic_conf |= IC_CONF_KEY_COLOR_EN;
432 ipu_ic_write(ic, colorkey, IC_CMBP_2);
433 } else
434 ic_conf &= ~IC_CONF_KEY_COLOR_EN;
435
436 ipu_ic_write(ic, ic_conf, IC_CONF);
437
438 ic->graphics = true;
439unlock:
440 spin_unlock_irqrestore(&priv->lock, flags);
441 return ret;
442}
443EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
444
445int ipu_ic_task_init(struct ipu_ic *ic,
446 int in_width, int in_height,
447 int out_width, int out_height,
448 enum ipu_color_space in_cs,
449 enum ipu_color_space out_cs)
450{
451 struct ipu_ic_priv *priv = ic->priv;
452 u32 reg, downsize_coeff, resize_coeff;
453 unsigned long flags;
454 int ret = 0;
455
456 /* Setup vertical resizing */
457 ret = calc_resize_coeffs(ic, in_height, out_height,
458 &resize_coeff, &downsize_coeff);
459 if (ret)
460 return ret;
461
462 reg = (downsize_coeff << 30) | (resize_coeff << 16);
463
464 /* Setup horizontal resizing */
465 ret = calc_resize_coeffs(ic, in_width, out_width,
466 &resize_coeff, &downsize_coeff);
467 if (ret)
468 return ret;
469
470 reg |= (downsize_coeff << 14) | resize_coeff;
471
472 spin_lock_irqsave(&priv->lock, flags);
473
474 ipu_ic_write(ic, reg, ic->reg->rsc);
475
476 /* Setup color space conversion */
477 ic->in_cs = in_cs;
478 ic->out_cs = out_cs;
479
480 if (ic->in_cs != ic->out_cs) {
481 ret = init_csc(ic, ic->in_cs, ic->out_cs, 0);
482 if (ret)
483 goto unlock;
484 }
485
486unlock:
487 spin_unlock_irqrestore(&priv->lock, flags);
488 return ret;
489}
490EXPORT_SYMBOL_GPL(ipu_ic_task_init);
491
492int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
493 u32 width, u32 height, int burst_size,
494 enum ipu_rotate_mode rot)
495{
496 struct ipu_ic_priv *priv = ic->priv;
497 struct ipu_soc *ipu = priv->ipu;
498 u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
499 u32 temp_rot = bitrev8(rot) >> 5;
500 bool need_hor_flip = false;
501 unsigned long flags;
502 int ret = 0;
503
504 if ((burst_size != 8) && (burst_size != 16)) {
505 dev_err(ipu->dev, "Illegal burst length for IC\n");
506 return -EINVAL;
507 }
508
509 width--;
510 height--;
511
512 if (temp_rot & 0x2) /* Need horizontal flip */
513 need_hor_flip = true;
514
515 spin_lock_irqsave(&priv->lock, flags);
516
517 ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
518 ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
519 ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
520
521 switch (channel->num) {
522 case IPUV3_CHANNEL_IC_PP_MEM:
523 if (burst_size == 16)
524 ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
525 else
526 ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
527
528 if (need_hor_flip)
529 ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
530 else
531 ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
532
533 ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
534 ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
535
536 ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
537 ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
538 break;
539 case IPUV3_CHANNEL_MEM_IC_PP:
540 if (burst_size == 16)
541 ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
542 else
543 ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
544 break;
545 case IPUV3_CHANNEL_MEM_ROT_PP:
546 ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
547 ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
548 break;
549 case IPUV3_CHANNEL_MEM_IC_PRP_VF:
550 if (burst_size == 16)
551 ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
552 else
553 ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
554 break;
555 case IPUV3_CHANNEL_IC_PRP_ENC_MEM:
556 if (burst_size == 16)
557 ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
558 else
559 ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
560
561 if (need_hor_flip)
562 ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
563 else
564 ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
565
566 ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
567 ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
568
569 ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
570 ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
571 break;
572 case IPUV3_CHANNEL_MEM_ROT_ENC:
573 ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
574 ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
575 break;
576 case IPUV3_CHANNEL_IC_PRP_VF_MEM:
577 if (burst_size == 16)
578 ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
579 else
580 ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
581
582 if (need_hor_flip)
583 ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
584 else
585 ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
586
587 ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
588 ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
589
590 ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
591 ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
592 break;
593 case IPUV3_CHANNEL_MEM_ROT_VF:
594 ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
595 ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
596 break;
597 case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
598 if (burst_size == 16)
599 ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
600 else
601 ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
602 break;
603 case IPUV3_CHANNEL_G_MEM_IC_PP:
604 if (burst_size == 16)
605 ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
606 else
607 ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
608 break;
609 case IPUV3_CHANNEL_VDI_MEM_IC_VF:
610 if (burst_size == 16)
611 ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
612 else
613 ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
614 break;
615 default:
616 goto unlock;
617 }
618
619 ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
620 ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
621 ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
622
623 if (rot >= IPU_ROTATE_90_RIGHT)
624 ic->rotation = true;
625
626unlock:
627 spin_unlock_irqrestore(&priv->lock, flags);
628 return ret;
629}
630EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
631
632int ipu_ic_enable(struct ipu_ic *ic)
633{
634 struct ipu_ic_priv *priv = ic->priv;
635 unsigned long flags;
636 u32 module = IPU_CONF_IC_EN;
637
638 spin_lock_irqsave(&priv->lock, flags);
639
640 if (ic->rotation)
641 module |= IPU_CONF_ROT_EN;
642
643 if (!priv->use_count)
644 ipu_module_enable(priv->ipu, module);
645
646 priv->use_count++;
647
648 spin_unlock_irqrestore(&priv->lock, flags);
649
650 return 0;
651}
652EXPORT_SYMBOL_GPL(ipu_ic_enable);
653
654int ipu_ic_disable(struct ipu_ic *ic)
655{
656 struct ipu_ic_priv *priv = ic->priv;
657 unsigned long flags;
658 u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN;
659
660 spin_lock_irqsave(&priv->lock, flags);
661
662 priv->use_count--;
663
664 if (!priv->use_count)
665 ipu_module_disable(priv->ipu, module);
666
667 if (priv->use_count < 0)
668 priv->use_count = 0;
669
670 spin_unlock_irqrestore(&priv->lock, flags);
671
672 return 0;
673}
674EXPORT_SYMBOL_GPL(ipu_ic_disable);
675
676struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
677{
678 struct ipu_ic_priv *priv = ipu->ic_priv;
679 unsigned long flags;
680 struct ipu_ic *ic, *ret;
681
682 if (task >= IC_NUM_TASKS)
683 return ERR_PTR(-EINVAL);
684
685 ic = &priv->task[task];
686
687 spin_lock_irqsave(&priv->lock, flags);
688
689 if (ic->in_use) {
690 ret = ERR_PTR(-EBUSY);
691 goto unlock;
692 }
693
694 ic->in_use = true;
695 ret = ic;
696
697unlock:
698 spin_unlock_irqrestore(&priv->lock, flags);
699 return ret;
700}
701EXPORT_SYMBOL_GPL(ipu_ic_get);
702
703void ipu_ic_put(struct ipu_ic *ic)
704{
705 struct ipu_ic_priv *priv = ic->priv;
706 unsigned long flags;
707
708 spin_lock_irqsave(&priv->lock, flags);
709 ic->in_use = false;
710 spin_unlock_irqrestore(&priv->lock, flags);
711}
712EXPORT_SYMBOL_GPL(ipu_ic_put);
713
714int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
715 unsigned long base, unsigned long tpmem_base)
716{
717 struct ipu_ic_priv *priv;
718 int i;
719
720 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
721 if (!priv)
722 return -ENOMEM;
723
724 ipu->ic_priv = priv;
725
726 spin_lock_init(&priv->lock);
727 priv->base = devm_ioremap(dev, base, PAGE_SIZE);
728 if (!priv->base)
729 return -ENOMEM;
730 priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K);
731 if (!priv->tpmem_base)
732 return -ENOMEM;
733
734 dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base);
735
736 priv->ipu = ipu;
737
738 for (i = 0; i < IC_NUM_TASKS; i++) {
739 priv->task[i].task = i;
740 priv->task[i].priv = priv;
741 priv->task[i].reg = &ic_task_reg[i];
742 priv->task[i].bit = &ic_task_bit[i];
743 }
744
745 return 0;
746}
747
748void ipu_ic_exit(struct ipu_soc *ipu)
749{
750}
751
752void ipu_ic_dump(struct ipu_ic *ic)
753{
754 struct ipu_ic_priv *priv = ic->priv;
755 struct ipu_soc *ipu = priv->ipu;
756
757 dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n",
758 ipu_ic_read(ic, IC_CONF));
759 dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n",
760 ipu_ic_read(ic, IC_PRP_ENC_RSC));
761 dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n",
762 ipu_ic_read(ic, IC_PRP_VF_RSC));
763 dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n",
764 ipu_ic_read(ic, IC_PP_RSC));
765 dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n",
766 ipu_ic_read(ic, IC_CMBP_1));
767 dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n",
768 ipu_ic_read(ic, IC_CMBP_2));
769 dev_dbg(ipu->dev, "IC_IDMAC_1 = \t0x%08X\n",
770 ipu_ic_read(ic, IC_IDMAC_1));
771 dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n",
772 ipu_ic_read(ic, IC_IDMAC_2));
773 dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n",
774 ipu_ic_read(ic, IC_IDMAC_3));
775 dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n",
776 ipu_ic_read(ic, IC_IDMAC_4));
777}
778EXPORT_SYMBOL_GPL(ipu_ic_dump);
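
The ipu_ic_get()/ipu_ic_put() pair above brackets a use-counted enable/disable, so a caller would typically follow the sketch below (IC_TASK_POST_PROCESSOR is assumed to be one of the enum ipu_ic_task values; error handling abbreviated):

static int example_run_ic_task(struct ipu_soc *ipu)
{
	struct ipu_ic *ic;
	int ret;

	ic = ipu_ic_get(ipu, IC_TASK_POST_PROCESSOR);
	if (IS_ERR(ic))
		return PTR_ERR(ic);

	ret = ipu_ic_enable(ic);	/* first user turns the IC module on */
	if (!ret) {
		/* ... program the task and run the conversion ... */
		ipu_ic_disable(ic);	/* last user turns it off again */
	}

	ipu_ic_put(ic);			/* mark the task free for other users */
	return ret;
}
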
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index 0a7b2adaba39..bfb1e8a4483f 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -24,23 +24,6 @@ struct ipu_soc;
24 24
25#include <video/imx-ipu-v3.h> 25#include <video/imx-ipu-v3.h>
26 26
27#define IPUV3_CHANNEL_CSI0 0
28#define IPUV3_CHANNEL_CSI1 1
29#define IPUV3_CHANNEL_CSI2 2
30#define IPUV3_CHANNEL_CSI3 3
31#define IPUV3_CHANNEL_MEM_BG_SYNC 23
32#define IPUV3_CHANNEL_MEM_FG_SYNC 27
33#define IPUV3_CHANNEL_MEM_DC_SYNC 28
34#define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA 31
35#define IPUV3_CHANNEL_MEM_DC_ASYNC 41
36#define IPUV3_CHANNEL_ROT_ENC_MEM 45
37#define IPUV3_CHANNEL_ROT_VF_MEM 46
38#define IPUV3_CHANNEL_ROT_PP_MEM 47
39#define IPUV3_CHANNEL_ROT_ENC_MEM_OUT 48
40#define IPUV3_CHANNEL_ROT_VF_MEM_OUT 49
41#define IPUV3_CHANNEL_ROT_PP_MEM_OUT 50
42#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51
43
44#define IPU_MCU_T_DEFAULT 8 27#define IPU_MCU_T_DEFAULT 8
45#define IPU_CM_IDMAC_REG_OFS 0x00008000 28#define IPU_CM_IDMAC_REG_OFS 0x00008000
46#define IPU_CM_IC_REG_OFS 0x00020000 29#define IPU_CM_IC_REG_OFS 0x00020000
@@ -85,6 +68,7 @@ struct ipu_soc;
85#define IPU_DISP_TASK_STAT IPU_CM_REG(0x0254) 68#define IPU_DISP_TASK_STAT IPU_CM_REG(0x0254)
86#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0268 + 4 * ((ch) / 32)) 69#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
87#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0270 + 4 * ((ch) / 32)) 70#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
71#define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG(0x0288 + 4 * ((ch) / 32))
88#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0278 + 4 * ((ch) / 32)) 72#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
89#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0280 + 4 * ((ch) / 32)) 73#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
90 74
@@ -149,9 +133,11 @@ struct ipuv3_channel {
149}; 133};
150 134
151struct ipu_cpmem; 135struct ipu_cpmem;
136struct ipu_csi;
152struct ipu_dc_priv; 137struct ipu_dc_priv;
153struct ipu_dmfc_priv; 138struct ipu_dmfc_priv;
154struct ipu_di; 139struct ipu_di;
140struct ipu_ic_priv;
155struct ipu_smfc_priv; 141struct ipu_smfc_priv;
156 142
157struct ipu_devtype; 143struct ipu_devtype;
@@ -181,6 +167,8 @@ struct ipu_soc {
181 struct ipu_dp_priv *dp_priv; 167 struct ipu_dp_priv *dp_priv;
182 struct ipu_dmfc_priv *dmfc_priv; 168 struct ipu_dmfc_priv *dmfc_priv;
183 struct ipu_di *di_priv[2]; 169 struct ipu_di *di_priv[2];
170 struct ipu_csi *csi_priv[2];
171 struct ipu_ic_priv *ic_priv;
184 struct ipu_smfc_priv *smfc_priv; 172 struct ipu_smfc_priv *smfc_priv;
185}; 173};
186 174
@@ -203,6 +191,14 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
203bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno); 191bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
204int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms); 192int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
205 193
194int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
195 unsigned long base, u32 module, struct clk *clk_ipu);
196void ipu_csi_exit(struct ipu_soc *ipu, int id);
197
198int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
199 unsigned long base, unsigned long tpmem_base);
200void ipu_ic_exit(struct ipu_soc *ipu);
201
206int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id, 202int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
207 unsigned long base, u32 module, struct clk *ipu_clk); 203 unsigned long base, u32 module, struct clk *ipu_clk);
208void ipu_di_exit(struct ipu_soc *ipu, int id); 204void ipu_di_exit(struct ipu_soc *ipu, int id);
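
The new ipu_csi_init()/ipu_ic_init() prototypes are meant to be called from the IPU probe path; a hedged sketch of that wiring follows (the 0x00020000 offset matches IPU_CM_IC_REG_OFS above, while the TPMEM offset shown is purely illustrative and would really come from the per-SoC devtype tables):

static int example_wire_up_ic(struct ipu_soc *ipu, struct device *dev,
			      unsigned long ipu_base)
{
	/* control-module IC registers sit at IPU_CM_IC_REG_OFS */
	return ipu_ic_init(ipu, dev,
			   ipu_base + 0x00020000,	/* IC registers */
			   ipu_base + 0x01000000);	/* TPMEM, illustrative */
}
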
diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c
index e4f85ad286fc..4ef910991413 100644
--- a/drivers/gpu/ipu-v3/ipu-smfc.c
+++ b/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -8,7 +8,6 @@
8 * http://www.opensource.org/licenses/gpl-license.html 8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html 9 * http://www.gnu.org/copyleft/gpl.html
10 */ 10 */
11#define DEBUG
12#include <linux/export.h> 11#include <linux/export.h>
13#include <linux/types.h> 12#include <linux/types.h>
14#include <linux/init.h> 13#include <linux/init.h>
@@ -21,9 +20,18 @@
21 20
22#include "ipu-prv.h" 21#include "ipu-prv.h"
23 22
23struct ipu_smfc {
24 struct ipu_smfc_priv *priv;
25 int chno;
26 bool inuse;
27};
28
24struct ipu_smfc_priv { 29struct ipu_smfc_priv {
25 void __iomem *base; 30 void __iomem *base;
26 spinlock_t lock; 31 spinlock_t lock;
32 struct ipu_soc *ipu;
33 struct ipu_smfc channel[4];
34 int use_count;
27}; 35};
28 36
29/*SMFC Registers */ 37/*SMFC Registers */
@@ -31,63 +39,166 @@ struct ipu_smfc_priv {
31#define SMFC_WMC 0x0004 39#define SMFC_WMC 0x0004
32#define SMFC_BS 0x0008 40#define SMFC_BS 0x0008
33 41
34int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize) 42int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize)
35{ 43{
36 struct ipu_smfc_priv *smfc = ipu->smfc_priv; 44 struct ipu_smfc_priv *priv = smfc->priv;
37 unsigned long flags; 45 unsigned long flags;
38 u32 val, shift; 46 u32 val, shift;
39 47
40 spin_lock_irqsave(&smfc->lock, flags); 48 spin_lock_irqsave(&priv->lock, flags);
41 49
42 shift = channel * 4; 50 shift = smfc->chno * 4;
43 val = readl(smfc->base + SMFC_BS); 51 val = readl(priv->base + SMFC_BS);
44 val &= ~(0xf << shift); 52 val &= ~(0xf << shift);
45 val |= burstsize << shift; 53 val |= burstsize << shift;
46 writel(val, smfc->base + SMFC_BS); 54 writel(val, priv->base + SMFC_BS);
47 55
48 spin_unlock_irqrestore(&smfc->lock, flags); 56 spin_unlock_irqrestore(&priv->lock, flags);
49 57
50 return 0; 58 return 0;
51} 59}
52EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize); 60EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
53 61
54int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id) 62int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id)
55{ 63{
56 struct ipu_smfc_priv *smfc = ipu->smfc_priv; 64 struct ipu_smfc_priv *priv = smfc->priv;
57 unsigned long flags; 65 unsigned long flags;
58 u32 val, shift; 66 u32 val, shift;
59 67
60 spin_lock_irqsave(&smfc->lock, flags); 68 spin_lock_irqsave(&priv->lock, flags);
61 69
62 shift = channel * 3; 70 shift = smfc->chno * 3;
63 val = readl(smfc->base + SMFC_MAP); 71 val = readl(priv->base + SMFC_MAP);
64 val &= ~(0x7 << shift); 72 val &= ~(0x7 << shift);
65 val |= ((csi_id << 2) | mipi_id) << shift; 73 val |= ((csi_id << 2) | mipi_id) << shift;
66 writel(val, smfc->base + SMFC_MAP); 74 writel(val, priv->base + SMFC_MAP);
67 75
68 spin_unlock_irqrestore(&smfc->lock, flags); 76 spin_unlock_irqrestore(&priv->lock, flags);
69 77
70 return 0; 78 return 0;
71} 79}
72EXPORT_SYMBOL_GPL(ipu_smfc_map_channel); 80EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
73 81
82int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level)
83{
84 struct ipu_smfc_priv *priv = smfc->priv;
85 unsigned long flags;
86 u32 val, shift;
87
88 spin_lock_irqsave(&priv->lock, flags);
89
90 shift = smfc->chno * 6 + (smfc->chno > 1 ? 4 : 0);
91 val = readl(priv->base + SMFC_WMC);
92 val &= ~(0x3f << shift);
93 val |= ((clr_level << 3) | set_level) << shift;
94 writel(val, priv->base + SMFC_WMC);
95
96 spin_unlock_irqrestore(&priv->lock, flags);
97
98 return 0;
99}
100EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark);
101
102int ipu_smfc_enable(struct ipu_smfc *smfc)
103{
104 struct ipu_smfc_priv *priv = smfc->priv;
105 unsigned long flags;
106
107 spin_lock_irqsave(&priv->lock, flags);
108
109 if (!priv->use_count)
110 ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN);
111
112 priv->use_count++;
113
114 spin_unlock_irqrestore(&priv->lock, flags);
115
116 return 0;
117}
118EXPORT_SYMBOL_GPL(ipu_smfc_enable);
119
120int ipu_smfc_disable(struct ipu_smfc *smfc)
121{
122 struct ipu_smfc_priv *priv = smfc->priv;
123 unsigned long flags;
124
125 spin_lock_irqsave(&priv->lock, flags);
126
127 priv->use_count--;
128
129 if (!priv->use_count)
130 ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN);
131
132 if (priv->use_count < 0)
133 priv->use_count = 0;
134
135 spin_unlock_irqrestore(&priv->lock, flags);
136
137 return 0;
138}
139EXPORT_SYMBOL_GPL(ipu_smfc_disable);
140
141struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno)
142{
143 struct ipu_smfc_priv *priv = ipu->smfc_priv;
144 struct ipu_smfc *smfc, *ret;
145 unsigned long flags;
146
147 if (chno >= 4)
148 return ERR_PTR(-EINVAL);
149
150 smfc = &priv->channel[chno];
151 ret = smfc;
152
153 spin_lock_irqsave(&priv->lock, flags);
154
155 if (smfc->inuse) {
156 ret = ERR_PTR(-EBUSY);
157 goto unlock;
158 }
159
160 smfc->inuse = true;
161unlock:
162 spin_unlock_irqrestore(&priv->lock, flags);
163 return ret;
164}
165EXPORT_SYMBOL_GPL(ipu_smfc_get);
166
167void ipu_smfc_put(struct ipu_smfc *smfc)
168{
169 struct ipu_smfc_priv *priv = smfc->priv;
170 unsigned long flags;
171
172 spin_lock_irqsave(&priv->lock, flags);
173 smfc->inuse = false;
174 spin_unlock_irqrestore(&priv->lock, flags);
175}
176EXPORT_SYMBOL_GPL(ipu_smfc_put);
177
74int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, 178int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
75 unsigned long base) 179 unsigned long base)
76{ 180{
77 struct ipu_smfc_priv *smfc; 181 struct ipu_smfc_priv *priv;
182 int i;
78 183
79 smfc = devm_kzalloc(dev, sizeof(*smfc), GFP_KERNEL); 184 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
80 if (!smfc) 185 if (!priv)
81 return -ENOMEM; 186 return -ENOMEM;
82 187
83 ipu->smfc_priv = smfc; 188 ipu->smfc_priv = priv;
84 spin_lock_init(&smfc->lock); 189 spin_lock_init(&priv->lock);
190 priv->ipu = ipu;
85 191
86 smfc->base = devm_ioremap(dev, base, PAGE_SIZE); 192 priv->base = devm_ioremap(dev, base, PAGE_SIZE);
87 if (!smfc->base) 193 if (!priv->base)
88 return -ENOMEM; 194 return -ENOMEM;
89 195
90 pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, smfc->base); 196 for (i = 0; i < 4; i++) {
197 priv->channel[i].priv = priv;
198 priv->channel[i].chno = i;
199 }
200
201 pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base);
91 202
92 return 0; 203 return 0;
93} 204}
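
Taken together, the reworked SMFC API hands out per-channel handles instead of raw channel numbers; below is a minimal usage sketch built only from the functions added above (burst size and watermark values are illustrative):

static int example_setup_smfc(struct ipu_soc *ipu)
{
	struct ipu_smfc *smfc;
	int ret;

	smfc = ipu_smfc_get(ipu, 0);		/* claim SMFC channel 0 */
	if (IS_ERR(smfc))
		return PTR_ERR(smfc);

	ipu_smfc_map_channel(smfc, 0, 0);	/* route CSI0, MIPI vchan 0 */
	ipu_smfc_set_burstsize(smfc, 8);
	ipu_smfc_set_watermark(smfc, 0x02, 0x01);

	ret = ipu_smfc_enable(smfc);
	if (ret) {
		ipu_smfc_put(smfc);
		return ret;
	}

	/* ... capture runs; teardown is ipu_smfc_disable() + ipu_smfc_put() */
	return 0;
}
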
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index b18847827759..9cb222e2996f 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -531,6 +531,7 @@ static struct drm_driver imx_drm_driver = {
531 .unload = imx_drm_driver_unload, 531 .unload = imx_drm_driver_unload,
532 .lastclose = imx_drm_driver_lastclose, 532 .lastclose = imx_drm_driver_lastclose,
533 .preclose = imx_drm_driver_preclose, 533 .preclose = imx_drm_driver_preclose,
534 .set_busid = drm_platform_set_busid,
534 .gem_free_object = drm_gem_cma_free_object, 535 .gem_free_object = drm_gem_cma_free_object,
535 .gem_vm_ops = &drm_gem_cma_vm_ops, 536 .gem_vm_ops = &drm_gem_cma_vm_ops,
536 .dumb_create = drm_gem_cma_dumb_create, 537 .dumb_create = drm_gem_cma_dumb_create,
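
This one-liner is the driver-side half of the drm_bus removal visible in the drmP.h hunks below: the busid callback moves into struct drm_driver itself. A hedged sketch of the same pattern for any other platform KMS driver:

static struct drm_driver example_platform_drm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	/* formerly reached through driver->bus->set_busid */
	.set_busid		= drm_platform_set_busid,
	/* ... fops, GEM and dumb-buffer hooks as before ... */
};
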
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index e911b9c96e19..ccbe2ae22ac5 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig FB 5menuconfig FB
6 tristate "Support for frame buffer devices" 6 tristate "Support for frame buffer devices"
7 select FB_CMDLINE
7 ---help--- 8 ---help---
8 The frame buffer device provides an abstraction for the graphics 9 The frame buffer device provides an abstraction for the graphics
9 hardware. It represents the frame buffer of some video hardware and 10 hardware. It represents the frame buffer of some video hardware and
@@ -52,6 +53,9 @@ config FIRMWARE_EDID
52 combination with certain motherboards and monitors are known to 53 combination with certain motherboards and monitors are known to
53 suffer from this problem. 54 suffer from this problem.
54 55
56config FB_CMDLINE
57 bool
58
55config FB_DDC 59config FB_DDC
56 tristate 60 tristate
57 depends on FB 61 depends on FB
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index fa306538dac2..67f28e20a892 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -1,4 +1,5 @@
1obj-y += fb_notify.o 1obj-y += fb_notify.o
2obj-$(CONFIG_FB_CMDLINE) += fb_cmdline.o
2obj-$(CONFIG_FB) += fb.o 3obj-$(CONFIG_FB) += fb.o
3fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ 4fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
4 modedb.o fbcvt.o 5 modedb.o fbcvt.o
diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
new file mode 100644
index 000000000000..39509ccd92f1
--- /dev/null
+++ b/drivers/video/fbdev/core/fb_cmdline.c
@@ -0,0 +1,110 @@
1/*
 2 * linux/drivers/video/fbdev/core/fb_cmdline.c
3 *
4 * Copyright (C) 2014 Intel Corp
5 * Copyright (C) 1994 Martin Schaller
6 *
7 * 2001 - Documented with DocBook
8 * - Brad Douglas <brad@neruo.com>
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive
12 * for more details.
13 *
14 * Authors:
15 * Daniel Vetter <daniel.vetter@ffwll.ch>
16 */
17#include <linux/init.h>
18#include <linux/fb.h>
19
20static char *video_options[FB_MAX] __read_mostly;
21static int ofonly __read_mostly;
22
23const char *fb_mode_option;
24EXPORT_SYMBOL_GPL(fb_mode_option);
25
26/**
27 * fb_get_options - get kernel boot parameters
28 * @name: framebuffer name as it would appear in
29 * the boot parameter line
30 * (video=<name>:<options>)
31 * @option: the option will be stored here
32 *
33 * NOTE: Needed to maintain backwards compatibility
34 */
35int fb_get_options(const char *name, char **option)
36{
37 char *opt, *options = NULL;
38 int retval = 0;
39 int name_len = strlen(name), i;
40
41 if (name_len && ofonly && strncmp(name, "offb", 4))
42 retval = 1;
43
44 if (name_len && !retval) {
45 for (i = 0; i < FB_MAX; i++) {
46 if (video_options[i] == NULL)
47 continue;
48 if (!video_options[i][0])
49 continue;
50 opt = video_options[i];
51 if (!strncmp(name, opt, name_len) &&
52 opt[name_len] == ':')
53 options = opt + name_len + 1;
54 }
55 }
56 /* No match, pass global option */
57 if (!options && option && fb_mode_option)
58 options = kstrdup(fb_mode_option, GFP_KERNEL);
59 if (options && !strncmp(options, "off", 3))
60 retval = 1;
61
62 if (option)
63 *option = options;
64
65 return retval;
66}
67EXPORT_SYMBOL(fb_get_options);
68
69/**
70 * video_setup - process command line options
71 * @options: string of options
72 *
73 * Process command line options for frame buffer subsystem.
74 *
75 * NOTE: This function is a __setup and __init function.
76 * It only stores the options. Drivers have to call
77 * fb_get_options() as necessary.
78 *
79 * Returns zero.
80 *
81 */
82static int __init video_setup(char *options)
83{
84 int i, global = 0;
85
86 if (!options || !*options)
87 global = 1;
88
89 if (!global && !strncmp(options, "ofonly", 6)) {
90 ofonly = 1;
91 global = 1;
92 }
93
94 if (!global && !strchr(options, ':')) {
95 fb_mode_option = options;
96 global = 1;
97 }
98
99 if (!global) {
100 for (i = 0; i < FB_MAX; i++) {
101 if (video_options[i] == NULL) {
102 video_options[i] = options;
103 break;
104 }
105 }
106 }
107
108 return 1;
109}
110__setup("video=", video_setup);
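
With fb_get_options() now living in its own translation unit, a KMS driver can parse video= options without pulling in all of fbmem.o; a sketch of the call side ("examplefb" is a made-up driver name):

static int __init examplefb_check_options(void)
{
	char *option = NULL;

	/* nonzero means "video=examplefb:off" (or ofonly matched) */
	if (fb_get_options("examplefb", &option))
		return -ENODEV;

	/* option points past "video=examplefb:" or holds the global mode */
	if (option)
		pr_info("examplefb: using options '%s'\n", option);

	return 0;
}
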
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index b5e85f6c1c26..0705d8883ede 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1908,96 +1908,4 @@ int fb_new_modelist(struct fb_info *info)
1908 return err; 1908 return err;
1909} 1909}
1910 1910
1911static char *video_options[FB_MAX] __read_mostly;
1912static int ofonly __read_mostly;
1913
1914/**
1915 * fb_get_options - get kernel boot parameters
1916 * @name: framebuffer name as it would appear in
1917 * the boot parameter line
1918 * (video=<name>:<options>)
1919 * @option: the option will be stored here
1920 *
1921 * NOTE: Needed to maintain backwards compatibility
1922 */
1923int fb_get_options(const char *name, char **option)
1924{
1925 char *opt, *options = NULL;
1926 int retval = 0;
1927 int name_len = strlen(name), i;
1928
1929 if (name_len && ofonly && strncmp(name, "offb", 4))
1930 retval = 1;
1931
1932 if (name_len && !retval) {
1933 for (i = 0; i < FB_MAX; i++) {
1934 if (video_options[i] == NULL)
1935 continue;
1936 if (!video_options[i][0])
1937 continue;
1938 opt = video_options[i];
1939 if (!strncmp(name, opt, name_len) &&
1940 opt[name_len] == ':')
1941 options = opt + name_len + 1;
1942 }
1943 }
1944 /* No match, pass global option */
1945 if (!options && option && fb_mode_option)
1946 options = kstrdup(fb_mode_option, GFP_KERNEL);
1947 if (options && !strncmp(options, "off", 3))
1948 retval = 1;
1949
1950 if (option)
1951 *option = options;
1952
1953 return retval;
1954}
1955EXPORT_SYMBOL(fb_get_options);
1956
1957#ifndef MODULE
1958/**
1959 * video_setup - process command line options
1960 * @options: string of options
1961 *
1962 * Process command line options for frame buffer subsystem.
1963 *
1964 * NOTE: This function is a __setup and __init function.
1965 * It only stores the options. Drivers have to call
1966 * fb_get_options() as necessary.
1967 *
1968 * Returns zero.
1969 *
1970 */
1971static int __init video_setup(char *options)
1972{
1973 int i, global = 0;
1974
1975 if (!options || !*options)
1976 global = 1;
1977
1978 if (!global && !strncmp(options, "ofonly", 6)) {
1979 ofonly = 1;
1980 global = 1;
1981 }
1982
1983 if (!global && !strchr(options, ':')) {
1984 fb_mode_option = options;
1985 global = 1;
1986 }
1987
1988 if (!global) {
1989 for (i = 0; i < FB_MAX; i++) {
1990 if (video_options[i] == NULL) {
1991 video_options[i] = options;
1992 break;
1993 }
1994
1995 }
1996 }
1997
1998 return 1;
1999}
2000__setup("video=", video_setup);
2001#endif
2002
2003MODULE_LICENSE("GPL"); 1911MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index a9a907c440d7..388f7971494b 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -29,9 +29,6 @@
29#define DPRINTK(fmt, args...) 29#define DPRINTK(fmt, args...)
30#endif 30#endif
31 31
32const char *fb_mode_option;
33EXPORT_SYMBOL_GPL(fb_mode_option);
34
35/* 32/*
36 * Standard video mode definitions (taken from XFree86) 33 * Standard video mode definitions (taken from XFree86)
37 */ 34 */
diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h
new file mode 100644
index 000000000000..5765648b5ef7
--- /dev/null
+++ b/include/drm/ati_pcigart.h
@@ -0,0 +1,30 @@
1#ifndef DRM_ATI_PCIGART_H
2#define DRM_ATI_PCIGART_H
3
4#include <drm/drm_legacy.h>
5
6/* location of GART table */
7#define DRM_ATI_GART_MAIN 1
8#define DRM_ATI_GART_FB 2
9
10#define DRM_ATI_GART_PCI 1
11#define DRM_ATI_GART_PCIE 2
12#define DRM_ATI_GART_IGP 3
13
14struct drm_ati_pcigart_info {
15 int gart_table_location;
16 int gart_reg_if;
17 void *addr;
18 dma_addr_t bus_addr;
19 dma_addr_t table_mask;
20 struct drm_dma_handle *table_handle;
21 struct drm_local_map mapping;
22 int table_size;
23};
24
25extern int drm_ati_pcigart_init(struct drm_device *dev,
26 struct drm_ati_pcigart_info * gart_info);
27extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
28 struct drm_ati_pcigart_info * gart_info);
29
30#endif
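
Only the legacy ATI/radeon UMS paths consume this header; a hedged sketch of the intended call sequence is below (field values are illustrative, and the legacy convention is that drm_ati_pcigart_init() returns nonzero on success):

static int example_gart_setup(struct drm_device *dev,
			      struct drm_ati_pcigart_info *gart_info)
{
	gart_info->gart_table_location = DRM_ATI_GART_MAIN; /* table in RAM */
	gart_info->gart_reg_if = DRM_ATI_GART_PCI;
	gart_info->table_size = 32768;			/* illustrative */

	if (!drm_ati_pcigart_init(dev, gart_info))
		return -ENOMEM;

	/* on teardown: drm_ati_pcigart_cleanup(dev, gart_info); */
	return 0;
}
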
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 196890735367..53ed87698a74 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1,17 +1,14 @@
1/**
2 * \file drmP.h
3 * Private header for Direct Rendering Manager
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/* 1/*
2 * Internal Header for the Direct Rendering Manager
3 *
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * Copyright (c) 2009-2010, Code Aurora Forum. 6 * Copyright (c) 2009-2010, Code Aurora Forum.
13 * All rights reserved. 7 * All rights reserved.
14 * 8 *
9 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
10 * Author: Gareth Hughes <gareth@valinux.com>
11 *
15 * Permission is hereby granted, free of charge, to any person obtaining a 12 * Permission is hereby granted, free of charge, to any person obtaining a
16 * copy of this software and associated documentation files (the "Software"), 13 * copy of this software and associated documentation files (the "Software"),
17 * to deal in the Software without restriction, including without limitation 14 * to deal in the Software without restriction, including without limitation
@@ -35,59 +32,62 @@
35#ifndef _DRM_P_H_ 32#ifndef _DRM_P_H_
36#define _DRM_P_H_ 33#define _DRM_P_H_
37 34
38#ifdef __KERNEL__ 35#include <linux/agp_backend.h>
39#ifdef __alpha__ 36#include <linux/cdev.h>
40/* add include of current.h so that "current" is defined 37#include <linux/dma-mapping.h>
41 * before static inline funcs in wait.h. Doing this so we 38#include <linux/file.h>
42 * can build the DRM (part of PI DRI). 4/21/2000 S + B */
43#include <asm/current.h>
44#endif /* __alpha__ */
45#include <linux/kernel.h>
46#include <linux/kref.h>
47#include <linux/miscdevice.h>
48#include <linux/fs.h> 39#include <linux/fs.h>
40#include <linux/highmem.h>
41#include <linux/idr.h>
49#include <linux/init.h> 42#include <linux/init.h>
50#include <linux/file.h> 43#include <linux/io.h>
51#include <linux/platform_device.h>
52#include <linux/pci.h>
53#include <linux/jiffies.h> 44#include <linux/jiffies.h>
54#include <linux/dma-mapping.h> 45#include <linux/kernel.h>
46#include <linux/kref.h>
47#include <linux/miscdevice.h>
55#include <linux/mm.h> 48#include <linux/mm.h>
56#include <linux/cdev.h>
57#include <linux/mutex.h> 49#include <linux/mutex.h>
58#include <linux/io.h> 50#include <linux/pci.h>
59#include <linux/slab.h> 51#include <linux/platform_device.h>
52#include <linux/poll.h>
60#include <linux/ratelimit.h> 53#include <linux/ratelimit.h>
61#if defined(__alpha__) || defined(__powerpc__) 54#include <linux/sched.h>
62#include <asm/pgtable.h> /* For pte_wrprotect */ 55#include <linux/slab.h>
63#endif
64#include <asm/mman.h>
65#include <asm/uaccess.h>
66#include <linux/types.h> 56#include <linux/types.h>
67#include <linux/agp_backend.h> 57#include <linux/vmalloc.h>
68#include <linux/workqueue.h> 58#include <linux/workqueue.h>
69#include <linux/poll.h> 59
60#include <asm/mman.h>
70#include <asm/pgalloc.h> 61#include <asm/pgalloc.h>
71#include <drm/drm.h> 62#include <asm/uaccess.h>
72#include <drm/drm_sarea.h>
73#include <drm/drm_vma_manager.h>
74 63
75#include <linux/idr.h> 64#include <uapi/drm/drm.h>
65#include <uapi/drm/drm_mode.h>
76 66
77#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 67#include <drm/drm_agpsupport.h>
68#include <drm/drm_crtc.h>
69#include <drm/drm_global.h>
70#include <drm/drm_hashtab.h>
71#include <drm/drm_mem_util.h>
72#include <drm/drm_mm.h>
73#include <drm/drm_os_linux.h>
74#include <drm/drm_sarea.h>
75#include <drm/drm_vma_manager.h>
78 76
79struct module; 77struct module;
80 78
81struct drm_file; 79struct drm_file;
82struct drm_device; 80struct drm_device;
81struct drm_agp_head;
82struct drm_local_map;
83struct drm_device_dma;
84struct drm_dma_handle;
85struct drm_gem_object;
83 86
84struct device_node; 87struct device_node;
85struct videomode; 88struct videomode;
86struct reservation_object; 89struct reservation_object;
87 90struct dma_buf_attachment;
88#include <drm/drm_os_linux.h>
89#include <drm/drm_hashtab.h>
90#include <drm/drm_mm.h>
91 91
92/* 92/*
93 * 4 debug categories are defined: 93 * 4 debug categories are defined:
@@ -126,7 +126,7 @@ extern __printf(2, 3)
126void drm_ut_debug_printk(const char *function_name, 126void drm_ut_debug_printk(const char *function_name,
127 const char *format, ...); 127 const char *format, ...);
128extern __printf(2, 3) 128extern __printf(2, 3)
129int drm_err(const char *func, const char *format, ...); 129void drm_err(const char *func, const char *format, ...);
130 130
131/***********************************************************************/ 131/***********************************************************************/
132/** \name DRM template customization defaults */ 132/** \name DRM template customization defaults */
@@ -145,19 +145,6 @@ int drm_err(const char *func, const char *format, ...);
145#define DRIVER_RENDER 0x8000 145#define DRIVER_RENDER 0x8000
146 146
147/***********************************************************************/ 147/***********************************************************************/
148/** \name Begin the DRM... */
149/*@{*/
150
151#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
152 also include looping detection. */
153
154#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
155
156#define DRM_MAP_HASH_OFFSET 0x10000000
157
158/*@}*/
159
160/***********************************************************************/
161/** \name Macros to make printk easier */ 148/** \name Macros to make printk easier */
162/*@{*/ 149/*@{*/
163 150
@@ -198,7 +185,6 @@ int drm_err(const char *func, const char *format, ...);
198 * \param fmt printf() like format string. 185 * \param fmt printf() like format string.
199 * \param arg arguments 186 * \param arg arguments
200 */ 187 */
201#if DRM_DEBUG_CODE
202#define DRM_DEBUG(fmt, args...) \ 188#define DRM_DEBUG(fmt, args...) \
203 do { \ 189 do { \
204 if (unlikely(drm_debug & DRM_UT_CORE)) \ 190 if (unlikely(drm_debug & DRM_UT_CORE)) \
@@ -220,12 +206,6 @@ int drm_err(const char *func, const char *format, ...);
220 if (unlikely(drm_debug & DRM_UT_PRIME)) \ 206 if (unlikely(drm_debug & DRM_UT_PRIME)) \
221 drm_ut_debug_printk(__func__, fmt, ##args); \ 207 drm_ut_debug_printk(__func__, fmt, ##args); \
222 } while (0) 208 } while (0)
223#else
224#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
225#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
226#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
227#define DRM_DEBUG(fmt, arg...) do { } while (0)
228#endif
229 209
230/*@}*/ 210/*@}*/
231 211
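
With the DRM_DEBUG_CODE compile-time gate dropped, these macros are always built and are filtered purely at run time through the drm.debug module parameter; a small sketch (the DRM_UT_KMS category bit is assumed here to be 0x04):

static void example_debug_msg(struct drm_crtc *crtc)
{
	/* compiled unconditionally now; emitted only when DRM_UT_KMS is
	 * set, e.g.: echo 0x04 > /sys/module/drm/parameters/debug */
	DRM_DEBUG_KMS("[CRTC:%d] example message\n", crtc->base.id);
}
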
@@ -236,23 +216,6 @@ int drm_err(const char *func, const char *format, ...);
236#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 216#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
237 217
238/** 218/**
239 * Test that the hardware lock is held by the caller, returning otherwise.
240 *
241 * \param dev DRM device.
242 * \param filp file pointer of the caller.
243 */
244#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
245do { \
246 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
247 _file_priv->master->lock.file_priv != _file_priv) { \
248 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
249 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
250 _file_priv->master->lock.file_priv, _file_priv); \
251 return -EINVAL; \
252 } \
253} while (0)
254
255/**
256 * Ioctl function type. 219 * Ioctl function type.
257 * 220 *
258 * \param inode device inode. 221 * \param inode device inode.
@@ -292,80 +255,6 @@ struct drm_ioctl_desc {
292#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ 255#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
293 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} 256 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
294 257
295struct drm_magic_entry {
296 struct list_head head;
297 struct drm_hash_item hash_item;
298 struct drm_file *priv;
299};
300
301struct drm_vma_entry {
302 struct list_head head;
303 struct vm_area_struct *vma;
304 pid_t pid;
305};
306
307/**
308 * DMA buffer.
309 */
310struct drm_buf {
311 int idx; /**< Index into master buflist */
312 int total; /**< Buffer size */
313 int order; /**< log-base-2(total) */
314 int used; /**< Amount of buffer in use (for DMA) */
315 unsigned long offset; /**< Byte offset (used internally) */
316 void *address; /**< Address of buffer */
317 unsigned long bus_address; /**< Bus address of buffer */
318 struct drm_buf *next; /**< Kernel-only: used for free list */
319 __volatile__ int waiting; /**< On kernel DMA queue */
320 __volatile__ int pending; /**< On hardware DMA queue */
321 struct drm_file *file_priv; /**< Private of holding file descr */
322 int context; /**< Kernel queue for this buffer */
323 int while_locked; /**< Dispatch this buffer while locked */
324 enum {
325 DRM_LIST_NONE = 0,
326 DRM_LIST_FREE = 1,
327 DRM_LIST_WAIT = 2,
328 DRM_LIST_PEND = 3,
329 DRM_LIST_PRIO = 4,
330 DRM_LIST_RECLAIM = 5
331 } list; /**< Which list we're on */
332
333 int dev_priv_size; /**< Size of buffer private storage */
334 void *dev_private; /**< Per-buffer private storage */
335};
336
337/** bufs is one longer than it has to be */
338struct drm_waitlist {
339 int count; /**< Number of possible buffers */
340 struct drm_buf **bufs; /**< List of pointers to buffers */
341 struct drm_buf **rp; /**< Read pointer */
342 struct drm_buf **wp; /**< Write pointer */
343 struct drm_buf **end; /**< End pointer */
344 spinlock_t read_lock;
345 spinlock_t write_lock;
346};
347
348typedef struct drm_dma_handle {
349 dma_addr_t busaddr;
350 void *vaddr;
351 size_t size;
352} drm_dma_handle_t;
353
354/**
355 * Buffer entry. There is one of this for each buffer size order.
356 */
357struct drm_buf_entry {
358 int buf_size; /**< size */
359 int buf_count; /**< number of buffers */
360 struct drm_buf *buflist; /**< buffer list */
361 int seg_count;
362 int page_order;
363 struct drm_dma_handle **seglist;
364
365 int low_mark; /**< Low water mark */
366 int high_mark; /**< High water mark */
367};
368
369/* Event queued up for userspace to read */ 258/* Event queued up for userspace to read */
370struct drm_pending_event { 259struct drm_pending_event {
371 struct drm_event *event; 260 struct drm_event *event;
@@ -444,214 +333,12 @@ struct drm_lock_data {
444}; 333};
445 334
446/** 335/**
447 * DMA data.
448 */
449struct drm_device_dma {
450
451 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
452 int buf_count; /**< total number of buffers */
453 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
454 int seg_count;
455 int page_count; /**< number of pages */
456 unsigned long *pagelist; /**< page list */
457 unsigned long byte_count;
458 enum {
459 _DRM_DMA_USE_AGP = 0x01,
460 _DRM_DMA_USE_SG = 0x02,
461 _DRM_DMA_USE_FB = 0x04,
462 _DRM_DMA_USE_PCI_RO = 0x08
463 } flags;
464
465};
466
467/**
468 * AGP memory entry. Stored as a doubly linked list.
469 */
470struct drm_agp_mem {
471 unsigned long handle; /**< handle */
472 struct agp_memory *memory;
473 unsigned long bound; /**< address */
474 int pages;
475 struct list_head head;
476};
477
478/**
479 * AGP data.
480 *
481 * \sa drm_agp_init() and drm_device::agp.
482 */
483struct drm_agp_head {
484 struct agp_kern_info agp_info; /**< AGP device information */
485 struct list_head memory;
486 unsigned long mode; /**< AGP mode */
487 struct agp_bridge_data *bridge;
488 int enabled; /**< whether the AGP bus as been enabled */
489 int acquired; /**< whether the AGP device has been acquired */
490 unsigned long base;
491 int agp_mtrr;
492 int cant_use_aperture;
493 unsigned long page_mask;
494};
495
496/**
497 * Scatter-gather memory.
498 */
499struct drm_sg_mem {
500 unsigned long handle;
501 void *virtual;
502 int pages;
503 struct page **pagelist;
504 dma_addr_t *busaddr;
505};
506
507struct drm_sigdata {
508 int context;
509 struct drm_hw_lock *lock;
510};
511
512
513/**
514 * Kernel side of a mapping
515 */
516struct drm_local_map {
517 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
518 unsigned long size; /**< Requested physical size (bytes) */
519 enum drm_map_type type; /**< Type of memory to map */
520 enum drm_map_flags flags; /**< Flags */
521 void *handle; /**< User-space: "Handle" to pass to mmap() */
522 /**< Kernel-space: kernel-virtual address */
523 int mtrr; /**< MTRR slot used */
524};
525
526typedef struct drm_local_map drm_local_map_t;
527
528/**
529 * Mappings list
530 */
531struct drm_map_list {
532 struct list_head head; /**< list head */
533 struct drm_hash_item hash;
534 struct drm_local_map *map; /**< mapping */
535 uint64_t user_token;
536 struct drm_master *master;
537};
538
539/* location of GART table */
540#define DRM_ATI_GART_MAIN 1
541#define DRM_ATI_GART_FB 2
542
543#define DRM_ATI_GART_PCI 1
544#define DRM_ATI_GART_PCIE 2
545#define DRM_ATI_GART_IGP 3
546
547struct drm_ati_pcigart_info {
548 int gart_table_location;
549 int gart_reg_if;
550 void *addr;
551 dma_addr_t bus_addr;
552 dma_addr_t table_mask;
553 struct drm_dma_handle *table_handle;
554 struct drm_local_map mapping;
555 int table_size;
556};
557
558/**
559 * This structure defines the drm_mm memory object, which will be used by the
560 * DRM for its buffer objects.
561 */
562struct drm_gem_object {
563 /** Reference count of this object */
564 struct kref refcount;
565
566 /**
567 * handle_count - gem file_priv handle count of this object
568 *
569 * Each handle also holds a reference. Note that when the handle_count
570 * drops to 0 any global names (e.g. the id in the flink namespace) will
571 * be cleared.
572 *
573 * Protected by dev->object_name_lock.
574 * */
575 unsigned handle_count;
576
577 /** Related drm device */
578 struct drm_device *dev;
579
580 /** File representing the shmem storage */
581 struct file *filp;
582
583 /* Mapping info for this object */
584 struct drm_vma_offset_node vma_node;
585
586 /**
587 * Size of the object, in bytes. Immutable over the object's
588 * lifetime.
589 */
590 size_t size;
591
592 /**
593 * Global name for this object, starts at 1. 0 means unnamed.
594 * Access is covered by the object_name_lock in the related drm_device
595 */
596 int name;
597
598 /**
599 * Memory domains. These monitor which caches contain read/write data
600 * related to the object. When transitioning from one set of domains
601 * to another, the driver is called to ensure that caches are suitably
602 * flushed and invalidated
603 */
604 uint32_t read_domains;
605 uint32_t write_domain;
606
607 /**
608 * While validating an exec operation, the
609 * new read/write domain values are computed here.
610 * They will be transferred to the above values
611 * at the point that any cache flushing occurs
612 */
613 uint32_t pending_read_domains;
614 uint32_t pending_write_domain;
615
616 /**
617 * dma_buf - dma buf associated with this GEM object
618 *
619 * Pointer to the dma-buf associated with this gem object (either
620 * through importing or exporting). We break the resulting reference
621 * loop when the last gem handle for this object is released.
622 *
623 * Protected by obj->object_name_lock
624 */
625 struct dma_buf *dma_buf;
626
627 /**
628 * import_attach - dma buf attachment backing this object
629 *
630 * Any foreign dma_buf imported as a gem object has this set to the
631 * attachment point for the device. This is invariant over the lifetime
632 * of a gem object.
633 *
634 * The driver's ->gem_free_object callback is responsible for cleaning
635 * up the dma_buf attachment and references acquired at import time.
636 *
637 * Note that the drm gem/prime core does not depend upon drivers setting
638 * this field any more. So for drivers where this doesn't make sense
639 * (e.g. virtual devices or a displaylink behind an usb bus) they can
640 * simply leave it as NULL.
641 */
642 struct dma_buf_attachment *import_attach;
643};
644
645#include <drm/drm_crtc.h>
646
647/**
648 * struct drm_master - drm master structure 336 * struct drm_master - drm master structure
649 * 337 *
650 * @refcount: Refcount for this master object. 338 * @refcount: Refcount for this master object.
651 * @minor: Link back to minor char device we are master for. Immutable. 339 * @minor: Link back to minor char device we are master for. Immutable.
652 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. 340 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
653 * @unique_len: Length of unique field. Protected by drm_global_mutex. 341 * @unique_len: Length of unique field. Protected by drm_global_mutex.
654 * @unique_size: Amount allocated. Protected by drm_global_mutex.
655 * @magiclist: Hash of used authentication tokens. Protected by struct_mutex. 342 * @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
656 * @magicfree: List of used authentication tokens. Protected by struct_mutex. 343 * @magicfree: List of used authentication tokens. Protected by struct_mutex.
657 * @lock: DRI lock information. 344 * @lock: DRI lock information.
@@ -662,7 +349,6 @@ struct drm_master {
662 struct drm_minor *minor; 349 struct drm_minor *minor;
663 char *unique; 350 char *unique;
664 int unique_len; 351 int unique_len;
665 int unique_size;
666 struct drm_open_hash magiclist; 352 struct drm_open_hash magiclist;
667 struct list_head magicfree; 353 struct list_head magicfree;
668 struct drm_lock_data lock; 354 struct drm_lock_data lock;
@@ -677,17 +363,13 @@ struct drm_master {
677/* Flags and return codes for get_vblank_timestamp() driver function. */ 363/* Flags and return codes for get_vblank_timestamp() driver function. */
678#define DRM_CALLED_FROM_VBLIRQ 1 364#define DRM_CALLED_FROM_VBLIRQ 1
679#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) 365#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
680#define DRM_VBLANKTIME_INVBL (1 << 1) 366#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)
681 367
682/* get_scanout_position() return flags */ 368/* get_scanout_position() return flags */
683#define DRM_SCANOUTPOS_VALID (1 << 0) 369#define DRM_SCANOUTPOS_VALID (1 << 0)
684#define DRM_SCANOUTPOS_INVBL (1 << 1) 370#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
685#define DRM_SCANOUTPOS_ACCURATE (1 << 2) 371#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
686 372
687struct drm_bus {
688 int (*set_busid)(struct drm_device *dev, struct drm_master *master);
689};
690
691/** 373/**
692 * DRM driver structure. This structure represent the common code for 374 * DRM driver structure. This structure represent the common code for
693 * a family of cards. There will one drm_device for each card present 375 * a family of cards. There will one drm_device for each card present
@@ -706,6 +388,7 @@ struct drm_driver {
706 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 388 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
707 int (*dma_quiescent) (struct drm_device *); 389 int (*dma_quiescent) (struct drm_device *);
708 int (*context_dtor) (struct drm_device *dev, int context); 390 int (*context_dtor) (struct drm_device *dev, int context);
391 int (*set_busid)(struct drm_device *dev, struct drm_master *master);
709 392
710 /** 393 /**
711 * get_vblank_counter - get raw hardware vblank counter 394 * get_vblank_counter - get raw hardware vblank counter
@@ -888,7 +571,8 @@ struct drm_driver {
888 struct drm_gem_object *obj); 571 struct drm_gem_object *obj);
889 struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); 572 struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
890 struct drm_gem_object *(*gem_prime_import_sg_table)( 573 struct drm_gem_object *(*gem_prime_import_sg_table)(
891 struct drm_device *dev, size_t size, 574 struct drm_device *dev,
575 struct dma_buf_attachment *attach,
892 struct sg_table *sgt); 576 struct sg_table *sgt);
893 void *(*gem_prime_vmap)(struct drm_gem_object *obj); 577 void *(*gem_prime_vmap)(struct drm_gem_object *obj);
894 void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); 578 void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
@@ -924,7 +608,6 @@ struct drm_driver {
924 const struct drm_ioctl_desc *ioctls; 608 const struct drm_ioctl_desc *ioctls;
925 int num_ioctls; 609 int num_ioctls;
926 const struct file_operations *fops; 610 const struct file_operations *fops;
927 struct drm_bus *bus;
928 611
929 /* List of devices hanging off this driver with stealth attach. */ 612 /* List of devices hanging off this driver with stealth attach. */
930 struct list_head legacy_dev_list; 613 struct list_head legacy_dev_list;
@@ -1079,6 +762,16 @@ struct drm_device {
1079 */ 762 */
1080 bool vblank_disable_allowed; 763 bool vblank_disable_allowed;
1081 764
765 /*
766 * If true, vblank interrupt will be disabled immediately when the
767 * refcount drops to zero, as opposed to via the vblank disable
768 * timer.
 769 * This can be set to true if the hardware has a working vblank
770 * counter and the driver uses drm_vblank_on() and drm_vblank_off()
771 * appropriately.
772 */
773 bool vblank_disable_immediate;
774
1082 /* array of size num_crtcs */ 775 /* array of size num_crtcs */
1083 struct drm_vblank_crtc *vblank; 776 struct drm_vblank_crtc *vblank;
1084 777
@@ -1103,13 +796,16 @@ struct drm_device {
1103#endif 796#endif
1104 797
1105 struct platform_device *platformdev; /**< Platform device structure */ 798
1106 struct usb_device *usbdev;
1107 799
1108 struct drm_sg_mem *sg; /**< Scatter gather memory */ 800 struct drm_sg_mem *sg; /**< Scatter gather memory */
1109 unsigned int num_crtcs; /**< Number of CRTCs on this device */ 801 unsigned int num_crtcs; /**< Number of CRTCs on this device */
1110 struct drm_sigdata sigdata; /**< For block_all_signals */
1111 sigset_t sigmask; 802 sigset_t sigmask;
1112 803
804 struct {
805 int context;
806 struct drm_hw_lock *lock;
807 } sigdata;
808
1113 struct drm_local_map *agp_buffer_map; 809 struct drm_local_map *agp_buffer_map;
1114 unsigned int agp_buffer_token; 810 unsigned int agp_buffer_token;
1115 811
@@ -1172,112 +868,32 @@ extern long drm_ioctl(struct file *filp,
1172 unsigned int cmd, unsigned long arg); 868 unsigned int cmd, unsigned long arg);
1173extern long drm_compat_ioctl(struct file *filp, 869extern long drm_compat_ioctl(struct file *filp,
1174 unsigned int cmd, unsigned long arg); 870 unsigned int cmd, unsigned long arg);
1175extern int drm_lastclose(struct drm_device *dev);
1176extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); 871extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
1177 872
1178 /* Device support (drm_fops.h) */ 873 /* Device support (drm_fops.h) */
1179extern struct mutex drm_global_mutex;
1180extern int drm_open(struct inode *inode, struct file *filp); 874extern int drm_open(struct inode *inode, struct file *filp);
1181extern ssize_t drm_read(struct file *filp, char __user *buffer, 875extern ssize_t drm_read(struct file *filp, char __user *buffer,
1182 size_t count, loff_t *offset); 876 size_t count, loff_t *offset);
1183extern int drm_release(struct inode *inode, struct file *filp); 877extern int drm_release(struct inode *inode, struct file *filp);
1184 878
1185 /* Mapping support (drm_vm.h) */ 879 /* Mapping support (drm_vm.h) */
1186extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
1187extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
1188extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
1189extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
1190extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 880extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
1191 881
1192 /* Memory management support (drm_memory.h) */ 882/* Misc. IOCTL support (drm_ioctl.c) */
1193#include <drm/drm_memory.h> 883int drm_noop(struct drm_device *dev, void *data,
1194 884 struct drm_file *file_priv);
1195
1196 /* Misc. IOCTL support (drm_ioctl.h) */
1197extern int drm_irq_by_busid(struct drm_device *dev, void *data,
1198 struct drm_file *file_priv);
1199extern int drm_getunique(struct drm_device *dev, void *data,
1200 struct drm_file *file_priv);
1201extern int drm_setunique(struct drm_device *dev, void *data,
1202 struct drm_file *file_priv);
1203extern int drm_getmap(struct drm_device *dev, void *data,
1204 struct drm_file *file_priv);
1205extern int drm_getclient(struct drm_device *dev, void *data,
1206 struct drm_file *file_priv);
1207extern int drm_getstats(struct drm_device *dev, void *data,
1208 struct drm_file *file_priv);
1209extern int drm_getcap(struct drm_device *dev, void *data,
1210 struct drm_file *file_priv);
1211extern int drm_setclientcap(struct drm_device *dev, void *data,
1212 struct drm_file *file_priv);
1213extern int drm_setversion(struct drm_device *dev, void *data,
1214 struct drm_file *file_priv);
1215extern int drm_noop(struct drm_device *dev, void *data,
1216 struct drm_file *file_priv);
1217
1218 /* Authentication IOCTL support (drm_auth.h) */
1219extern int drm_getmagic(struct drm_device *dev, void *data,
1220 struct drm_file *file_priv);
1221extern int drm_authmagic(struct drm_device *dev, void *data,
1222 struct drm_file *file_priv);
1223extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
1224 885
1225/* Cache management (drm_cache.c) */ 886/* Cache management (drm_cache.c) */
1226void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 887void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
1227void drm_clflush_sg(struct sg_table *st); 888void drm_clflush_sg(struct sg_table *st);
1228void drm_clflush_virt_range(void *addr, unsigned long length); 889void drm_clflush_virt_range(void *addr, unsigned long length);
1229 890
1230 /* Locking IOCTL support (drm_lock.h) */
1231extern int drm_lock(struct drm_device *dev, void *data,
1232 struct drm_file *file_priv);
1233extern int drm_unlock(struct drm_device *dev, void *data,
1234 struct drm_file *file_priv);
1235extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
1236extern void drm_idlelock_take(struct drm_lock_data *lock_data);
1237extern void drm_idlelock_release(struct drm_lock_data *lock_data);
1238
1239/* 891/*
1240 * These are exported to drivers so that they can implement fencing using 892 * These are exported to drivers so that they can implement fencing using
1241 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock. 893
1242 */ 894 */
1243 895
1244extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
1245
1246 /* Buffer management support (drm_bufs.h) */
1247extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
1248extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
1249extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
1250 unsigned int size, enum drm_map_type type,
1251 enum drm_map_flags flags, struct drm_local_map **map_ptr);
1252extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
1253 struct drm_file *file_priv);
1254extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
1255extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
1256extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
1257 struct drm_file *file_priv);
1258extern int drm_addbufs(struct drm_device *dev, void *data,
1259 struct drm_file *file_priv);
1260extern int drm_infobufs(struct drm_device *dev, void *data,
1261 struct drm_file *file_priv);
1262extern int drm_markbufs(struct drm_device *dev, void *data,
1263 struct drm_file *file_priv);
1264extern int drm_freebufs(struct drm_device *dev, void *data,
1265 struct drm_file *file_priv);
1266extern int drm_mapbufs(struct drm_device *dev, void *data,
1267 struct drm_file *file_priv);
1268extern int drm_dma_ioctl(struct drm_device *dev, void *data,
1269 struct drm_file *file_priv);
1270
1271 /* DMA support (drm_dma.h) */
1272extern int drm_legacy_dma_setup(struct drm_device *dev);
1273extern void drm_legacy_dma_takedown(struct drm_device *dev);
1274extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
1275extern void drm_core_reclaim_buffers(struct drm_device *dev,
1276 struct drm_file *filp);
1277
1278 /* IRQ support (drm_irq.h) */ 896 /* IRQ support (drm_irq.h) */
1279extern int drm_control(struct drm_device *dev, void *data,
1280 struct drm_file *file_priv);
1281extern int drm_irq_install(struct drm_device *dev, int irq); 897extern int drm_irq_install(struct drm_device *dev, int irq);
1282extern int drm_irq_uninstall(struct drm_device *dev); 898extern int drm_irq_uninstall(struct drm_device *dev);
1283 899
@@ -1294,14 +910,14 @@ extern int drm_vblank_get(struct drm_device *dev, int crtc);
1294extern void drm_vblank_put(struct drm_device *dev, int crtc); 910extern void drm_vblank_put(struct drm_device *dev, int crtc);
1295extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 911extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
1296extern void drm_crtc_vblank_put(struct drm_crtc *crtc); 912extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
913extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
914extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
1297extern void drm_vblank_off(struct drm_device *dev, int crtc); 915extern void drm_vblank_off(struct drm_device *dev, int crtc);
1298extern void drm_vblank_on(struct drm_device *dev, int crtc); 916extern void drm_vblank_on(struct drm_device *dev, int crtc);
1299extern void drm_crtc_vblank_off(struct drm_crtc *crtc); 917extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
1300extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 918extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
1301extern void drm_vblank_cleanup(struct drm_device *dev); 919extern void drm_vblank_cleanup(struct drm_device *dev);
1302 920
1303extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
1304 struct timeval *tvblank, unsigned flags);
1305extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 921extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1306 int crtc, int *max_error, 922 int crtc, int *max_error,
1307 struct timeval *vblank_time, 923 struct timeval *vblank_time,
@@ -1311,23 +927,23 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1311extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, 927extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
1312 const struct drm_display_mode *mode); 928 const struct drm_display_mode *mode);
1313 929
930/**
931 * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
932 * @crtc: which CRTC's vblank waitqueue to retrieve
933 *
934 * This function returns a pointer to the vblank waitqueue for the CRTC.
935 * Drivers can use this to implement vblank waits using wait_event() & co.
936 */
937static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
938{
939 return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
940}
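
A hedged sketch of the vblank wait this new helper enables, using only symbols visible in this patch plus drm_vblank_count(), which is assumed to keep its existing (dev, crtc-index) signature:

static int example_wait_next_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	int pipe = drm_crtc_index(crtc);
	u32 start;
	long ret;

	ret = drm_crtc_vblank_get(crtc);	/* keep the interrupt alive */
	if (ret)
		return ret;

	start = drm_vblank_count(dev, pipe);
	ret = wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
				 drm_vblank_count(dev, pipe) != start,
				 msecs_to_jiffies(100));

	drm_crtc_vblank_put(crtc);		/* allow deferred disable */
	return ret ? 0 : -ETIMEDOUT;
}
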
1314 941
1315/* Modesetting support */ 942/* Modesetting support */
1316extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 943extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1317extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 944extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
1318extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1319 struct drm_file *file_priv);
1320
1321 /* AGP/GART support (drm_agpsupport.h) */
1322
1323#include <drm/drm_agpsupport.h>
1324 945
1325 /* Stub support (drm_stub.h) */ 946 /* Stub support (drm_stub.h) */
1326extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
1327 struct drm_file *file_priv);
1328extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
1329 struct drm_file *file_priv);
1330struct drm_master *drm_master_create(struct drm_minor *minor);
1331extern struct drm_master *drm_master_get(struct drm_master *master); 947extern struct drm_master *drm_master_get(struct drm_master *master);
1332extern void drm_master_put(struct drm_master **master); 948extern void drm_master_put(struct drm_master **master);
1333 949
@@ -1335,33 +951,14 @@ extern void drm_put_dev(struct drm_device *dev);
1335extern void drm_unplug_dev(struct drm_device *dev); 951extern void drm_unplug_dev(struct drm_device *dev);
1336extern unsigned int drm_debug; 952extern unsigned int drm_debug;
1337 953
1338extern unsigned int drm_vblank_offdelay;
1339extern unsigned int drm_timestamp_precision;
1340extern unsigned int drm_timestamp_monotonic;
1341
1342extern struct class *drm_class;
1343
1344extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1345
1346 /* Debugfs support */ 954 /* Debugfs support */
1347#if defined(CONFIG_DEBUG_FS) 955#if defined(CONFIG_DEBUG_FS)
1348extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1349 struct dentry *root);
1350extern int drm_debugfs_create_files(const struct drm_info_list *files, 956extern int drm_debugfs_create_files(const struct drm_info_list *files,
1351 int count, struct dentry *root, 957 int count, struct dentry *root,
1352 struct drm_minor *minor); 958 struct drm_minor *minor);
1353extern int drm_debugfs_remove_files(const struct drm_info_list *files, 959extern int drm_debugfs_remove_files(const struct drm_info_list *files,
1354 int count, struct drm_minor *minor); 960 int count, struct drm_minor *minor);
1355extern int drm_debugfs_cleanup(struct drm_minor *minor);
1356extern int drm_debugfs_connector_add(struct drm_connector *connector);
1357extern void drm_debugfs_connector_remove(struct drm_connector *connector);
1358#else 961#else
1359static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1360 struct dentry *root)
1361{
1362 return 0;
1363}
1364
1365static inline int drm_debugfs_create_files(const struct drm_info_list *files, 962static inline int drm_debugfs_create_files(const struct drm_info_list *files,
1366 int count, struct dentry *root, 963 int count, struct dentry *root,
1367 struct drm_minor *minor) 964 struct drm_minor *minor)
@@ -1374,31 +971,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
1374{ 971{
1375 return 0; 972 return 0;
1376} 973}
1377
1378static inline int drm_debugfs_cleanup(struct drm_minor *minor)
1379{
1380 return 0;
1381}
1382
1383static inline int drm_debugfs_connector_add(struct drm_connector *connector)
1384{
1385 return 0;
1386}
1387static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
1388{
1389}
1390
1391#endif 974#endif
1392 975
1393 /* Info file support */
1394extern int drm_name_info(struct seq_file *m, void *data);
1395extern int drm_vm_info(struct seq_file *m, void *data);
1396extern int drm_bufs_info(struct seq_file *m, void *data);
1397extern int drm_vblank_info(struct seq_file *m, void *data);
1398extern int drm_clients_info(struct seq_file *m, void* data);
1399extern int drm_gem_name_info(struct seq_file *m, void *data);
1400
1401
1402extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, 976extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
1403 struct drm_gem_object *obj, int flags); 977 struct drm_gem_object *obj, int flags);
1404extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, 978extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
@@ -1410,150 +984,20 @@ extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
1410 struct drm_file *file_priv, int prime_fd, uint32_t *handle); 984 struct drm_file *file_priv, int prime_fd, uint32_t *handle);
1411extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); 985extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
1412 986
1413extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
1414 struct drm_file *file_priv);
1415extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
1416 struct drm_file *file_priv);
1417
1418extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, 987extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
1419 dma_addr_t *addrs, int max_pages); 988 dma_addr_t *addrs, int max_pages);
1420extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages); 989extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
1421extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); 990extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
1422 991
1423int drm_gem_dumb_destroy(struct drm_file *file,
1424 struct drm_device *dev,
1425 uint32_t handle);
1426
1427void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
1428void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
1429void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
1430
1431#if DRM_DEBUG_CODE
1432extern int drm_vma_info(struct seq_file *m, void *data);
1433#endif
1434 992
1435 /* Scatter Gather Support (drm_scatter.h) */ 993extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
1436extern void drm_legacy_sg_cleanup(struct drm_device *dev); 994 size_t align);
1437extern int drm_sg_alloc(struct drm_device *dev, void *data, 995extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
1438 struct drm_file *file_priv);
1439extern int drm_sg_free(struct drm_device *dev, void *data,
1440 struct drm_file *file_priv);
1441
1442 /* ATI PCIGART support (ati_pcigart.h) */
1443extern int drm_ati_pcigart_init(struct drm_device *dev,
1444 struct drm_ati_pcigart_info * gart_info);
1445extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
1446 struct drm_ati_pcigart_info * gart_info);
1447
1448extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1449 size_t align);
1450extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1451extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1452extern int drm_pci_set_unique(struct drm_device *dev,
1453 struct drm_master *master,
1454 struct drm_unique *u);
1455 996
1456 /* sysfs support (drm_sysfs.c) */ 997 /* sysfs support (drm_sysfs.c) */
1457struct drm_sysfs_class;
1458extern struct class *drm_sysfs_create(struct module *owner, char *name);
1459extern void drm_sysfs_destroy(void);
1460extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
1461extern void drm_sysfs_hotplug_event(struct drm_device *dev); 998extern void drm_sysfs_hotplug_event(struct drm_device *dev);
1462extern int drm_sysfs_connector_add(struct drm_connector *connector);
1463extern void drm_sysfs_connector_remove(struct drm_connector *connector);
1464
1465/* Graphics Execution Manager library functions (drm_gem.c) */
1466int drm_gem_init(struct drm_device *dev);
1467void drm_gem_destroy(struct drm_device *dev);
1468void drm_gem_object_release(struct drm_gem_object *obj);
1469void drm_gem_object_free(struct kref *kref);
1470int drm_gem_object_init(struct drm_device *dev,
1471 struct drm_gem_object *obj, size_t size);
1472void drm_gem_private_object_init(struct drm_device *dev,
1473 struct drm_gem_object *obj, size_t size);
1474void drm_gem_vm_open(struct vm_area_struct *vma);
1475void drm_gem_vm_close(struct vm_area_struct *vma);
1476int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1477 struct vm_area_struct *vma);
1478int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
1479
1480#include <drm/drm_global.h>
1481
1482static inline void
1483drm_gem_object_reference(struct drm_gem_object *obj)
1484{
1485 kref_get(&obj->refcount);
1486}
1487
1488static inline void
1489drm_gem_object_unreference(struct drm_gem_object *obj)
1490{
1491 if (obj != NULL)
1492 kref_put(&obj->refcount, drm_gem_object_free);
1493}
1494
1495static inline void
1496drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
1497{
1498 if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
1499 struct drm_device *dev = obj->dev;
1500
1501 mutex_lock(&dev->struct_mutex);
1502 if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
1503 drm_gem_object_free(&obj->refcount);
1504 mutex_unlock(&dev->struct_mutex);
1505 }
1506}
1507
1508int drm_gem_handle_create_tail(struct drm_file *file_priv,
1509 struct drm_gem_object *obj,
1510 u32 *handlep);
1511int drm_gem_handle_create(struct drm_file *file_priv,
1512 struct drm_gem_object *obj,
1513 u32 *handlep);
1514int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
1515 999
1516 1000
1517void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
1518int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
1519int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
1520
1521struct page **drm_gem_get_pages(struct drm_gem_object *obj);
1522void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
1523 bool dirty, bool accessed);
1524
1525struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1526 struct drm_file *filp,
1527 u32 handle);
1528int drm_gem_close_ioctl(struct drm_device *dev, void *data,
1529 struct drm_file *file_priv);
1530int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1531 struct drm_file *file_priv);
1532int drm_gem_open_ioctl(struct drm_device *dev, void *data,
1533 struct drm_file *file_priv);
1534void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
1535void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1536
1537extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
1538extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
1539extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
1540
1541static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
1542 unsigned int token)
1543{
1544 struct drm_map_list *_entry;
1545 list_for_each_entry(_entry, &dev->maplist, head)
1546 if (_entry->user_token == token)
1547 return _entry->map;
1548 return NULL;
1549}
1550
1551static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1552{
1553}
1554
1555#include <drm/drm_mem_util.h>
1556
1557struct drm_device *drm_dev_alloc(struct drm_driver *driver, 1001struct drm_device *drm_dev_alloc(struct drm_driver *driver,
1558 struct device *parent); 1002 struct device *parent);
1559void drm_dev_ref(struct drm_device *dev); 1003void drm_dev_ref(struct drm_device *dev);
@@ -1587,6 +1031,7 @@ extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
1587extern int drm_get_pci_dev(struct pci_dev *pdev, 1031extern int drm_get_pci_dev(struct pci_dev *pdev,
1588 const struct pci_device_id *ent, 1032 const struct pci_device_id *ent,
1589 struct drm_driver *driver); 1033 struct drm_driver *driver);
1034extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
1590 1035
1591#define DRM_PCIE_SPEED_25 1 1036#define DRM_PCIE_SPEED_25 1
1592#define DRM_PCIE_SPEED_50 2 1037#define DRM_PCIE_SPEED_50 2
@@ -1596,6 +1041,7 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
1596 1041
1597/* platform section */ 1042/* platform section */
1598extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); 1043extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
1044extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
1599 1045
1600/* returns true if currently okay to sleep */ 1046/* returns true if currently okay to sleep */
1601static __inline__ bool drm_can_sleep(void) 1047static __inline__ bool drm_can_sleep(void)
@@ -1605,5 +1051,4 @@ static __inline__ bool drm_can_sleep(void)
1605 return true; 1051 return true;
1606} 1052}
1607 1053
1608#endif /* __KERNEL__ */
1609#endif 1054#endif
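The drm_pci_set_busid() and drm_platform_set_busid() declarations added above exist because bus ID handling is now a per-driver callback rather than core guesswork. A minimal sketch of how a PCI driver might wire it up, assuming the new ->set_busid hook in struct drm_driver (the driver struct here is hypothetical):

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* route legacy busid/unique handling through the PCI helper */
	.set_busid = drm_pci_set_busid,
	/* .fops, .name, .ioctls etc. elided */
};

A platform driver would plug in drm_platform_set_busid() the same way.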
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 86a02188074b..055dc058d147 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -1,12 +1,32 @@
1#ifndef _DRM_AGPSUPPORT_H_ 1#ifndef _DRM_AGPSUPPORT_H_
2#define _DRM_AGPSUPPORT_H_ 2#define _DRM_AGPSUPPORT_H_
3 3
4#include <linux/agp_backend.h>
4#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/list.h>
5#include <linux/mm.h> 7#include <linux/mm.h>
6#include <linux/mutex.h> 8#include <linux/mutex.h>
7#include <linux/types.h> 9#include <linux/types.h>
8#include <linux/agp_backend.h> 10#include <uapi/drm/drm.h>
9#include <drm/drmP.h> 11
12struct drm_device;
13struct drm_file;
14
15#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
16 defined(MODULE)))
17
18struct drm_agp_head {
19 struct agp_kern_info agp_info;
20 struct list_head memory;
21 unsigned long mode;
22 struct agp_bridge_data *bridge;
23 int enabled;
24 int acquired;
25 unsigned long base;
26 int agp_mtrr;
27 int cant_use_aperture;
28 unsigned long page_mask;
29};
10 30
11#if __OS_HAS_AGP 31#if __OS_HAS_AGP
12 32
@@ -45,6 +65,7 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
45int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); 65int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
46int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 66int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
47 struct drm_file *file_priv); 67 struct drm_file *file_priv);
68
48#else /* __OS_HAS_AGP */ 69#else /* __OS_HAS_AGP */
49 70
50static inline void drm_free_agp(struct agp_memory * handle, int pages) 71static inline void drm_free_agp(struct agp_memory * handle, int pages)
@@ -172,6 +193,7 @@ static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
172{ 193{
173 return -ENODEV; 194 return -ENODEV;
174} 195}
196
175#endif /* __OS_HAS_AGP */ 197#endif /* __OS_HAS_AGP */
176 198
177#endif /* _DRM_AGPSUPPORT_H_ */ 199#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index f1105d0da059..c40070a92d6b 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,8 +31,8 @@
31#include <linux/idr.h> 31#include <linux/idr.h>
32#include <linux/fb.h> 32#include <linux/fb.h>
33#include <linux/hdmi.h> 33#include <linux/hdmi.h>
34#include <drm/drm_mode.h> 34#include <uapi/drm/drm_mode.h>
35#include <drm/drm_fourcc.h> 35#include <uapi/drm/drm_fourcc.h>
36#include <drm/drm_modeset_lock.h> 36#include <drm/drm_modeset_lock.h>
37 37
38struct drm_device; 38struct drm_device;
@@ -218,10 +218,6 @@ struct drm_property {
218 struct list_head enum_blob_list; 218 struct list_head enum_blob_list;
219}; 219};
220 220
221void drm_modeset_lock_all(struct drm_device *dev);
222void drm_modeset_unlock_all(struct drm_device *dev);
223void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
224
225struct drm_crtc; 221struct drm_crtc;
226struct drm_connector; 222struct drm_connector;
227struct drm_encoder; 223struct drm_encoder;
@@ -345,10 +341,6 @@ struct drm_crtc {
345 int cursor_x; 341 int cursor_x;
346 int cursor_y; 342 int cursor_y;
347 343
348 /* Temporary tracking of the old fb while a modeset is ongoing. Used
349 * by drm_mode_set_config_internal to implement correct refcounting. */
350 struct drm_framebuffer *old_fb;
351
352 bool enabled; 344 bool enabled;
353 345
354 /* Requested mode from modesetting. */ 346 /* Requested mode from modesetting. */
@@ -375,6 +367,12 @@ struct drm_crtc {
375 void *helper_private; 367 void *helper_private;
376 368
377 struct drm_object_properties properties; 369 struct drm_object_properties properties;
370
371 /*
372 * For legacy crtc ioctls so that atomic drivers can get at the locking
373 * acquire context.
374 */
375 struct drm_modeset_acquire_ctx *acquire_ctx;
378}; 376};
379 377
380 378
@@ -548,6 +546,7 @@ struct drm_connector {
548 void *helper_private; 546 void *helper_private;
549 547
550 /* forced on connector */ 548 /* forced on connector */
549 struct drm_cmdline_mode cmdline_mode;
551 enum drm_connector_force force; 550 enum drm_connector_force force;
552 bool override_edid; 551 bool override_edid;
553 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 552 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
@@ -582,6 +581,7 @@ struct drm_plane_funcs {
582 uint32_t src_w, uint32_t src_h); 581 uint32_t src_w, uint32_t src_h);
583 int (*disable_plane)(struct drm_plane *plane); 582 int (*disable_plane)(struct drm_plane *plane);
584 void (*destroy)(struct drm_plane *plane); 583 void (*destroy)(struct drm_plane *plane);
584 void (*reset)(struct drm_plane *plane);
585 585
586 int (*set_property)(struct drm_plane *plane, 586 int (*set_property)(struct drm_plane *plane,
587 struct drm_property *property, uint64_t val); 587 struct drm_property *property, uint64_t val);
@@ -620,6 +620,10 @@ struct drm_plane {
620 struct drm_crtc *crtc; 620 struct drm_crtc *crtc;
621 struct drm_framebuffer *fb; 621 struct drm_framebuffer *fb;
622 622
623 /* Temporary tracking of the old fb while a modeset is ongoing. Used
624 * by drm_mode_set_config_internal to implement correct refcounting. */
625 struct drm_framebuffer *old_fb;
626
623 const struct drm_plane_funcs *funcs; 627 const struct drm_plane_funcs *funcs;
624 628
625 struct drm_object_properties properties; 629 struct drm_object_properties properties;
@@ -821,6 +825,7 @@ struct drm_mode_config {
821 struct drm_property *dpms_property; 825 struct drm_property *dpms_property;
822 struct drm_property *path_property; 826 struct drm_property *path_property;
823 struct drm_property *plane_type_property; 827 struct drm_property *plane_type_property;
828 struct drm_property *rotation_property;
824 829
825 /* DVI-I properties */ 830 /* DVI-I properties */
826 struct drm_property *dvi_i_subconnector_property; 831 struct drm_property *dvi_i_subconnector_property;
@@ -903,6 +908,7 @@ int drm_connector_register(struct drm_connector *connector);
903void drm_connector_unregister(struct drm_connector *connector); 908void drm_connector_unregister(struct drm_connector *connector);
904 909
905extern void drm_connector_cleanup(struct drm_connector *connector); 910extern void drm_connector_cleanup(struct drm_connector *connector);
911extern unsigned int drm_connector_index(struct drm_connector *connector);
906/* helper to unplug all connectors from sysfs for device */ 912/* helper to unplug all connectors from sysfs for device */
907extern void drm_connector_unplug_all(struct drm_device *dev); 913extern void drm_connector_unplug_all(struct drm_device *dev);
908 914
@@ -942,6 +948,7 @@ extern int drm_plane_init(struct drm_device *dev,
942 const uint32_t *formats, uint32_t format_count, 948 const uint32_t *formats, uint32_t format_count,
943 bool is_primary); 949 bool is_primary);
944extern void drm_plane_cleanup(struct drm_plane *plane); 950extern void drm_plane_cleanup(struct drm_plane *plane);
951extern unsigned int drm_plane_index(struct drm_plane *plane);
945extern void drm_plane_force_disable(struct drm_plane *plane); 952extern void drm_plane_force_disable(struct drm_plane *plane);
946extern int drm_crtc_check_viewport(const struct drm_crtc *crtc, 953extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
947 int x, int y, 954 int x, int y,
@@ -1120,6 +1127,9 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
1120 struct drm_file *file_priv); 1127 struct drm_file *file_priv);
1121extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, 1128extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
1122 struct drm_file *file_priv); 1129 struct drm_file *file_priv);
1130extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
1131 struct drm_property *property,
1132 uint64_t value);
1123 1133
1124extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, 1134extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
1125 int *bpp); 1135 int *bpp);
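drm_plane_index() and drm_connector_index(), declared above, give each mode object a stable zero-based index; the usual consumer is bitmask bookkeeping in helper code. A tiny sketch under that assumption (example_track_plane is hypothetical):

static void example_track_plane(struct drm_plane *plane,
				unsigned long *plane_mask)
{
	/* one bit per plane, keyed by its position on the plane list */
	*plane_mask |= 1UL << drm_plane_index(plane);
}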
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a21568bf1514..9305c718d789 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -190,16 +190,16 @@
190# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 190# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
191# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 191# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
192# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) 192# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
193# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) 193# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
194# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) 194# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
195# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) 195# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
196# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) 196# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
197 197
198# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) 198# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
199# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) 199# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
200# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) 200# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
201# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) 201# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
202# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) 202# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
203 203
204# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 204# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
205# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) 205# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
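The rename above reflects that DPCD training values are abstract levels 0-3, not fixed millivolt/dB figures. Composing a lane's TRAINING_LANEx_SET byte might look like this sketch (built only from the defines above, not lifted from a specific driver):

	/* level-2 voltage swing, level-1 pre-emphasis */
	u8 train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 |
		       DP_TRAIN_PRE_EMPH_LEVEL_1;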
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 9b446ada2532..338fc1053835 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -388,6 +388,7 @@ struct drm_dp_payload {
388 int payload_state; 388 int payload_state;
389 int start_slot; 389 int start_slot;
390 int num_slots; 390 int num_slots;
391 int vcpi;
391}; 392};
392 393
393/** 394/**
@@ -454,6 +455,7 @@ struct drm_dp_mst_topology_mgr {
454 struct drm_dp_vcpi **proposed_vcpis; 455 struct drm_dp_vcpi **proposed_vcpis;
455 struct drm_dp_payload *payloads; 456 struct drm_dp_payload *payloads;
456 unsigned long payload_mask; 457 unsigned long payload_mask;
458 unsigned long vcpi_mask;
457 459
458 wait_queue_head_t tx_waitq; 460 wait_queue_head_t tx_waitq;
459 struct work_struct work; 461 struct work_struct work;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index bfd329d613c4..f4ad254e3488 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -77,7 +77,6 @@ struct drm_fb_helper_funcs {
77 77
78struct drm_fb_helper_connector { 78struct drm_fb_helper_connector {
79 struct drm_connector *connector; 79 struct drm_connector *connector;
80 struct drm_cmdline_mode cmdline_mode;
81}; 80};
82 81
83struct drm_fb_helper { 82struct drm_fb_helper {
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
new file mode 100644
index 000000000000..1e6ae1458f7a
--- /dev/null
+++ b/include/drm/drm_gem.h
@@ -0,0 +1,183 @@
1#ifndef __DRM_GEM_H__
2#define __DRM_GEM_H__
3
4/*
5 * GEM Graphics Execution Manager Driver Interfaces
6 *
7 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
9 * Copyright (c) 2009-2010, Code Aurora Forum.
10 * All rights reserved.
11 * Copyright © 2014 Intel Corporation
12 * Daniel Vetter <daniel.vetter@ffwll.ch>
13 *
14 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
15 * Author: Gareth Hughes <gareth@valinux.com>
16 *
17 * Permission is hereby granted, free of charge, to any person obtaining a
18 * copy of this software and associated documentation files (the "Software"),
19 * to deal in the Software without restriction, including without limitation
20 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
21 * and/or sell copies of the Software, and to permit persons to whom the
22 * Software is furnished to do so, subject to the following conditions:
23 *
24 * The above copyright notice and this permission notice (including the next
25 * paragraph) shall be included in all copies or substantial portions of the
26 * Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
31 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
32 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
33 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
34 * OTHER DEALINGS IN THE SOFTWARE.
35 */
36
37/**
38 * This structure defines the GEM memory object, which the DRM core uses
39 * for its buffer objects.
40 */
41struct drm_gem_object {
42 /** Reference count of this object */
43 struct kref refcount;
44
45 /**
46 * handle_count - gem file_priv handle count of this object
47 *
48 * Each handle also holds a reference. Note that when the handle_count
49 * drops to 0 any global names (e.g. the id in the flink namespace) will
50 * be cleared.
51 *
52 * Protected by dev->object_name_lock.
53 * */
54 unsigned handle_count;
55
56 /** Related drm device */
57 struct drm_device *dev;
58
59 /** File representing the shmem storage */
60 struct file *filp;
61
62 /* Mapping info for this object */
63 struct drm_vma_offset_node vma_node;
64
65 /**
66 * Size of the object, in bytes. Immutable over the object's
67 * lifetime.
68 */
69 size_t size;
70
71 /**
72 * Global name for this object, starts at 1. 0 means unnamed.
73 * Access is covered by the object_name_lock in the related drm_device
74 */
75 int name;
76
77 /**
78 * Memory domains. These monitor which caches contain read/write data
79 * related to the object. When transitioning from one set of domains
80 * to another, the driver is called to ensure that caches are suitably
81 * flushed and invalidated.
82 */
83 uint32_t read_domains;
84 uint32_t write_domain;
85
86 /**
87 * While validating an exec operation, the
88 * new read/write domain values are computed here.
89 * They will be transferred to the above values
90 * at the point that any cache flushing occurs
91 */
92 uint32_t pending_read_domains;
93 uint32_t pending_write_domain;
94
95 /**
96 * dma_buf - dma buf associated with this GEM object
97 *
98 * Pointer to the dma-buf associated with this gem object (either
99 * through importing or exporting). We break the resulting reference
100 * loop when the last gem handle for this object is released.
101 *
102 * Protected by obj->object_name_lock
103 */
104 struct dma_buf *dma_buf;
105
106 /**
107 * import_attach - dma buf attachment backing this object
108 *
109 * Any foreign dma_buf imported as a gem object has this set to the
110 * attachment point for the device. This is invariant over the lifetime
111 * of a gem object.
112 *
113 * The driver's ->gem_free_object callback is responsible for cleaning
114 * up the dma_buf attachment and references acquired at import time.
115 *
116 * Note that the drm gem/prime core does not depend upon drivers setting
117 * this field any more. So for drivers where this doesn't make sense
118 * (e.g. virtual devices or a DisplayLink device behind a USB bus) they can
119 * simply leave it as NULL.
120 */
121 struct dma_buf_attachment *import_attach;
122};
123
124void drm_gem_object_release(struct drm_gem_object *obj);
125void drm_gem_object_free(struct kref *kref);
126int drm_gem_object_init(struct drm_device *dev,
127 struct drm_gem_object *obj, size_t size);
128void drm_gem_private_object_init(struct drm_device *dev,
129 struct drm_gem_object *obj, size_t size);
130void drm_gem_vm_open(struct vm_area_struct *vma);
131void drm_gem_vm_close(struct vm_area_struct *vma);
132int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
133 struct vm_area_struct *vma);
134int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
135
136static inline void
137drm_gem_object_reference(struct drm_gem_object *obj)
138{
139 kref_get(&obj->refcount);
140}
141
142static inline void
143drm_gem_object_unreference(struct drm_gem_object *obj)
144{
145 if (obj != NULL)
146 kref_put(&obj->refcount, drm_gem_object_free);
147}
148
149static inline void
150drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
151{
152 if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
153 struct drm_device *dev = obj->dev;
154
155 mutex_lock(&dev->struct_mutex);
156 if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
157 drm_gem_object_free(&obj->refcount);
158 mutex_unlock(&dev->struct_mutex);
159 }
160}
161
162int drm_gem_handle_create(struct drm_file *file_priv,
163 struct drm_gem_object *obj,
164 u32 *handlep);
165int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
166
167
168void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
169int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
170int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
171
172struct page **drm_gem_get_pages(struct drm_gem_object *obj);
173void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
174 bool dirty, bool accessed);
175
176struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
177 struct drm_file *filp,
178 u32 handle);
179int drm_gem_dumb_destroy(struct drm_file *file,
180 struct drm_device *dev,
181 uint32_t handle);
182
183#endif /* __DRM_GEM_H__ */
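One subtlety in the new header: drm_gem_object_unreference_unlocked() drops a reference without dev->struct_mutex as long as it is not the last one (the atomic_add_unless() fast path), and only takes the mutex when the count would reach zero, since drm_gem_object_free() must run under it. A usage sketch built only from the declarations above:

	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, handle); /* +1 ref */
	if (!obj)
		return -ENOENT;
	/* ... use obj; no struct_mutex needed here ... */
	drm_gem_object_unreference_unlocked(obj); /* locks only on final put */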
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 2a3cea91606d..2ff35f3de9c5 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -2,6 +2,7 @@
2#define __DRM_GEM_CMA_HELPER_H__ 2#define __DRM_GEM_CMA_HELPER_H__
3 3
4#include <drm/drmP.h> 4#include <drm/drmP.h>
5#include <drm/drm_gem.h>
5 6
6struct drm_gem_cma_object { 7struct drm_gem_cma_object {
7 struct drm_gem_object base; 8 struct drm_gem_object base;
@@ -44,7 +45,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
44 45
45struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); 46struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
46struct drm_gem_object * 47struct drm_gem_object *
47drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, 48drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
49 struct dma_buf_attachment *attach,
48 struct sg_table *sgt); 50 struct sg_table *sgt);
49int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, 51int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
50 struct vm_area_struct *vma); 52 struct vm_area_struct *vma);
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
new file mode 100644
index 000000000000..3e698038dc7b
--- /dev/null
+++ b/include/drm/drm_legacy.h
@@ -0,0 +1,203 @@
1#ifndef __DRM_DRM_LEGACY_H__
2#define __DRM_DRM_LEGACY_H__
3
4/*
5 * Legacy driver interfaces for the Direct Rendering Manager
6 *
7 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
9 * Copyright (c) 2009-2010, Code Aurora Forum.
10 * All rights reserved.
11 * Copyright © 2014 Intel Corporation
12 * Daniel Vetter <daniel.vetter@ffwll.ch>
13 *
14 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
15 * Author: Gareth Hughes <gareth@valinux.com>
16 *
17 * Permission is hereby granted, free of charge, to any person obtaining a
18 * copy of this software and associated documentation files (the "Software"),
19 * to deal in the Software without restriction, including without limitation
20 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
21 * and/or sell copies of the Software, and to permit persons to whom the
22 * Software is furnished to do so, subject to the following conditions:
23 *
24 * The above copyright notice and this permission notice (including the next
25 * paragraph) shall be included in all copies or substantial portions of the
26 * Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
31 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
32 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
33 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
34 * OTHER DEALINGS IN THE SOFTWARE.
35 */
36
37
38/*
39 * Legacy support for paleontologic DRM drivers
40 *
41 * If you add a new driver and it uses any of these functions or structures,
42 * you're doing it terribly wrong.
43 */
44
45/**
46 * DMA buffer.
47 */
48struct drm_buf {
49 int idx; /**< Index into master buflist */
50 int total; /**< Buffer size */
51 int order; /**< log-base-2(total) */
52 int used; /**< Amount of buffer in use (for DMA) */
53 unsigned long offset; /**< Byte offset (used internally) */
54 void *address; /**< Address of buffer */
55 unsigned long bus_address; /**< Bus address of buffer */
56 struct drm_buf *next; /**< Kernel-only: used for free list */
57 __volatile__ int waiting; /**< On kernel DMA queue */
58 __volatile__ int pending; /**< On hardware DMA queue */
59 struct drm_file *file_priv; /**< Private of holding file descr */
60 int context; /**< Kernel queue for this buffer */
61 int while_locked; /**< Dispatch this buffer while locked */
62 enum {
63 DRM_LIST_NONE = 0,
64 DRM_LIST_FREE = 1,
65 DRM_LIST_WAIT = 2,
66 DRM_LIST_PEND = 3,
67 DRM_LIST_PRIO = 4,
68 DRM_LIST_RECLAIM = 5
69 } list; /**< Which list we're on */
70
71 int dev_priv_size; /**< Size of buffer private storage */
72 void *dev_private; /**< Per-buffer private storage */
73};
74
75typedef struct drm_dma_handle {
76 dma_addr_t busaddr;
77 void *vaddr;
78 size_t size;
79} drm_dma_handle_t;
80
81/**
82 * Buffer entry. There is one of these for each buffer size order.
83 */
84struct drm_buf_entry {
85 int buf_size; /**< size */
86 int buf_count; /**< number of buffers */
87 struct drm_buf *buflist; /**< buffer list */
88 int seg_count;
89 int page_order;
90 struct drm_dma_handle **seglist;
91
92 int low_mark; /**< Low water mark */
93 int high_mark; /**< High water mark */
94};
95
96/**
97 * DMA data.
98 */
99struct drm_device_dma {
100
101 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
102 int buf_count; /**< total number of buffers */
103 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
104 int seg_count;
105 int page_count; /**< number of pages */
106 unsigned long *pagelist; /**< page list */
107 unsigned long byte_count;
108 enum {
109 _DRM_DMA_USE_AGP = 0x01,
110 _DRM_DMA_USE_SG = 0x02,
111 _DRM_DMA_USE_FB = 0x04,
112 _DRM_DMA_USE_PCI_RO = 0x08
113 } flags;
114
115};
116
117/**
118 * Scatter-gather memory.
119 */
120struct drm_sg_mem {
121 unsigned long handle;
122 void *virtual;
123 int pages;
124 struct page **pagelist;
125 dma_addr_t *busaddr;
126};
127
128/**
129 * Kernel side of a mapping
130 */
131struct drm_local_map {
132 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
133 unsigned long size; /**< Requested physical size (bytes) */
134 enum drm_map_type type; /**< Type of memory to map */
135 enum drm_map_flags flags; /**< Flags */
136 void *handle; /**< User-space: "Handle" to pass to mmap() */
137 /**< Kernel-space: kernel-virtual address */
138 int mtrr; /**< MTRR slot used */
139};
140
141typedef struct drm_local_map drm_local_map_t;
142
143/**
144 * Mappings list
145 */
146struct drm_map_list {
147 struct list_head head; /**< list head */
148 struct drm_hash_item hash;
149 struct drm_local_map *map; /**< mapping */
150 uint64_t user_token;
151 struct drm_master *master;
152};
153
154int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
155 unsigned int size, enum drm_map_type type,
156 enum drm_map_flags flags, struct drm_local_map **map_p);
157int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
158int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
159struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
160int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
161
162int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
163int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
164
165/**
166 * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
167 *
168 * \param dev DRM device.
169 * \param _file_priv DRM file private of the caller.
170 */
171#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
172do { \
173 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
174 _file_priv->master->lock.file_priv != _file_priv) { \
175 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
176 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
177 _file_priv->master->lock.file_priv, _file_priv); \
178 return -EINVAL; \
179 } \
180} while (0)
181
182void drm_legacy_idlelock_take(struct drm_lock_data *lock);
183void drm_legacy_idlelock_release(struct drm_lock_data *lock);
184
185/* drm_pci.c dma alloc wrappers */
186void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
187
188/* drm_memory.c */
189void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
190void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
191void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
192
193static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
194 unsigned int token)
195{
196 struct drm_map_list *_entry;
197 list_for_each_entry(_entry, &dev->maplist, head)
198 if (_entry->user_token == token)
199 return _entry->map;
200 return NULL;
201}
202
203#endif /* __DRM_DRM_LEGACY_H__ */
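For the drivers that still need it, LOCK_TEST_WITH_RETURN() is meant to open any ioctl handler that requires the hardware lock; it returns -EINVAL from the enclosing function when the caller does not hold it. A sketch (the handler name is hypothetical):

static int example_legacy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* ... legacy DMA or state dispatch ... */
	return 0;
}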
diff --git a/include/drm/drm_memory.h b/include/drm/drm_memory.h
deleted file mode 100644
index 4baf57a207e7..000000000000
--- a/include/drm/drm_memory.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/**
2 * \file drm_memory.h
3 * Memory management wrappers for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/highmem.h>
37#include <linux/vmalloc.h>
38#include <drm/drmP.h>
39
40/**
41 * Cut down version of drm_memory_debug.h, which used to be called
42 * drm_memory.h.
43 */
44
45#if __OS_HAS_AGP
46
47#ifdef HAVE_PAGE_AGP
48#include <asm/agp.h>
49#else
50# ifdef __powerpc__
51# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
52# else
53# define PAGE_AGP PAGE_KERNEL
54# endif
55#endif
56
57#else /* __OS_HAS_AGP */
58
59#endif
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 2bb55b8b9031..8569dc5a1026 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -96,6 +96,8 @@ void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
96#define MIPI_DSI_MODE_EOT_PACKET BIT(9) 96#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
97/* device supports non-continuous clock behavior (DSI spec 5.6.1) */ 97/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
98#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) 98#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
99/* transmit data in low-power mode */
100#define MIPI_DSI_MODE_LPM BIT(11)
99 101
100enum mipi_dsi_pixel_format { 102enum mipi_dsi_pixel_format {
101 MIPI_DSI_FMT_RGB888, 103 MIPI_DSI_FMT_RGB888,
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 402aa7a6a058..75a5c45e21c7 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -29,7 +29,7 @@
29struct drm_modeset_lock; 29struct drm_modeset_lock;
30 30
31/** 31/**
32 * drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) 32 * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
33 * @ww_ctx: base acquire ctx 33 * @ww_ctx: base acquire ctx
34 * @contended: used internally for -EDEADLK handling 34 * @contended: used internally for -EDEADLK handling
35 * @locked: list of held locks 35 * @locked: list of held locks
@@ -53,10 +53,15 @@ struct drm_modeset_acquire_ctx {
53 * list of held locks (drm_modeset_lock) 53 * list of held locks (drm_modeset_lock)
54 */ 54 */
55 struct list_head locked; 55 struct list_head locked;
56
57 /**
58 * Trylock mode, use only for panic handlers!
59 */
60 bool trylock_only;
56}; 61};
57 62
58/** 63/**
59 * drm_modeset_lock - used for locking modeset resources. 64 * struct drm_modeset_lock - used for locking modeset resources.
60 * @mutex: resource locking 65 * @mutex: resource locking
61 * @head: used to hold it's place on state->locked list when 66 * @head: used to hold it's place on state->locked list when
62 * part of an atomic update 67 * part of an atomic update
@@ -120,6 +125,17 @@ int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
120void drm_modeset_unlock(struct drm_modeset_lock *lock); 125void drm_modeset_unlock(struct drm_modeset_lock *lock);
121 126
122struct drm_device; 127struct drm_device;
128struct drm_crtc;
129
130void drm_modeset_lock_all(struct drm_device *dev);
131int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
132void drm_modeset_unlock_all(struct drm_device *dev);
133void drm_modeset_lock_crtc(struct drm_crtc *crtc);
134void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
135void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
136struct drm_modeset_acquire_ctx *
137drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
138
123int drm_modeset_lock_all_crtcs(struct drm_device *dev, 139int drm_modeset_lock_all_crtcs(struct drm_device *dev,
124 struct drm_modeset_acquire_ctx *ctx); 140 struct drm_modeset_acquire_ctx *ctx);
125 141
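These lock helpers moved here from drm_crtc.h; the new drm_modeset_lock_crtc()/drm_modeset_unlock_crtc() pair lets legacy per-CRTC ioctls take a single CRTC's lock, while drm_modeset_legacy_acquire_ctx() hands atomic drivers the acquire context behind it. The coarse, whole-device pattern stays the same, roughly:

	drm_modeset_lock_all(dev);	/* take every modeset lock */
	/* ... inspect or mutate mode objects ... */
	drm_modeset_unlock_all(dev);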
diff --git a/include/drm/drm_usb.h b/include/drm/drm_usb.h
deleted file mode 100644
index 33506c11da8b..000000000000
--- a/include/drm/drm_usb.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef DRM_USB_H
2#define DRM_USB_H
3
4#include <drmP.h>
5
6#include <linux/usb.h>
7
8extern int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver);
9extern void drm_usb_exit(struct drm_driver *driver, struct usb_driver *udriver);
10
11int drm_get_usb_dev(struct usb_interface *interface,
12 const struct usb_device_id *id,
13 struct drm_driver *driver);
14
15#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 7526c5bf5610..0ccf7f267ff9 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -45,12 +45,24 @@ struct ttm_bo_device;
45 45
46struct drm_mm_node; 46struct drm_mm_node;
47 47
48/**
49 * struct ttm_place
50 *
51 * @fpfn: first valid page frame number to put the object
52 * @lpfn: last valid page frame number to put the object
53 * @flags: memory domain and caching flags for the object
54 *
55 * Structure indicating a possible place to put an object.
56 */
57struct ttm_place {
58 unsigned fpfn;
59 unsigned lpfn;
60 uint32_t flags;
61};
48 62
49/** 63/**
50 * struct ttm_placement 64 * struct ttm_placement
51 * 65 *
52 * @fpfn: first valid page frame number to put the object
53 * @lpfn: last valid page frame number to put the object
54 * @num_placement: number of preferred placements 66 * @num_placement: number of preferred placements
55 * @placement: preferred placements 67 * @placement: preferred placements
56 * @num_busy_placement: number of preferred placements when need to evict buffer 68 * @num_busy_placement: number of preferred placements when need to evict buffer
@@ -59,12 +71,10 @@ struct drm_mm_node;
59 * Structure indicating the placement you request for an object. 71 * Structure indicating the placement you request for an object.
60 */ 72 */
61struct ttm_placement { 73struct ttm_placement {
62 unsigned fpfn; 74 unsigned num_placement;
63 unsigned lpfn; 75 const struct ttm_place *placement;
64 unsigned num_placement; 76 unsigned num_busy_placement;
65 const uint32_t *placement; 77 const struct ttm_place *busy_placement;
66 unsigned num_busy_placement;
67 const uint32_t *busy_placement;
68}; 78};
69 79
70/** 80/**
@@ -163,7 +173,6 @@ struct ttm_tt;
163 * @lru: List head for the lru list. 173 * @lru: List head for the lru list.
164 * @ddestroy: List head for the delayed destroy list. 174 * @ddestroy: List head for the delayed destroy list.
165 * @swap: List head for swap LRU list. 175 * @swap: List head for swap LRU list.
166 * @sync_obj: Pointer to a synchronization object.
167 * @priv_flags: Flags describing buffer object internal state. 176 * @priv_flags: Flags describing buffer object internal state.
168 * @vma_node: Address space manager node. 177 * @vma_node: Address space manager node.
169 * @offset: The current GPU offset, which can have different meanings 178 * @offset: The current GPU offset, which can have different meanings
@@ -227,13 +236,9 @@ struct ttm_buffer_object {
227 struct list_head io_reserve_lru; 236 struct list_head io_reserve_lru;
228 237
229 /** 238 /**
230 * Members protected by struct buffer_object_device::fence_lock 239 * Members protected by a bo reservation.
231 * In addition, setting sync_obj to anything else
232 * than NULL requires bo::reserved to be held. This allows for
233 * checking NULL while reserved but not holding the mentioned lock.
234 */ 240 */
235 241
236 void *sync_obj;
237 unsigned long priv_flags; 242 unsigned long priv_flags;
238 243
239 struct drm_vma_offset_node vma_node; 244 struct drm_vma_offset_node vma_node;
@@ -455,6 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
455 * point to the shmem object backing a GEM object if TTM is used to back a 460 * point to the shmem object backing a GEM object if TTM is used to back a
456 * GEM user interface. 461 * GEM user interface.
457 * @acc_size: Accounted size for this object. 462 * @acc_size: Accounted size for this object.
463 * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
458 * @destroy: Destroy function. Use NULL for kfree(). 464 * @destroy: Destroy function. Use NULL for kfree().
459 * 465 *
460 * This function initializes a pre-allocated struct ttm_buffer_object. 466 * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -482,6 +488,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
482 struct file *persistent_swap_storage, 488 struct file *persistent_swap_storage,
483 size_t acc_size, 489 size_t acc_size,
484 struct sg_table *sg, 490 struct sg_table *sg,
491 struct reservation_object *resv,
485 void (*destroy) (struct ttm_buffer_object *)); 492 void (*destroy) (struct ttm_buffer_object *));
486 493
487/** 494/**
@@ -519,20 +526,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
519 struct ttm_buffer_object **p_bo); 526 struct ttm_buffer_object **p_bo);
520 527
521/** 528/**
522 * ttm_bo_check_placement
523 *
524 * @bo: the buffer object.
525 * @placement: placements
526 *
527 * Performs minimal validity checking on an intended change of
528 * placement flags.
529 * Returns
530 * -EINVAL: Intended change is invalid or not allowed.
531 */
532extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
533 struct ttm_placement *placement);
534
535/**
536 * ttm_bo_init_mm 529 * ttm_bo_init_mm
537 * 530 *
538 * @bdev: Pointer to a ttm_bo_device struct. 531 * @bdev: Pointer to a ttm_bo_device struct.
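With fpfn/lpfn folded into struct ttm_place, every candidate placement carries its own page-frame range and flags, and struct ttm_placement just points at arrays of places. A sketch of "prefer VRAM, fall back to GTT under pressure", assuming the existing TTM_PL_FLAG_* placement flags from ttm/ttm_placement.h are unchanged:

static const struct ttm_place vram_place = {
	.fpfn = 0, .lpfn = 0,	/* 0/0 = no page-range restriction */
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
};

static const struct ttm_place gtt_place = {
	.fpfn = 0, .lpfn = 0,
	.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
};

static const struct ttm_placement example_placement = {
	.num_placement = 1,
	.placement = &vram_place,
	.num_busy_placement = 1,
	.busy_placement = &gtt_place,	/* used when evicting */
};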
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1d9f0f1ff52d..142d752fc450 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -208,8 +208,7 @@ struct ttm_mem_type_manager_func {
208 */ 208 */
209 int (*get_node)(struct ttm_mem_type_manager *man, 209 int (*get_node)(struct ttm_mem_type_manager *man,
210 struct ttm_buffer_object *bo, 210 struct ttm_buffer_object *bo,
211 struct ttm_placement *placement, 211 const struct ttm_place *place,
212 uint32_t flags,
213 struct ttm_mem_reg *mem); 212 struct ttm_mem_reg *mem);
214 213
215 /** 214 /**
@@ -313,11 +312,6 @@ struct ttm_mem_type_manager {
313 * @move: Callback for a driver to hook in accelerated functions to 312 * @move: Callback for a driver to hook in accelerated functions to
314 * move a buffer. 313 * move a buffer.
315 * If set to NULL, a potentially slow memcpy() move is used. 314 * If set to NULL, a potentially slow memcpy() move is used.
316 * @sync_obj_signaled: See ttm_fence_api.h
317 * @sync_obj_wait: See ttm_fence_api.h
318 * @sync_obj_flush: See ttm_fence_api.h
319 * @sync_obj_unref: See ttm_fence_api.h
320 * @sync_obj_ref: See ttm_fence_api.h
321 */ 315 */
322 316
323struct ttm_bo_driver { 317struct ttm_bo_driver {
@@ -419,23 +413,6 @@ struct ttm_bo_driver {
419 int (*verify_access) (struct ttm_buffer_object *bo, 413 int (*verify_access) (struct ttm_buffer_object *bo,
420 struct file *filp); 414 struct file *filp);
421 415
422 /**
423 * In case a driver writer dislikes the TTM fence objects,
424 * the driver writer can replace those with sync objects of
425 * his / her own. If it turns out that no driver writer is
426 * using these. I suggest we remove these hooks and plug in
427 * fences directly. The bo driver needs the following functionality:
428 * See the corresponding functions in the fence object API
429 * documentation.
430 */
431
432 bool (*sync_obj_signaled) (void *sync_obj);
433 int (*sync_obj_wait) (void *sync_obj,
434 bool lazy, bool interruptible);
435 int (*sync_obj_flush) (void *sync_obj);
436 void (*sync_obj_unref) (void **sync_obj);
437 void *(*sync_obj_ref) (void *sync_obj);
438
439 /* hook to notify driver about a driver move so it 416 /* hook to notify driver about a driver move so it
440 * can do tiling things */ 417 * can do tiling things */
441 void (*move_notify)(struct ttm_buffer_object *bo, 418 void (*move_notify)(struct ttm_buffer_object *bo,
@@ -522,8 +499,6 @@ struct ttm_bo_global {
522 * 499 *
523 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. 500 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
524 * @man: An array of mem_type_managers. 501 * @man: An array of mem_type_managers.
525 * @fence_lock: Protects the synchronizing members on *all* bos belonging
526 * to this device.
527 * @vma_manager: Address space manager 502 * @vma_manager: Address space manager
528 * lru_lock: Spinlock that protects the buffer+device lru lists and 503 * lru_lock: Spinlock that protects the buffer+device lru lists and
529 * ddestroy lists. 504 * ddestroy lists.
@@ -543,7 +518,6 @@ struct ttm_bo_device {
543 struct ttm_bo_global *glob; 518 struct ttm_bo_global *glob;
544 struct ttm_bo_driver *driver; 519 struct ttm_bo_driver *driver;
545 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; 520 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
546 spinlock_t fence_lock;
547 521
548 /* 522 /*
549 * Protected by internal locks. 523 * Protected by internal locks.
@@ -1026,7 +1000,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
1026 * ttm_bo_move_accel_cleanup. 1000 * ttm_bo_move_accel_cleanup.
1027 * 1001 *
1028 * @bo: A pointer to a struct ttm_buffer_object. 1002 * @bo: A pointer to a struct ttm_buffer_object.
1029 * @sync_obj: A sync object that signals when moving is complete. 1003 * @fence: A fence object that signals when moving is complete.
1030 * @evict: This is an evict move. Don't return until the buffer is idle. 1004 * @evict: This is an evict move. Don't return until the buffer is idle.
1031 * @no_wait_gpu: Return immediately if the GPU is busy. 1005 * @no_wait_gpu: Return immediately if the GPU is busy.
1032 * @new_mem: struct ttm_mem_reg indicating where to move. 1006 * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1040,7 +1014,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
1040 */ 1014 */
1041 1015
1042extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 1016extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
1043 void *sync_obj, 1017 struct fence *fence,
1044 bool evict, bool no_wait_gpu, 1018 bool evict, bool no_wait_gpu,
1045 struct ttm_mem_reg *new_mem); 1019 struct ttm_mem_reg *new_mem);
1046/** 1020/**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 16db7d01a336..460441714413 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,19 +39,13 @@
39 * 39 *
40 * @head: list head for thread-private list. 40 * @head: list head for thread-private list.
41 * @bo: refcounted buffer object pointer. 41 * @bo: refcounted buffer object pointer.
42 * @reserved: Indicates whether @bo has been reserved for validation. 42 * @shared: should the fence be added as shared?
43 * @removed: Indicates whether @bo has been removed from lru lists.
44 * @put_count: Number of outstanding references on bo::list_kref.
45 * @old_sync_obj: Pointer to a sync object about to be unreferenced
46 */ 43 */
47 44
48struct ttm_validate_buffer { 45struct ttm_validate_buffer {
49 struct list_head head; 46 struct list_head head;
50 struct ttm_buffer_object *bo; 47 struct ttm_buffer_object *bo;
51 bool reserved; 48 bool shared;
52 bool removed;
53 int put_count;
54 void *old_sync_obj;
55}; 49};
56 50
57/** 51/**
@@ -73,6 +67,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
73 * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only 67 * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
74 * non-blocking reserves should be tried. 68 * non-blocking reserves should be tried.
75 * @list: thread private list of ttm_validate_buffer structs. 69 * @list: thread private list of ttm_validate_buffer structs.
70 * @intr: should the wait be interruptible
76 * 71 *
77 * Tries to reserve bos pointed to by the list entries for validation. 72 * Tries to reserve bos pointed to by the list entries for validation.
78 * If the function returns 0, all buffers are marked as "unfenced", 73 * If the function returns 0, all buffers are marked as "unfenced",
@@ -84,9 +79,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
84 * CPU write reservations to be cleared, and for other threads to 79 * CPU write reservations to be cleared, and for other threads to
85 * unreserve their buffers. 80 * unreserve their buffers.
86 * 81 *
87 * This function may return -ERESTART or -EAGAIN if the calling process 82 * If intr is set to true, this function may return -ERESTARTSYS if the
88 * receives a signal while waiting. In that case, no buffers on the list 83 * calling process receives a signal while waiting. In that case, no
89 * will be reserved upon return. 84 * buffers on the list will be reserved upon return.
90 * 85 *
91 * Buffers reserved by this function should be unreserved by 86 * Buffers reserved by this function should be unreserved by
92 * a call to either ttm_eu_backoff_reservation() or 87 * a call to either ttm_eu_backoff_reservation() or
@@ -95,14 +90,14 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
95 */ 90 */
96 91
97extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 92extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
98 struct list_head *list); 93 struct list_head *list, bool intr);
99 94
100/** 95/**
101 * function ttm_eu_fence_buffer_objects. 96 * function ttm_eu_fence_buffer_objects.
102 * 97 *
103 * @ticket: ww_acquire_ctx from reserve call 98 * @ticket: ww_acquire_ctx from reserve call
104 * @list: thread private list of ttm_validate_buffer structs. 99 * @list: thread private list of ttm_validate_buffer structs.
105 * @sync_obj: The new sync object for the buffers. 100 * @fence: The new exclusive fence for the buffers.
106 * 101 *
107 * This function should be called when command submission is complete, and 102 * This function should be called when command submission is complete, and
108 * it will add a new sync object to bos pointed to by entries on @list. 103 * it will add a new sync object to bos pointed to by entries on @list.
@@ -111,6 +106,7 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
111 */ 106 */
112 107
113extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, 108extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
114 struct list_head *list, void *sync_obj); 109 struct list_head *list,
110 struct fence *fence);
115 111
116#endif 112#endif
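Under the fence conversion the execbuf sequence reads roughly as below; validate_list is a driver-built list of ttm_validate_buffer entries and fence is the struct fence returned by the driver's submission (both assumed here):

	struct ww_acquire_ctx ticket;
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true);
	if (ret)
		return ret;	/* -ERESTARTSYS possible since intr == true */

	/* ... validate buffers, build and submit the command stream ... */

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence);

On any failure after a successful reserve, ttm_eu_backoff_reservation() releases the buffers instead.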
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
index 1a2e9901a22e..a5f045e1d8fe 100644
--- a/include/linux/platform_data/rcar-du.h
+++ b/include/linux/platform_data/rcar-du.h
@@ -14,7 +14,7 @@
14#ifndef __RCAR_DU_H__ 14#ifndef __RCAR_DU_H__
15#define __RCAR_DU_H__ 15#define __RCAR_DU_H__
16 16
17#include <drm/drm_mode.h> 17#include <video/videomode.h>
18 18
19enum rcar_du_output { 19enum rcar_du_output {
20 RCAR_DU_OUTPUT_DPAD0, 20 RCAR_DU_OUTPUT_DPAD0,
@@ -35,7 +35,7 @@ enum rcar_du_encoder_type {
35struct rcar_du_panel_data { 35struct rcar_du_panel_data {
36 unsigned int width_mm; /* Panel width in mm */ 36 unsigned int width_mm; /* Panel width in mm */
37 unsigned int height_mm; /* Panel height in mm */ 37 unsigned int height_mm; /* Panel height in mm */
38 struct drm_mode_modeinfo mode; 38 struct videomode mode;
39}; 39};
40 40
41struct rcar_du_connector_lvds_data { 41struct rcar_du_connector_lvds_data {
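Board code now describes the fixed panel timings with the generic struct videomode rather than a DRM mode. A sketch for a hypothetical 1024x768 panel, using the field names from include/video/videomode.h:

static const struct rcar_du_panel_data example_panel = {
	.width_mm = 210,
	.height_mm = 158,
	.mode = {
		.pixelclock = 65000000,
		.hactive = 1024, .hfront_porch = 24,
		.hback_porch = 160, .hsync_len = 136,
		.vactive = 768, .vfront_porch = 3,
		.vback_porch = 29, .vsync_len = 6,
	},
};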
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index d5844122ff32..5575ed1598bd 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -33,38 +33,6 @@ struct drm_exynos_gem_create {
33}; 33};
34 34
35/** 35/**
36 * A structure for getting buffer offset.
37 *
38 * @handle: a pointer to gem object created.
39 * @pad: just padding to be 64-bit aligned.
40 * @offset: relative offset value of the memory region allocated.
41 * - this value should be set by user.
42 */
43struct drm_exynos_gem_map_off {
44 unsigned int handle;
45 unsigned int pad;
46 uint64_t offset;
47};
48
49/**
50 * A structure for mapping buffer.
51 *
52 * @handle: a handle to gem object created.
53 * @pad: just padding to be 64-bit aligned.
54 * @size: memory size to be mapped.
55 * @mapped: having user virtual address mmaped.
56 * - this variable would be filled by exynos gem module
57 * of kernel side with user virtual address which is allocated
58 * by do_mmap().
59 */
60struct drm_exynos_gem_mmap {
61 unsigned int handle;
62 unsigned int pad;
63 uint64_t size;
64 uint64_t mapped;
65};
66
67/**
68 * A structure to gem information. 36 * A structure to gem information.
69 * 37 *
70 * @handle: a handle to gem object created. 38 * @handle: a handle to gem object created.
@@ -316,8 +284,6 @@ struct drm_exynos_ipp_cmd_ctrl {
316}; 284};
317 285
318#define DRM_EXYNOS_GEM_CREATE 0x00 286#define DRM_EXYNOS_GEM_CREATE 0x00
319#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
320#define DRM_EXYNOS_GEM_MMAP 0x02
321/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ 287/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
322#define DRM_EXYNOS_GEM_GET 0x04 288#define DRM_EXYNOS_GEM_GET 0x04
323#define DRM_EXYNOS_VIDI_CONNECTION 0x07 289#define DRM_EXYNOS_VIDI_CONNECTION 0x07
@@ -336,12 +302,6 @@ struct drm_exynos_ipp_cmd_ctrl {
 #define DRM_IOCTL_EXYNOS_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
 
-#define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + \
-		DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off)
-
-#define DRM_IOCTL_EXYNOS_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + \
-		DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
-
 #define DRM_IOCTL_EXYNOS_GEM_GET	DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
 
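
The removed exynos ioctls duplicated functionality the DRM core already provides: a GEM object's fake mmap offset and the mapping itself. A userspace sketch of the generic replacement path, assuming a dumb buffer was created earlier on "fd" with GEM handle "handle" (the caller should compare the result against MAP_FAILED):

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Map a dumb buffer through the generic offset path instead of the
 * removed exynos-specific MAP_OFFSET/MMAP ioctls. */
static void *map_gem_bo(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0)
		return MAP_FAILED;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}
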
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fea6099608ef..50d0fb41a3bf 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -511,6 +511,7 @@ typedef struct {
 #define DRM_RADEON_GEM_BUSY		0x2a
 #define DRM_RADEON_GEM_VA		0x2b
 #define DRM_RADEON_GEM_OP		0x2c
+#define DRM_RADEON_GEM_USERPTR		0x2d
 
 #define DRM_IOCTL_RADEON_CP_INIT	DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START	DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -554,6 +555,7 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 #define DRM_IOCTL_RADEON_GEM_VA		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
 #define DRM_IOCTL_RADEON_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
+#define DRM_IOCTL_RADEON_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
 
 typedef struct drm_radeon_init {
 	enum {
@@ -799,6 +801,10 @@ struct drm_radeon_gem_info {
 #define RADEON_GEM_NO_BACKING_STORE	(1 << 0)
 #define RADEON_GEM_GTT_UC		(1 << 1)
 #define RADEON_GEM_GTT_WC		(1 << 2)
+/* BO is expected to be accessed by the CPU */
+#define RADEON_GEM_CPU_ACCESS		(1 << 3)
+/* CPU access is not expected to work for this BO */
+#define RADEON_GEM_NO_CPU_ACCESS	(1 << 4)
 
 struct drm_radeon_gem_create {
 	uint64_t size;
@@ -808,6 +814,23 @@ struct drm_radeon_gem_create {
 	uint32_t flags;
 };
 
+/*
+ * This is not a reliable API and you should expect it to fail for any
+ * number of reasons and have fallback path that do not use userptr to
+ * perform any operation.
+ */
+#define RADEON_GEM_USERPTR_READONLY	(1 << 0)
+#define RADEON_GEM_USERPTR_ANONONLY	(1 << 1)
+#define RADEON_GEM_USERPTR_VALIDATE	(1 << 2)
+#define RADEON_GEM_USERPTR_REGISTER	(1 << 3)
+
+struct drm_radeon_gem_userptr {
+	uint64_t		addr;
+	uint64_t		size;
+	uint32_t		flags;
+	uint32_t		handle;
+};
+
 #define RADEON_TILING_MACRO				0x1
 #define RADEON_TILING_MICRO				0x2
 #define RADEON_TILING_SWAP_16BIT			0x4
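
The new userptr ioctl wraps an existing page-aligned user allocation in a GEM handle; the header comment above is explicit that callers need a non-userptr fallback. A userspace sketch, using only the flags and fields added by this diff (ANONONLY restricts the mapping to anonymous memory, so file-backed buffers are one case where the fallback triggers):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Try to turn a page-aligned anonymous allocation into a GEM handle.
 * Returns 0 and fills *handle on success; on failure the caller must
 * fall back to a normal GEM BO plus copies. */
static int my_gem_userptr(int fd, void *ptr, uint64_t size,
			  uint32_t *handle)
{
	struct drm_radeon_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uintptr_t)ptr;
	args.size = size;
	args.flags = RADEON_GEM_USERPTR_ANONONLY |
		     RADEON_GEM_USERPTR_VALIDATE |
		     RADEON_GEM_USERPTR_REGISTER;

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args) < 0)
		return -1;
	*handle = args.handle;
	return 0;
}
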
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 4fc66f6b12ce..c472bedbe38e 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -29,7 +29,7 @@
 #define __VMWGFX_DRM_H__
 
 #ifndef __KERNEL__
-#include <drm.h>
+#include <drm/drm.h>
 #endif
 
 #define DRM_VMW_MAX_SURFACE_FACES 6
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ef64b66b18df..c74bf4a0520e 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -16,6 +16,7 @@
 #include <linux/videodev2.h>
 #include <linux/bitmap.h>
 #include <linux/fb.h>
+#include <media/v4l2-mediabus.h>
 
 struct ipu_soc;
 
@@ -61,6 +62,29 @@ struct ipu_di_signal_cfg {
 	u8 vsync_pin;
 };
 
+/*
+ * Enumeration of CSI destinations
+ */
+enum ipu_csi_dest {
+	IPU_CSI_DEST_IDMAC,	/* to memory via SMFC */
+	IPU_CSI_DEST_IC,	/* to Image Converter */
+	IPU_CSI_DEST_VDIC,	/* to VDIC */
+};
+
+/*
+ * Enumeration of IPU rotation modes
+ */
+enum ipu_rotate_mode {
+	IPU_ROTATE_NONE = 0,
+	IPU_ROTATE_VERT_FLIP,
+	IPU_ROTATE_HORIZ_FLIP,
+	IPU_ROTATE_180,
+	IPU_ROTATE_90_RIGHT,
+	IPU_ROTATE_90_RIGHT_VFLIP,
+	IPU_ROTATE_90_RIGHT_HFLIP,
+	IPU_ROTATE_90_LEFT,
+};
+
 enum ipu_color_space {
 	IPUV3_COLORSPACE_RGB,
 	IPUV3_COLORSPACE_YUV,
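
The ipu_csi_dest enum pairs with the per-CSI handles and ipu_csi_set_dest() declared further down in this header. A hedged sketch of routing CSI0 straight to memory for capture; it assumes ipu_csi_get() follows the usual ERR_PTR convention:

#include <linux/err.h>
#include <video/imx-ipu-v3.h>

/* Route the CSI0 sensor interface to memory (via the SMFC) and
 * enable it. */
static int my_route_csi0_to_mem(struct ipu_soc *ipu)
{
	struct ipu_csi *csi = ipu_csi_get(ipu, 0);

	if (IS_ERR(csi))
		return PTR_ERR(csi);
	ipu_csi_set_dest(csi, IPU_CSI_DEST_IDMAC);
	return ipu_csi_enable(csi);
}
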
@@ -76,6 +100,36 @@ enum ipu_channel_irq {
 	IPU_IRQ_EOS = 192,
 };
 
+/*
+ * Enumeration of IDMAC channels
+ */
+#define IPUV3_CHANNEL_CSI0			 0
+#define IPUV3_CHANNEL_CSI1			 1
+#define IPUV3_CHANNEL_CSI2			 2
+#define IPUV3_CHANNEL_CSI3			 3
+#define IPUV3_CHANNEL_VDI_MEM_IC_VF		 5
+#define IPUV3_CHANNEL_MEM_IC_PP			11
+#define IPUV3_CHANNEL_MEM_IC_PRP_VF		12
+#define IPUV3_CHANNEL_G_MEM_IC_PRP_VF		14
+#define IPUV3_CHANNEL_G_MEM_IC_PP		15
+#define IPUV3_CHANNEL_IC_PRP_ENC_MEM		20
+#define IPUV3_CHANNEL_IC_PRP_VF_MEM		21
+#define IPUV3_CHANNEL_IC_PP_MEM			22
+#define IPUV3_CHANNEL_MEM_BG_SYNC		23
+#define IPUV3_CHANNEL_MEM_BG_ASYNC		24
+#define IPUV3_CHANNEL_MEM_FG_SYNC		27
+#define IPUV3_CHANNEL_MEM_DC_SYNC		28
+#define IPUV3_CHANNEL_MEM_FG_ASYNC		29
+#define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA		31
+#define IPUV3_CHANNEL_MEM_DC_ASYNC		41
+#define IPUV3_CHANNEL_MEM_ROT_ENC		45
+#define IPUV3_CHANNEL_MEM_ROT_VF		46
+#define IPUV3_CHANNEL_MEM_ROT_PP		47
+#define IPUV3_CHANNEL_ROT_ENC_MEM		48
+#define IPUV3_CHANNEL_ROT_VF_MEM		49
+#define IPUV3_CHANNEL_ROT_PP_MEM		50
+#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA		51
+
 int ipu_map_irq(struct ipu_soc *ipu, int irq);
 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
 		enum ipu_channel_irq irq);
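
Exporting the IDMAC channel numbers lets client drivers request a specific DMA channel by role rather than by magic constant. A sketch of claiming the CSI0 capture channel, assuming ipu_idmac_get() returns an ERR_PTR on failure:

/* Claim and start the CSI0 -> memory capture channel; "ipu" would
 * come from the client device's parent. Needs <linux/err.h>. */
static struct ipuv3_channel *my_claim_csi0(struct ipu_soc *ipu)
{
	struct ipuv3_channel *ch;

	ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_CSI0);
	if (IS_ERR(ch))
		return ch;
	/* ... program the channel's cpmem here, then ... */
	ipu_idmac_enable_channel(ch);
	return ch;
}
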
@@ -93,6 +147,13 @@ int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
 #define IPU_IRQ_VSYNC_PRE_1		(448 + 15)
 
 /*
+ * IPU Common functions
+ */
+void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2);
+void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi);
+void ipu_dump(struct ipu_soc *ipu);
+
+/*
  * IPU Image DMA Controller (idmac) functions
  */
 struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned channel);
@@ -100,12 +161,16 @@ void ipu_idmac_put(struct ipuv3_channel *);
 
 int ipu_idmac_enable_channel(struct ipuv3_channel *channel);
 int ipu_idmac_disable_channel(struct ipuv3_channel *channel);
+void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable);
+int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts);
 int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms);
 
 void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
 		bool doublebuffer);
 int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
+bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
+void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
 
 /*
  * IPU Channel Parameter Memory (cpmem) functions
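
Together with ipu_idmac_set_double_buffer(), the new buffer helpers support a ping-pong capture loop: on each end-of-frame the just-filled slot is cleared, pointed at the next empty frame, and re-armed. A hedged sketch (the interrupt wiring itself is driver-specific and omitted; my_eof_handler() is illustrative):

/* Double-buffered capture: called from the channel's EOF handler with
 * the slot that just completed (0 or 1) and the DMA address of the
 * next empty frame. */
static void my_eof_handler(struct ipuv3_channel *ch, int buf_num,
			   dma_addr_t next_buf)
{
	ipu_idmac_clear_buffer(ch, buf_num);		/* mark slot idle */
	ipu_cpmem_set_buffer(ch, buf_num, next_buf);	/* queue next frame */
	ipu_idmac_select_buffer(ch, buf_num);		/* re-arm the slot */
}
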
@@ -121,7 +186,8 @@ struct ipu_rgb {
 struct ipu_image {
 	struct v4l2_pix_format pix;
 	struct v4l2_rect rect;
-	dma_addr_t phys;
+	dma_addr_t phys0;
+	dma_addr_t phys1;
 };
 
 void ipu_cpmem_zero(struct ipuv3_channel *ch);
@@ -130,7 +196,11 @@ void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
 void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
+void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
+void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
+void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
+			    enum ipu_rotate_mode rot);
 int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
 			     const struct ipu_rgb *rgb);
 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
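
ipu_cpmem_set_rotation() takes one of the ipu_rotate_mode values introduced earlier in this diff, and ipu_degrees_to_rot_mode(), declared further down, converts a degrees-plus-flips description into that encoding. A sketch combining the two; my_set_rotation() is an illustrative wrapper, not part of the API:

/* Pick an IPU rotate mode from degrees + flips and program it into a
 * channel's parameter memory. Needs <video/imx-ipu-v3.h>. */
static int my_set_rotation(struct ipuv3_channel *ch, int degrees,
			   bool hflip, bool vflip)
{
	enum ipu_rotate_mode mode;
	int ret;

	ret = ipu_degrees_to_rot_mode(&mode, degrees, hflip, vflip);
	if (ret)
		return ret;	/* unsupported angle */
	ipu_cpmem_set_rotation(ch, mode);
	return 0;
}
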
@@ -142,6 +212,7 @@ void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
 			      u32 pixel_format, int stride, int height);
 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
 int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
+void ipu_cpmem_dump(struct ipuv3_channel *ch);
 
 /*
  * IPU Display Controller (dc) functions
@@ -205,19 +276,78 @@ int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
 /*
  * IPU CMOS Sensor Interface (csi) functions
  */
-int ipu_csi_enable(struct ipu_soc *ipu, int csi);
-int ipu_csi_disable(struct ipu_soc *ipu, int csi);
+struct ipu_csi;
+int ipu_csi_init_interface(struct ipu_csi *csi,
+			   struct v4l2_mbus_config *mbus_cfg,
+			   struct v4l2_mbus_framefmt *mbus_fmt);
+bool ipu_csi_is_interlaced(struct ipu_csi *csi);
+void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
+				u32 r_value, u32 g_value, u32 b_value,
+				u32 pix_clk);
+int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
+			      struct v4l2_mbus_framefmt *mbus_fmt);
+int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
+			  u32 max_ratio, u32 id);
+int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest);
+int ipu_csi_enable(struct ipu_csi *csi);
+int ipu_csi_disable(struct ipu_csi *csi);
+struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id);
+void ipu_csi_put(struct ipu_csi *csi);
+void ipu_csi_dump(struct ipu_csi *csi);
+
+/*
+ * IPU Image Converter (ic) functions
+ */
+enum ipu_ic_task {
+	IC_TASK_ENCODER,
+	IC_TASK_VIEWFINDER,
+	IC_TASK_POST_PROCESSOR,
+	IC_NUM_TASKS,
+};
+
+struct ipu_ic;
+int ipu_ic_task_init(struct ipu_ic *ic,
+		     int in_width, int in_height,
+		     int out_width, int out_height,
+		     enum ipu_color_space in_cs,
+		     enum ipu_color_space out_cs);
+int ipu_ic_task_graphics_init(struct ipu_ic *ic,
+			      enum ipu_color_space in_g_cs,
+			      bool galpha_en, u32 galpha,
+			      bool colorkey_en, u32 colorkey);
+void ipu_ic_task_enable(struct ipu_ic *ic);
+void ipu_ic_task_disable(struct ipu_ic *ic);
+int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
+			  u32 width, u32 height, int burst_size,
+			  enum ipu_rotate_mode rot);
+int ipu_ic_enable(struct ipu_ic *ic);
+int ipu_ic_disable(struct ipu_ic *ic);
+struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task);
+void ipu_ic_put(struct ipu_ic *ic);
+void ipu_ic_dump(struct ipu_ic *ic);
 
 /*
  * IPU Sensor Multiple FIFO Controller (SMFC) functions
  */
-int ipu_smfc_enable(struct ipu_soc *ipu);
-int ipu_smfc_disable(struct ipu_soc *ipu);
-int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id);
-int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize);
+struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno);
+void ipu_smfc_put(struct ipu_smfc *smfc);
+int ipu_smfc_enable(struct ipu_smfc *smfc);
+int ipu_smfc_disable(struct ipu_smfc *smfc);
+int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id);
+int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize);
+int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level);
 
 enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
 enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
+enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code);
+int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat);
+bool ipu_pixelformat_is_planar(u32 pixelformat);
+int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
+			    bool hflip, bool vflip);
+int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
+			    bool hflip, bool vflip);
 
 struct ipu_client_platformdata {
 	int csi;