author    Ben Skeggs <bskeggs@redhat.com>    2013-07-07 23:40:34 -0400
committer Ben Skeggs <bskeggs@redhat.com>    2013-07-07 23:40:34 -0400
commit    06d5a24f08831e167fae42a64ef2083a89f8e617 (patch)
tree      509ef5096fcf1b32029b912afba9a2c1ce9067d9
parent    d2989b534ef6834ebf2425aecc040b894b567c91 (diff)
parent    7c6ca3040e9ac174e6d2189811da603e9c19a150 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-nouveau-next
-rw-r--r-- Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r-- Documentation/DocBook/drm.tmpl | 271
-rw-r--r-- Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/video/display-timing.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_hdmi.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_hdmiddc.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_hdmiphy.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_mixer.txt | 9
-rw-r--r-- Documentation/fb/uvesafb.txt | 16
-rw-r--r-- Documentation/ww-mutex-design.txt | 344
-rw-r--r-- MAINTAINERS | 8
-rw-r--r-- arch/arm/boot/dts/cros5250-common.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/exynos5250-smdk5250.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos5250.dtsi | 4
-rw-r--r-- arch/ia64/include/asm/mutex.h | 10
-rw-r--r-- arch/powerpc/include/asm/mutex.h | 10
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.c | 24
-rw-r--r-- arch/s390/include/asm/dma-mapping.h | 3
-rw-r--r-- arch/s390/kernel/ipl.c | 8
-rw-r--r-- arch/s390/kernel/irq.c | 2
-rw-r--r-- arch/s390/mm/mem_detect.c | 3
-rw-r--r-- arch/sh/include/asm/mutex-llsc.h | 4
-rw-r--r-- arch/x86/include/asm/io.h | 7
-rw-r--r-- arch/x86/include/asm/mtrr.h | 10
-rw-r--r-- arch/x86/include/asm/mutex_32.h | 11
-rw-r--r-- arch/x86/include/asm/mutex_64.h | 11
-rw-r--r-- arch/x86/kernel/cpu/mtrr/main.c | 71
-rw-r--r-- drivers/base/Makefile | 2
-rw-r--r-- drivers/base/reservation.c | 39
-rw-r--r-- drivers/char/agp/ati-agp.c | 4
-rw-r--r-- drivers/char/agp/frontend.c | 8
-rw-r--r-- drivers/char/agp/nvidia-agp.c | 6
-rw-r--r-- drivers/gpu/drm/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/Makefile | 4
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 20
-rw-r--r-- drivers/gpu/drm/ast/ast_fb.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_ttm.c | 31
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 21
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_ttm.c | 33
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 26
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 191
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 45
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 115
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 3
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 43
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 25
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 113
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 412
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 15
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 45
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 14
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 8
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 179
-rw-r--r-- drivers/gpu/drm/drm_rect.c | 295
-rw-r--r-- drivers/gpu/drm/drm_stub.c | 14
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 33
-rw-r--r-- drivers/gpu/drm/drm_trace.h | 6
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 25
-rw-r--r-- drivers/gpu/drm/exynos/exynos_ddc.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_buf.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_connector.c | 51
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_core.c | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 39
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 20
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_encoder.c | 28
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 129
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 146
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 102
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 63
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 202
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 16
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 53
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 50
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 131
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmiphy.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 122
-rw-r--r-- drivers/gpu/drm/exynos/regs-mixer.h | 7
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 1
-rw-r--r-- drivers/gpu/drm/i915/dvo_ch7xxx.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 473
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 90
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 116
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 359
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 308
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 118
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 114
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 31
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 1034
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 708
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 74
-rw-r--r-- drivers/gpu/drm/i915/i915_ums.c | 90
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 100
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 35
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 754
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 3130
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 670
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 186
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_fb.c | 28
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 158
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 297
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 110
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 17
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 296
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1387
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 236
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 168
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 177
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 229
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 11
-rw-r--r-- drivers/gpu/drm/mgag200/Makefile | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_cursor.c | 275
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 44
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_fb.c | 5
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_main.c | 25
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 72
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_reg.h | 6
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_ttm.c | 32
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 103
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 43
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_prime.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 13
-rw-r--r-- drivers/gpu/drm/omapdrm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_crtc.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fbdev.c | 14
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 35
-rw-r--r-- drivers/gpu/drm/qxl/qxl_cmd.c | 17
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 252
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 138
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 18
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 16
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ioctl.c | 6
-rw-r--r-- drivers/gpu/drm/qxl/qxl_kms.c | 24
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.c | 10
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 5
-rw-r--r-- drivers/gpu/drm/radeon/ObjectID.h | 40
-rw-r--r-- drivers/gpu/drm/radeon/atombios.h | 547
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 88
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 51
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.c | 2737
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.h | 57
-rw-r--r-- drivers/gpu/drm/radeon/btcd.h | 181
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 6987
-rw-r--r-- drivers/gpu/drm/radeon/cik_blit_shaders.c | 246
-rw-r--r-- drivers/gpu/drm/radeon/cik_blit_shaders.h | 32
-rw-r--r-- drivers/gpu/drm/radeon/cik_reg.h | 147
-rw-r--r-- drivers/gpu/drm/radeon/cikd.h | 1297
-rw-r--r-- drivers/gpu/drm/radeon/clearstate_cayman.h | 1081
-rw-r--r-- drivers/gpu/drm/radeon/clearstate_defs.h | 44
-rw-r--r-- drivers/gpu/drm/radeon/clearstate_evergreen.h | 1080
-rw-r--r-- drivers/gpu/drm/radeon/clearstate_si.h | 941
-rw-r--r-- drivers/gpu/drm/radeon/cypress_dpm.c | 2176
-rw-r--r-- drivers/gpu/drm/radeon/cypress_dpm.h | 160
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 642
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 12
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_smc.h | 67
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 389
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 198
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.c | 4332
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.h | 248
-rw-r--r-- drivers/gpu/drm/radeon/nid.h | 565
-rw-r--r-- drivers/gpu/drm/radeon/nislands_smc.h | 329
-rw-r--r-- drivers/gpu/drm/radeon/ppsmc.h | 113
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 147
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 1024
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.h | 226
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/r600_reg.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 232
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 522
-rw-r--r-- drivers/gpu/drm/radeon/radeon_acpi.c | 145
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 706
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 190
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 881
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cursor.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 106
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_family.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 93
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 30
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 647
-rw-r--r-- drivers/gpu/drm/radeon/radeon_prime.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_reg.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 51
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 75
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ucode.h | 129
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 59
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 291
-rw-r--r-- drivers/gpu/drm/radeon/rs780_dpm.c | 963
-rw-r--r-- drivers/gpu/drm/radeon/rs780_dpm.h | 109
-rw-r--r-- drivers/gpu/drm/radeon/rs780d.h | 168
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 224
-rw-r--r-- drivers/gpu/drm/radeon/rv6xx_dpm.c | 2085
-rw-r--r-- drivers/gpu/drm/radeon/rv6xx_dpm.h | 95
-rw-r--r-- drivers/gpu/drm/radeon/rv6xxd.h | 246
-rw-r--r-- drivers/gpu/drm/radeon/rv730_dpm.c | 508
-rw-r--r-- drivers/gpu/drm/radeon/rv730d.h | 165
-rw-r--r-- drivers/gpu/drm/radeon/rv740_dpm.c | 416
-rw-r--r-- drivers/gpu/drm/radeon/rv740d.h | 117
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.c | 2493
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.h | 288
-rw-r--r-- drivers/gpu/drm/radeon/rv770_smc.c | 621
-rw-r--r-- drivers/gpu/drm/radeon/rv770_smc.h | 209
-rw-r--r-- drivers/gpu/drm/radeon/rv770d.h | 283
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 1337
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 6407
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.h | 227
-rw-r--r-- drivers/gpu/drm/radeon/si_smc.c | 284
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 603
-rw-r--r-- drivers/gpu/drm/radeon/sislands_smc.h | 397
-rw-r--r-- drivers/gpu/drm/radeon/sumo_dpm.c | 1832
-rw-r--r-- drivers/gpu/drm/radeon/sumo_dpm.h | 220
-rw-r--r-- drivers/gpu/drm/radeon/sumo_smc.c | 222
-rw-r--r-- drivers/gpu/drm/radeon/sumod.h | 372
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.c | 1917
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.h | 131
-rw-r--r-- drivers/gpu/drm/radeon/trinity_smc.c | 115
-rw-r--r-- drivers/gpu/drm/radeon/trinityd.h | 228
-rw-r--r-- drivers/gpu/drm/rcar-du/Kconfig | 9
-rw-r--r-- drivers/gpu/drm/rcar-du/Makefile | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 595
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 50
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 325
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 66
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 245
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.h | 59
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvds.c | 216
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvds.h | 24
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 507
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 67
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 445
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vga.c | 149
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vga.h | 24
-rw-r--r-- drivers/gpu/drm/savage/savage_bci.c | 43
-rw-r--r-- drivers/gpu/drm/savage/savage_drv.h | 5
-rw-r--r-- drivers/gpu/drm/shmobile/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 35
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_kms.c | 2
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_plane.c | 9
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 122
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 37
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.h | 25
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_panel.c | 2
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_regs.h | 1
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_slave.c | 51
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 239
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_manager.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 6
-rw-r--r-- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 86
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 15
-rw-r--r-- drivers/gpu/drm/udl/udl_modeset.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 27
-rw-r--r-- drivers/gpu/host1x/dev.h | 8
-rw-r--r-- drivers/gpu/host1x/drm/dc.c | 5
-rw-r--r-- drivers/gpu/host1x/drm/drm.c | 14
-rw-r--r-- drivers/gpu/host1x/drm/gr2d.c | 12
-rw-r--r-- drivers/gpu/host1x/hw/cdma_hw.c | 2
-rw-r--r-- drivers/gpu/host1x/hw/syncpt_hw.c | 12
-rw-r--r-- drivers/gpu/host1x/job.c | 135
-rw-r--r-- drivers/gpu/host1x/syncpt.c | 26
-rw-r--r-- drivers/gpu/host1x/syncpt.h | 13
-rw-r--r-- drivers/input/joystick/xpad.c | 2
-rw-r--r-- drivers/input/keyboard/Kconfig | 1
-rw-r--r-- drivers/input/serio/Kconfig | 1
-rw-r--r-- drivers/input/tablet/wacom_wac.c | 2
-rw-r--r-- drivers/input/touchscreen/cyttsp_core.c | 28
-rw-r--r-- drivers/input/touchscreen/cyttsp_core.h | 2
-rw-r--r-- drivers/spi/spi-pxa2xx-dma.c | 2
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 2
-rw-r--r-- drivers/spi/spi-s3c64xx.c | 2
-rw-r--r-- drivers/staging/imx-drm/ipuv3-crtc.c | 5
-rw-r--r-- drivers/video/console/vgacon.c | 17
-rw-r--r-- drivers/video/of_display_timing.c | 7
-rw-r--r-- drivers/video/uvesafb.c | 70
-rw-r--r-- fs/fuse/file.c | 12
-rw-r--r-- fs/splice.c | 1
-rw-r--r-- include/asm-generic/mutex-dec.h | 10
-rw-r--r-- include/asm-generic/mutex-null.h | 2
-rw-r--r-- include/asm-generic/mutex-xchg.h | 10
-rw-r--r-- include/drm/drmP.h | 38
-rw-r--r-- include/drm/drm_crtc.h | 37
-rw-r--r-- include/drm/drm_fixed.h | 94
-rw-r--r-- include/drm/drm_gem_cma_helper.h | 9
-rw-r--r-- include/drm/drm_mm.h | 38
-rw-r--r-- include/drm/drm_os_linux.h | 16
-rw-r--r-- include/drm/drm_pciids.h | 24
-rw-r--r-- include/drm/drm_rect.h | 167
-rw-r--r-- include/drm/i915_powerwell.h | 36
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 37
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 169
-rw-r--r-- include/drm/ttm/ttm_execbuf_util.h | 12
-rw-r--r-- include/linux/io.h | 25
-rw-r--r-- include/linux/mutex-debug.h | 1
-rw-r--r-- include/linux/mutex.h | 363
-rw-r--r-- include/linux/platform_data/rcar-du.h | 54
-rw-r--r-- include/linux/reservation.h | 62
-rw-r--r-- include/uapi/drm/drm.h | 1
-rw-r--r-- include/uapi/drm/drm_mode.h | 13
-rw-r--r-- include/uapi/drm/i915_drm.h | 3
-rw-r--r-- include/uapi/drm/tegra_drm.h | 2
-rw-r--r-- include/video/display_timing.h | 1
-rw-r--r-- include/video/uvesafb.h | 1
-rw-r--r-- kernel/mutex.c | 384
-rw-r--r-- lib/Kconfig.debug | 13
-rw-r--r-- lib/debug_locks.c | 2
-rw-r--r-- lib/locking-selftest.c | 720
-rw-r--r-- sound/pci/hda/Kconfig | 10
-rw-r--r-- sound/pci/hda/Makefile | 2
-rw-r--r-- sound/pci/hda/hda_i915.c | 75
-rw-r--r-- sound/pci/hda/hda_i915.h | 35
-rw-r--r-- sound/pci/hda/hda_intel.c | 87
339 files changed, 69936 insertions, 7106 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index c36892c072da..f0648a8b09b6 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -126,6 +126,8 @@ X!Edrivers/base/interface.c
 </sect1>
 <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-buf.c
+!Edrivers/base/reservation.c
+!Iinclude/linux/reservation.h
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
 </sect1>
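
[The reservation objects newly pulled into this DocBook are thin wrappers around the wait/wound mutexes documented later in this merge. A rough sketch of the pattern, following include/linux/reservation.h as added here; treat the exact layout and helper names as assumptions:

struct reservation_object {
	struct ww_mutex lock;
};

/* All reservation objects share one global w/w class, so cross-device
 * locking (e.g. for PRIME-shared buffers) stays deadlock-free. */
extern struct ww_class reservation_ww_class;

static inline void reservation_object_init(struct reservation_object *obj)
{
	/* assumption: init simply binds the lock to the shared class */
	ww_mutex_init(&obj->lock, &reservation_ww_class);
}
]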
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index f9df3b872c16..4d54ac8b2032 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -186,11 +186,12 @@
 <varlistentry>
 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
 <listitem><para>
-DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The
-DRM core will automatically register an interrupt handler when the
-flag is set. DRIVER_IRQ_SHARED indicates whether the device &amp;
-handler support shared IRQs (note that this is required of PCI
-drivers).
+DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
+managed by the DRM Core. The core will support simple IRQ handler
+installation when the flag is set. The installation process is
+described in <xref linkend="drm-irq-registration"/>.</para>
+<para>DRIVER_IRQ_SHARED indicates whether the device &amp; handler
+support shared IRQs (note that this is required of PCI drivers).
 </para></listitem>
 </varlistentry>
 <varlistentry>
@@ -344,50 +345,71 @@ char *date;</synopsis>
 The DRM core tries to facilitate IRQ handler registration and
 unregistration by providing <function>drm_irq_install</function> and
 <function>drm_irq_uninstall</function> functions. Those functions only
-support a single interrupt per device.
-</para>
-<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
-<para>
-Both functions get the device IRQ by calling
-<function>drm_dev_to_irq</function>. This inline function will call a
-bus-specific operation to retrieve the IRQ number. For platform devices,
-<function>platform_get_irq</function>(..., 0) is used to retrieve the
-IRQ number.
-</para>
-<para>
-<function>drm_irq_install</function> starts by calling the
-<methodname>irq_preinstall</methodname> driver operation. The operation
-is optional and must make sure that the interrupt will not get fired by
-clearing all pending interrupt flags or disabling the interrupt.
-</para>
-<para>
-The IRQ will then be requested by a call to
-<function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
-feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
-requested.
-</para>
-<para>
-The IRQ handler function must be provided as the mandatory irq_handler
-driver operation. It will get passed directly to
-<function>request_irq</function> and thus has the same prototype as all
-IRQ handlers. It will get called with a pointer to the DRM device as the
-second argument.
-</para>
-<para>
-Finally the function calls the optional
-<methodname>irq_postinstall</methodname> driver operation. The operation
-usually enables interrupts (excluding the vblank interrupt, which is
-enabled separately), but drivers may choose to enable/disable interrupts
-at a different time.
-</para>
-<para>
-<function>drm_irq_uninstall</function> is similarly used to uninstall an
-IRQ handler. It starts by waking up all processes waiting on a vblank
-interrupt to make sure they don't hang, and then calls the optional
-<methodname>irq_uninstall</methodname> driver operation. The operation
-must disable all hardware interrupts. Finally the function frees the IRQ
-by calling <function>free_irq</function>.
+support a single interrupt per device; devices that use more than one
+IRQ need to be handled manually.
 </para>
+<sect4>
+<title>Managed IRQ Registration</title>
+<para>
+Both the <function>drm_irq_install</function> and
+<function>drm_irq_uninstall</function> functions get the device IRQ by
+calling <function>drm_dev_to_irq</function>. This inline function will
+call a bus-specific operation to retrieve the IRQ number. For platform
+devices, <function>platform_get_irq</function>(..., 0) is used to
+retrieve the IRQ number.
+</para>
+<para>
+<function>drm_irq_install</function> starts by calling the
+<methodname>irq_preinstall</methodname> driver operation. The operation
+is optional and must make sure that the interrupt will not get fired by
+clearing all pending interrupt flags or disabling the interrupt.
+</para>
+<para>
+The IRQ will then be requested by a call to
+<function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
+feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
+requested.
+</para>
+<para>
+The IRQ handler function must be provided as the mandatory irq_handler
+driver operation. It will get passed directly to
+<function>request_irq</function> and thus has the same prototype as all
+IRQ handlers. It will get called with a pointer to the DRM device as the
+second argument.
+</para>
+<para>
+Finally the function calls the optional
+<methodname>irq_postinstall</methodname> driver operation. The operation
+usually enables interrupts (excluding the vblank interrupt, which is
+enabled separately), but drivers may choose to enable/disable interrupts
+at a different time.
+</para>
+<para>
+<function>drm_irq_uninstall</function> is similarly used to uninstall an
+IRQ handler. It starts by waking up all processes waiting on a vblank
+interrupt to make sure they don't hang, and then calls the optional
+<methodname>irq_uninstall</methodname> driver operation. The operation
+must disable all hardware interrupts. Finally the function frees the IRQ
+by calling <function>free_irq</function>.
+</para>
+</sect4>
+<sect4>
+<title>Manual IRQ Registration</title>
+<para>
+Drivers that require multiple interrupt handlers can't use the managed
+IRQ registration functions. In that case IRQs must be registered and
+unregistered manually (usually with the <function>request_irq</function>
+and <function>free_irq</function> functions, or their devm_* equivalent).
+</para>
+<para>
+When manually registering IRQs, drivers must not set the DRIVER_HAVE_IRQ
+driver feature flag, and must not provide the
+<methodname>irq_handler</methodname> driver operation. They must set the
+<structname>drm_device</structname> <structfield>irq_enabled</structfield>
+field to 1 upon registration of the IRQs, and clear it to 0 after
+unregistering the IRQs.
+</para>
+</sect4>
 </sect3>
 <sect3>
 <title>Memory Manager Initialization</title>
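
[The manual path added above boils down to requesting the IRQ directly and flagging it in the DRM device. A minimal sketch assuming a hypothetical "foo" driver; handler body and error paths are driver-specific:

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;

	/* ack the hardware and handle the event(s) */
	return IRQ_HANDLED;
}

static int foo_install_irq(struct drm_device *dev, int irq)
{
	int ret;

	/* no DRIVER_HAVE_IRQ, no irq_handler operation: register directly */
	ret = request_irq(irq, foo_irq_handler, IRQF_SHARED, "foo", dev);
	if (ret)
		return ret;

	/* tell the DRM core that interrupts are operational */
	dev->irq_enabled = 1;
	return 0;
}
]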
@@ -1214,6 +1236,15 @@ int max_width, max_height;</synopsis>
 <title>Miscellaneous</title>
 <itemizedlist>
 <listitem>
+<synopsis>void (*set_property)(struct drm_crtc *crtc,
+                     struct drm_property *property, uint64_t value);</synopsis>
+<para>
+Set the value of the given CRTC property to
+<parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
+for more information about properties.
+</para>
+</listitem>
+<listitem>
 <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
                     uint32_t start, uint32_t size);</synopsis>
 <para>
@@ -1363,6 +1394,15 @@ int max_width, max_height;</synopsis>
 <xref linkend="drm-kms-init"/>.
 </para>
 </listitem>
+<listitem>
+<synopsis>void (*set_property)(struct drm_plane *plane,
+                     struct drm_property *property, uint64_t value);</synopsis>
+<para>
+Set the value of the given plane property to
+<parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
+for more information about properties.
+</para>
+</listitem>
 </itemizedlist>
 </sect3>
 </sect2>
@@ -1572,6 +1612,15 @@ int max_width, max_height;</synopsis>
 <title>Miscellaneous</title>
 <itemizedlist>
 <listitem>
+<listitem>
+<synopsis>void (*set_property)(struct drm_connector *connector,
+                     struct drm_property *property, uint64_t value);</synopsis>
+<para>
+Set the value of the given connector property to
+<parameter>value</parameter>. See <xref linkend="drm-kms-properties"/>
+for more information about properties.
+</para>
+</listitem>
 <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis>
 <para>
 Destroy the connector when not needed anymore. See
@@ -1846,10 +1895,6 @@ void intel_crt_init(struct drm_device *dev)
 <synopsis>bool (*mode_fixup)(struct drm_encoder *encoder,
                        const struct drm_display_mode *mode,
                        struct drm_display_mode *adjusted_mode);</synopsis>
-<note><para>
-FIXME: The mode argument be const, but the i915 driver modifies
-mode-&gt;clock in <function>intel_dp_mode_fixup</function>.
-</para></note>
 <para>
 Let encoders adjust the requested mode or reject it completely. This
 operation returns true if the mode is accepted (possibly after being
@@ -2161,6 +2206,128 @@ void intel_crt_init(struct drm_device *dev)
 <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
 </sect2>
+<sect2>
+<title>Rectangle Utilities Reference</title>
+!Pinclude/drm/drm_rect.h rect utils
+!Iinclude/drm/drm_rect.h
+!Edrivers/gpu/drm/drm_rect.c
+</sect2>
+</sect1>
+
+<!-- Internals: kms properties -->
+
+<sect1 id="drm-kms-properties">
+<title>KMS Properties</title>
+<para>
+Drivers may need to expose additional parameters to applications beyond
+those described in the previous sections. KMS supports attaching
+properties to CRTCs, connectors and planes and offers a userspace API to
+list, get and set the property values.
+</para>
+<para>
+Properties are identified by a name that uniquely defines the property
+purpose, and store an associated value. For all property types except blob
+properties the value is a 64-bit unsigned integer.
+</para>
+<para>
+KMS differentiates between properties and property instances. Drivers
+first create properties and then create and associate individual instances
+of those properties to objects. A property can be instantiated multiple
+times and associated with different objects. Values are stored in property
+instances, and all other property information is stored in the property
+and shared between all instances of the property.
+</para>
+<para>
+Every property is created with a type that influences how the KMS core
+handles the property. Supported property types are
+<variablelist>
+<varlistentry>
+<term>DRM_MODE_PROP_RANGE</term>
+<listitem><para>Range properties report their minimum and maximum
+admissible values. The KMS core verifies that values set by
+applications fit in that range.</para></listitem>
+</varlistentry>
+<varlistentry>
+<term>DRM_MODE_PROP_ENUM</term>
+<listitem><para>Enumerated properties take a numerical value that
+ranges from 0 to the number of enumerated values defined by the
+property minus one, and associate a free-formed string name to each
+value. Applications can retrieve the list of defined value-name pairs
+and use the numerical value to get and set property instance values.
+</para></listitem>
+</varlistentry>
+<varlistentry>
+<term>DRM_MODE_PROP_BITMASK</term>
+<listitem><para>Bitmask properties are enumeration properties that
+additionally restrict all enumerated values to the 0..63 range.
+Bitmask property instance values combine one or more of the
+enumerated bits defined by the property.</para></listitem>
+</varlistentry>
+<varlistentry>
+<term>DRM_MODE_PROP_BLOB</term>
+<listitem><para>Blob properties store a binary blob without any format
+restriction. The binary blobs are created as KMS standalone objects,
+and blob property instance values store the ID of their associated
+blob object.</para>
+<para>Blob properties are only used for the connector EDID property
+and cannot be created by drivers.</para></listitem>
+</varlistentry>
+</variablelist>
+</para>
+<para>
+To create a property drivers call one of the following functions depending
+on the property type. All property creation functions take property flags
+and name, as well as type-specific arguments.
+<itemizedlist>
+<listitem>
+<synopsis>struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+                                               const char *name,
+                                               uint64_t min, uint64_t max);</synopsis>
+<para>Create a range property with the given minimum and maximum
+values.</para>
+</listitem>
+<listitem>
+<synopsis>struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+                                              const char *name,
+                                              const struct drm_prop_enum_list *props,
+                                              int num_values);</synopsis>
+<para>Create an enumerated property. The <parameter>props</parameter>
+argument points to an array of <parameter>num_values</parameter>
+value-name pairs.</para>
+</listitem>
+<listitem>
+<synopsis>struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+                                                 int flags, const char *name,
+                                                 const struct drm_prop_enum_list *props,
+                                                 int num_values);</synopsis>
+<para>Create a bitmask property. The <parameter>props</parameter>
+argument points to an array of <parameter>num_values</parameter>
+value-name pairs.</para>
+</listitem>
+</itemizedlist>
+</para>
+<para>
+Properties can additionally be created as immutable, in which case they
+will be read-only for applications but can be modified by the driver. To
+create an immutable property drivers must set the DRM_MODE_PROP_IMMUTABLE
+flag at property creation time.
+</para>
+<para>
+When no array of value-name pairs is readily available at property
+creation time for enumerated or range properties, drivers can create
+the property using the <function>drm_property_create</function> function
+and manually add enumeration value-name pairs by calling the
+<function>drm_property_add_enum</function> function. Care must be taken to
+properly specify the property type through the <parameter>flags</parameter>
+argument.
+</para>
+<para>
+After creating properties drivers can attach property instances to CRTC,
+connector and plane objects by calling
+<function>drm_object_attach_property</function>. The function takes a
+pointer to the target object, a pointer to the previously created property
+and an initial instance value.
+</para>
 </sect1>
 
 <!-- Internals: vertical blanking -->
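
[As a concrete illustration of the property API documented in the new section above, a hedged sketch of a driver creating and attaching properties; the "brightness" and "foo mode" names are invented for the example:

static const struct drm_prop_enum_list foo_mode_names[] = {
	{ 0, "off" },
	{ 1, "on" },
};

static void foo_attach_properties(struct drm_device *dev,
				  struct drm_connector *connector)
{
	struct drm_property *brightness, *mode;

	/* range property: values outside 0..255 are rejected by the core */
	brightness = drm_property_create_range(dev, 0, "brightness", 0, 255);

	/* enum property built from value-name pairs */
	mode = drm_property_create_enum(dev, 0, "foo mode", foo_mode_names,
					ARRAY_SIZE(foo_mode_names));

	/* attach instances with initial values */
	drm_object_attach_property(&connector->base, brightness, 255);
	drm_object_attach_property(&connector->base, mode, 0);
}
]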
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
index e5f130159ae1..fff10da5e927 100644
--- a/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
@@ -10,6 +10,14 @@ Recommended properties:
 services interrupts for this device.
 - ti,hwmods: Name of the hwmod associated to the LCDC
 
+Optional properties:
+ - max-bandwidth: The maximum pixels per second that the memory
+   interface / lcd controller combination can sustain
+ - max-width: The maximum horizontal pixel width supported by
+   the lcd controller.
+ - max-pixelclock: The maximum pixel clock that can be supported
+   by the lcd controller in KHz.
+
 Example:
 
 	fb: fb@4830e000 {
diff --git a/Documentation/devicetree/bindings/video/display-timing.txt b/Documentation/devicetree/bindings/video/display-timing.txt
index 150038552bc3..e1d4a0b59612 100644
--- a/Documentation/devicetree/bindings/video/display-timing.txt
+++ b/Documentation/devicetree/bindings/video/display-timing.txt
@@ -34,6 +34,7 @@ optional properties:
 - ignored = ignored
 - interlaced (bool): boolean to enable interlaced mode
 - doublescan (bool): boolean to enable doublescan mode
+ - doubleclk (bool): boolean to enable doubleclock mode
 
 All the optional properties that are not bool follow the following logic:
 <1>: high active
diff --git a/Documentation/devicetree/bindings/video/exynos_hdmi.txt b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
index 589edee37394..c71d0f0b750a 100644
--- a/Documentation/devicetree/bindings/video/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
@@ -1,7 +1,10 @@
 Device-Tree bindings for drm hdmi driver
 
 Required properties:
-- compatible: value should be "samsung,exynos5-hdmi".
+- compatible: value should be one among the following:
+	1) "samsung,exynos5-hdmi" <DEPRECATED>
+	2) "samsung,exynos4210-hdmi"
+	3) "samsung,exynos4212-hdmi"
 - reg: physical base address of the hdmi and length of memory mapped
   region.
 - interrupts: interrupt number to the cpu.
@@ -15,7 +18,7 @@ Required properties:
 Example:
 
 	hdmi {
-		compatible = "samsung,exynos5-hdmi";
+		compatible = "samsung,exynos4212-hdmi";
 		reg = <0x14530000 0x100000>;
 		interrupts = <0 95 0>;
 		hpd-gpio = <&gpx3 7 0xf 1 3>;
diff --git a/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt b/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt
index fa166d945809..41eee971562b 100644
--- a/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt
@@ -1,12 +1,15 @@
 Device-Tree bindings for hdmiddc driver
 
 Required properties:
-- compatible: value should be "samsung,exynos5-hdmiddc".
+- compatible: value should be one of the following:
+	1) "samsung,exynos5-hdmiddc" <DEPRECATED>
+	2) "samsung,exynos4210-hdmiddc"
+
 - reg: I2C address of the hdmiddc device.
 
 Example:
 
 	hdmiddc {
-		compatible = "samsung,exynos5-hdmiddc";
+		compatible = "samsung,exynos4210-hdmiddc";
 		reg = <0x50>;
 	};
diff --git a/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt b/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt
index 858f4f9b902f..162f641f7639 100644
--- a/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt
@@ -1,12 +1,15 @@
 Device-Tree bindings for hdmiphy driver
 
 Required properties:
-- compatible: value should be "samsung,exynos5-hdmiphy".
+- compatible: value should be one of the following:
+	1) "samsung,exynos5-hdmiphy" <DEPRECATED>
+	2) "samsung,exynos4210-hdmiphy"
+	3) "samsung,exynos4212-hdmiphy"
 - reg: I2C address of the hdmiphy device.
 
 Example:
 
 	hdmiphy {
-		compatible = "samsung,exynos5-hdmiphy";
+		compatible = "samsung,exynos4210-hdmiphy";
 		reg = <0x38>;
 	};
diff --git a/Documentation/devicetree/bindings/video/exynos_mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 9b2ea0343566..3334b0a8e343 100644
--- a/Documentation/devicetree/bindings/video/exynos_mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
@@ -1,7 +1,12 @@
 Device-Tree bindings for mixer driver
 
 Required properties:
-- compatible: value should be "samsung,exynos5-mixer".
+- compatible: value should be one of the following:
+	1) "samsung,exynos5-mixer" <DEPRECATED>
+	2) "samsung,exynos4210-mixer"
+	3) "samsung,exynos5250-mixer"
+	4) "samsung,exynos5420-mixer"
+
 - reg: physical base address of the mixer and length of memory mapped
   region.
 - interrupts: interrupt number to the cpu.
@@ -9,7 +14,7 @@ Required properties:
 Example:
 
 	mixer {
-		compatible = "samsung,exynos5-mixer";
+		compatible = "samsung,exynos5250-mixer";
 		reg = <0x14450000 0x10000>;
 		interrupts = <0 94 0>;
 	};
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index eefdd91d298a..f6362d88763b 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -81,17 +81,11 @@ pmipal Use the protected mode interface for palette changes.
 
 mtrr:n	Setup memory type range registers for the framebuffer
 	where n:
-	      0 - disabled (equivalent to nomtrr) (default)
-	      1 - uncachable
-	      2 - write-back
-	      3 - write-combining
-	      4 - write-through
-
-	If you see the following in dmesg, choose the type that matches
-	the old one. In this example, use "mtrr:2".
-	...
-	mtrr: type mismatch for e0000000,8000000 old: write-back new: write-combining
-	...
+	      0 - disabled (equivalent to nomtrr)
+	      3 - write-combining (default)
+
+	Values other than 0 and 3 will result in a warning and will be
+	treated just like 3.
 
 nomtrr	Do not use memory type range registers.
 
diff --git a/Documentation/ww-mutex-design.txt b/Documentation/ww-mutex-design.txt
new file mode 100644
index 000000000000..8a112dc304c3
--- /dev/null
+++ b/Documentation/ww-mutex-design.txt
@@ -0,0 +1,344 @@
+Wait/Wound Deadlock-Proof Mutex Design
+======================================
+
+Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
+
+Motivation for WW-Mutexes
+-------------------------
+
+GPUs do operations that commonly involve many buffers. Those buffers
+can be shared across contexts/processes, exist in different memory
+domains (for example VRAM vs system memory), and so on. And with
+PRIME / dmabuf, they can even be shared across devices. So there are
+a handful of situations where the driver needs to wait for buffers to
+become ready. If you think about this in terms of waiting on a buffer
+mutex for it to become available, this presents a problem because
+there is no way to guarantee that buffers appear in an execbuf/batch in
+the same order in all contexts. That is directly under control of
+userspace, and a result of the sequence of GL calls that an application
+makes, which results in the potential for deadlock. The problem gets
+more complex when you consider that the kernel may need to migrate the
+buffer(s) into VRAM before the GPU operates on the buffer(s), which
+may in turn require evicting some other buffers (and you don't want to
+evict other buffers which are already queued up to the GPU), but for a
+simplified understanding of the problem you can ignore this.
+
+The algorithm that the TTM graphics subsystem came up with for dealing with
+this problem is quite simple. For each group of buffers (execbuf) that need
+to be locked, the caller would be assigned a unique reservation id/ticket,
+from a global counter. In case of deadlock while locking all the buffers
+associated with an execbuf, the one with the lowest reservation ticket (i.e.
+the oldest task) wins, and the one with the higher reservation id (i.e. the
+younger task) unlocks all of the buffers that it has already locked, and then
+tries again.
+
+In the RDBMS literature this deadlock handling approach is called wait/wound:
+The older task waits until it can acquire the contended lock. The younger task
+needs to back off and drop all the locks it is currently holding, i.e. the
+younger task is wounded.
+
+Concepts
+--------
+
+Compared to normal mutexes two additional concepts/objects show up in the lock
+interface for w/w mutexes:
+
+Acquire context: To ensure eventual forward progress it is important that a
+task trying to acquire locks doesn't grab a new reservation id, but keeps the
+one it acquired when starting the lock acquisition. This ticket is stored in
+the acquire context. Furthermore the acquire context keeps track of debugging
+state to catch w/w mutex interface abuse.
+
+W/w class: In contrast to normal mutexes the lock class needs to be explicit
+for w/w mutexes, since it is required to initialize the acquire context.
+
+Furthermore there are three different classes of w/w lock acquire functions:
+
+* Normal lock acquisition with a context, using ww_mutex_lock.
+
+* Slowpath lock acquisition on the contending lock, used by the wounded task
+  after having dropped all already acquired locks. These functions have the
+  _slow postfix.
+
+  From a simple semantics point-of-view the _slow functions are not strictly
+  required, since simply calling the normal ww_mutex_lock functions on the
+  contending lock (after having dropped all other already acquired locks) will
+  work correctly. After all if no other ww mutex has been acquired yet there's
+  no deadlock potential and hence the ww_mutex_lock call will block and not
+  prematurely return -EDEADLK. The advantage of the _slow functions is in
+  interface safety:
+  - ww_mutex_lock has a __must_check int return type, whereas ww_mutex_lock_slow
+    has a void return type. Note that since ww mutex code needs loops/retries
+    anyway the __must_check doesn't result in spurious warnings, even though the
+    very first lock operation can never fail.
+  - When full debugging is enabled ww_mutex_lock_slow checks that all acquired
+    ww mutexes have been released (preventing deadlocks) and makes sure that we
+    block on the contending lock (preventing spinning through the -EDEADLK
+    slowpath until the contended lock can be acquired).
+
+* Functions to only acquire a single w/w mutex, which results in the exact same
+  semantics as a normal mutex. This is done by calling ww_mutex_lock with a NULL
+  context.
+
+  Again this is not strictly required. But often you only want to acquire a
+  single lock in which case it's pointless to set up an acquire context (and so
+  better to avoid grabbing a deadlock avoidance ticket).
+
+Of course, all the usual variants for handling wake-ups due to signals are also
+provided.
+
+Usage
+-----
+
+Three different ways to acquire locks within the same w/w class. Common
+definitions for methods #1 and #2:
+
+static DEFINE_WW_CLASS(ww_class);
+
+struct obj {
+	struct ww_mutex lock;
+	/* obj data */
+};
+
+struct obj_entry {
+	struct list_head head;
+	struct obj *obj;
+};
+
+Method 1, using a list in execbuf->buffers that's not allowed to be reordered.
+This is useful if a list of required objects is already tracked somewhere.
+Furthermore the lock helper can propagate the -EALREADY return code back to
+the caller as a signal that an object is on the list twice. This is useful if
+the list is constructed from userspace input and the ABI requires userspace to
+not have duplicate entries (e.g. for a gpu commandbuffer submission ioctl).
+
+int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
+{
+	struct obj *res_obj = NULL;
+	struct obj_entry *contended_entry = NULL;
+	struct obj_entry *entry;
+	int ret;
+
+	ww_acquire_init(ctx, &ww_class);
+
+retry:
+	list_for_each_entry (entry, list, head) {
+		if (entry->obj == res_obj) {
+			res_obj = NULL;
+			continue;
+		}
+		ret = ww_mutex_lock(&entry->obj->lock, ctx);
+		if (ret < 0) {
+			contended_entry = entry;
+			goto err;
+		}
+	}
+
+	ww_acquire_done(ctx);
+	return 0;
+
+err:
+	list_for_each_entry_continue_reverse (entry, list, head)
+		ww_mutex_unlock(&entry->obj->lock);
+
+	if (res_obj)
+		ww_mutex_unlock(&res_obj->lock);
+
+	if (ret == -EDEADLK) {
+		/* we lost out in a seqno race, lock and retry.. */
+		ww_mutex_lock_slow(&contended_entry->obj->lock, ctx);
+		res_obj = contended_entry->obj;
+		goto retry;
+	}
+	ww_acquire_fini(ctx);
+
+	return ret;
+}
+
+Method 2, using a list in execbuf->buffers that can be reordered. Same semantics
+of duplicate entry detection using -EALREADY as method 1 above. But the
+list-reordering allows for a bit more idiomatic code.
+
+int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
+{
+	struct obj_entry *entry, *entry2;
+	int ret;
+
+	ww_acquire_init(ctx, &ww_class);
+
+	list_for_each_entry (entry, list, head) {
+		ret = ww_mutex_lock(&entry->obj->lock, ctx);
+		if (ret < 0) {
+			entry2 = entry;
+
+			list_for_each_entry_continue_reverse (entry2, list, head)
+				ww_mutex_unlock(&entry2->obj->lock);
+
+			if (ret != -EDEADLK) {
+				ww_acquire_fini(ctx);
+				return ret;
+			}
+
+			/* we lost out in a seqno race, lock and retry.. */
+			ww_mutex_lock_slow(&entry->obj->lock, ctx);
+
+			/*
+			 * Move buf to head of the list, this will point
+			 * buf->next to the first unlocked entry,
+			 * restarting the for loop.
+			 */
+			list_del(&entry->head);
+			list_add(&entry->head, list);
+		}
+	}
+
+	ww_acquire_done(ctx);
+	return 0;
+}
+
+Unlocking works the same way for both methods #1 and #2:
+
+void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
+{
+	struct obj_entry *entry;
+
+	list_for_each_entry (entry, list, head)
+		ww_mutex_unlock(&entry->obj->lock);
+
+	ww_acquire_fini(ctx);
+}
+
+Method 3 is useful if the list of objects is constructed ad-hoc and not upfront,
+e.g. when adjusting edges in a graph where each node has its own ww_mutex lock,
+and edges can only be changed when holding the locks of all involved nodes. w/w
+mutexes are a natural fit for such a case for two reasons:
+- They can handle lock-acquisition in any order which allows us to start walking
+  a graph from a starting point and then iteratively discovering new edges and
+  locking down the nodes those edges connect to.
+- Due to the -EALREADY return code signalling that a given object is already
+  held there's no need for additional book-keeping to break cycles in the graph
+  or keep track of which locks are already held (when using more than one node
+  as a starting point).
+
+Note that this approach differs in two important ways from the above methods:
+- Since the list of objects is dynamically constructed (and might very well be
+  different when retrying due to hitting the -EDEADLK wound condition) there's
+  no need to keep any object on a persistent list when it's not locked. We can
+  therefore move the list_head into the object itself.
+- On the other hand the dynamic object list construction also means that the
+  -EALREADY return code can't be propagated.
+
+Note also that methods #1 and #2 can be combined with method #3, e.g. to first
+lock a list of starting nodes (passed in from userspace) using one of the above
+methods. And then lock any additional objects affected by the operations using
+method #3 below. The backoff/retry procedure will be a bit more involved, since
+when the dynamic locking step hits -EDEADLK we also need to unlock all the
+objects acquired with the fixed list. But the w/w mutex debug checks will catch
+any interface misuse for these cases.
+
+Also, method 3 can't fail the lock acquisition step since it doesn't return
+-EALREADY. Of course this would be different when using the _interruptible
+variants, but that's outside of the scope of these examples here.
+
+struct obj {
+	struct ww_mutex ww_mutex;
+	struct list_head locked_list;
+};
+
+static DEFINE_WW_CLASS(ww_class);
+
+void __unlock_objs(struct list_head *list)
+{
+	struct obj *entry, *temp;
+
+	list_for_each_entry_safe (entry, temp, list, locked_list) {
+		/* need to do that before unlocking, since only the current
+		 * lock holder is allowed to use the object */
+		list_del(&entry->locked_list);
+		ww_mutex_unlock(&entry->ww_mutex);
+	}
+}
+
+int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
+{
+	struct obj *obj;
+	int ret;
+
+	ww_acquire_init(ctx, &ww_class);
+
+retry:
+	/* re-init loop start state */
+	loop {
+		/* magic code which walks over a graph and decides which objects
+		 * to lock */
+
+		ret = ww_mutex_lock(&obj->ww_mutex, ctx);
+		if (ret == -EALREADY) {
+			/* we have that one already, get to the next object */
+			continue;
+		}
+		if (ret == -EDEADLK) {
+			__unlock_objs(list);
+
+			ww_mutex_lock_slow(&obj->ww_mutex, ctx);
+			list_add(&obj->locked_list, list);
+			goto retry;
+		}
+
+		/* locked a new object, add it to the list */
+		list_add_tail(&obj->locked_list, list);
+	}
+
+	ww_acquire_done(ctx);
+	return 0;
+}
+
+void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
+{
+	__unlock_objs(list);
+	ww_acquire_fini(ctx);
+}
+
+Method 4: Only lock one single object. In that case deadlock detection and
+prevention is obviously overkill, since with grabbing just one lock you can't
+produce a deadlock within just one class. To simplify this case the w/w mutex
+api can be used with a NULL context.
+
+Implementation Details
+----------------------
+
+Design:
+  ww_mutex currently encapsulates a struct mutex, which means no extra overhead
+  for normal mutex locks, which are far more common. As such there is only a
+  small increase in code size if wait/wound mutexes are not used.
+
+  In general, not much contention is expected. The locks are typically used to
+  serialize access to resources for devices. The only way to make wakeups
+  smarter would be at the cost of adding a field to struct mutex_waiter. This
+  would add overhead to all cases where normal mutexes are used, and
+  ww_mutexes are generally less performance sensitive.
+
+Lockdep:
+  Special care has been taken to warn for as many cases of api abuse
+  as possible. Some common api abuses will be caught with
+  CONFIG_DEBUG_MUTEXES, but CONFIG_PROVE_LOCKING is recommended.
+
+  Some of the errors which will be warned about:
+  - Forgetting to call ww_acquire_fini or ww_acquire_init.
+  - Attempting to lock more mutexes after ww_acquire_done.
+  - Attempting to lock the wrong mutex after -EDEADLK and
+    unlocking all mutexes.
+  - Attempting to lock the right mutex after -EDEADLK,
+    before unlocking all mutexes.
+
+  - Calling ww_mutex_lock_slow before -EDEADLK was returned.
+
+  - Unlocking mutexes with the wrong unlock function.
+  - Calling one of the ww_acquire_* twice on the same context.
+  - Using a different ww_class for the mutex than for the ww_acquire_ctx.
+  - Normal lockdep errors that can result in deadlocks.
+
+  Some of the lockdep errors that can result in deadlocks:
+  - Calling ww_acquire_init to initialize a second ww_acquire_ctx before
+    having called ww_acquire_fini on the first.
+  - 'normal' deadlocks that can occur.
+
+FIXME: Update this section once we have the TASK_DEADLOCK task state flag magic
+implemented.
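
[Method 4 above has no example in the text; a minimal sketch, reusing struct obj from methods #1 and #2. With a NULL context the plain ww_mutex_lock cannot return -EDEADLK, but its return value is still checked to satisfy __must_check:

int work_on_obj(struct obj *obj)
{
	int ret;

	/* NULL context: plain mutex semantics, no deadlock avoidance ticket */
	ret = ww_mutex_lock(&obj->lock, NULL);
	if (ret)	/* cannot happen here; relevant for _interruptible */
		return ret;

	/* ... use obj data ... */

	ww_mutex_unlock(&obj->lock);
	return 0;
}
]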
diff --git a/MAINTAINERS b/MAINTAINERS
index 5be702cc8449..437dd12ab9cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2697,12 +2697,14 @@ F: include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
 DRM DRIVERS FOR NVIDIA TEGRA
-M:	Thierry Reding <thierry.reding@avionic-design.de>
+M:	Thierry Reding <thierry.reding@gmail.com>
+M:	Terje Bergström <tbergstrom@nvidia.com>
 L:	dri-devel@lists.freedesktop.org
 L:	linux-tegra@vger.kernel.org
-T:	git git://gitorious.org/thierryreding/linux.git
+T:	git git://anongit.freedesktop.org/tegra/linux.git
 S:	Maintained
-F:	drivers/gpu/drm/tegra/
+F:	drivers/gpu/host1x/
+F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
 DSBR100 USB FM RADIO DRIVER
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index 3f0239ec1bc5..dc259e8b8a73 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -190,7 +190,7 @@
 		samsung,i2c-max-bus-freq = <66000>;
 
 		hdmiddc@50 {
-			compatible = "samsung,exynos5-hdmiddc";
+			compatible = "samsung,exynos4210-hdmiddc";
 			reg = <0x50>;
 		};
 	};
@@ -224,7 +224,7 @@
 		samsung,i2c-max-bus-freq = <378000>;
 
 		hdmiphy@38 {
-			compatible = "samsung,exynos5-hdmiphy";
+			compatible = "samsung,exynos4212-hdmiphy";
 			reg = <0x38>;
 		};
 	};
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 3e0c792e2767..f320d7cb4174 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -72,7 +72,7 @@
 		samsung,i2c-max-bus-freq = <66000>;
 
 		hdmiddc@50 {
-			compatible = "samsung,exynos5-hdmiddc";
+			compatible = "samsung,exynos4210-hdmiddc";
 			reg = <0x50>;
 		};
 	};
@@ -102,7 +102,7 @@
 		samsung,i2c-max-bus-freq = <66000>;
 
 		hdmiphy@38 {
-			compatible = "samsung,exynos5-hdmiphy";
+			compatible = "samsung,exynos4212-hdmiphy";
 			reg = <0x38>;
 		};
 	};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index fc9fb3d526e2..8b815c945949 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -601,7 +601,7 @@
 	};
 
 	hdmi {
-		compatible = "samsung,exynos5-hdmi";
+		compatible = "samsung,exynos4212-hdmi";
 		reg = <0x14530000 0x70000>;
 		interrupts = <0 95 0>;
 		clocks = <&clock 333>, <&clock 136>, <&clock 137>,
@@ -611,7 +611,7 @@
 	};
 
 	mixer {
-		compatible = "samsung,exynos5-mixer";
+		compatible = "samsung,exynos5250-mixer";
 		reg = <0x14450000 0x10000>;
 		interrupts = <0 94 0>;
 	};
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index bed73a643a56..f41e66d65e31 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 5399f7e18102..127ab23e1f6c 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(__mutex_dec_return_lock(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 028ac1f71b51..46ac1ddea683 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
 	return indirect_read_config(bus, devfn, offset, len, val);
 }
 
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
 {
 	.read = fsl_indirect_read_config,
 	.write = indirect_write_config,
 };
 
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
-					  resource_size_t cfg_addr,
-					  resource_size_t cfg_data, u32 flags)
-{
-	setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
-	hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
 #define MAX_PHYS_ADDR_BITS	40
 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
 
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
 	if (!hose->private_data)
 		goto no_bridge;
 
-	fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
-			       PPC_INDIRECT_TYPE_BIG_ENDIAN);
+	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 
 	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 
 	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+		/* use fsl_indirect_read_config for PCIe */
+		hose->ops = &fsl_indirect_pcie_ops;
 		/* For PCIE read HEADER_TYPE to identify controler mode */
 		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
 		if (ret)
 			goto err0;
 	} else {
-		fsl_setup_indirect_pci(hose, rsrc_cfg.start,
-				       rsrc_cfg.start + 4, 0);
+		setup_indirect_pci(hose, rsrc_cfg.start,
+				   rsrc_cfg.start + 4, 0);
 	}
 
 	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 886ac7d4937a..2f8c1abeb086 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
+	debug_dma_mapping_error(dev, dma_addr);
 	if (dma_ops->mapping_error)
 		return dma_ops->mapping_error(dev, dma_addr);
-	return (dma_addr == 0UL);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
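
Two things change here: dma_mapping_error() now reports to the DMA debug API
before checking, and the error cookie is the architecture's DMA_ERROR_CODE
rather than a bare 0. A hedged sketch of the driver-side convention this
supports (generic names, error value illustrative):

    dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, addr)) {
    	/* With CONFIG_DMA_API_DEBUG, the debug_dma_mapping_error()
    	 * call added above records that this driver checked the
    	 * mapping, suppressing a false "unchecked error" warning. */
    	return -EIO;
    }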
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index d8a6a385d048..feb719d3c851 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
 	.write = reipl_fcp_scpdata_write,
 };
 
-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
 		   reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
 
 /* FCP dump device attributes */
 
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
 		   dump_block_fcp->ipl_info.fcp.bootprog);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 408e866ae548..dd3c1994b8bd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
 
+#ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
 	/*
@@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
 	 */
 }
 EXPORT_SYMBOL_GPL(synchronize_irq);
+#endif
 
 #ifndef CONFIG_PCI
 
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 3cbd3b8bf311..cca388253a39 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 			continue;
 		} else if ((addr <= chunk->addr) &&
 			   (addr + size >= chunk->addr + chunk->size)) {
-			memset(chunk, 0 , sizeof(*chunk));
+			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
+			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
 		} else if (addr + size < chunk->addr + chunk->size) {
 			chunk->size = chunk->addr + chunk->size - addr - size;
 			chunk->addr = addr + size;
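
The old code only zeroed the dropped chunk in place, leaving a hole in the
array; the fix actually deletes it by shifting the tail down and clearing the
vacated last slot. The generic idiom, as a sketch:

    /* Remove element i from an array with n fixed slots. */
    memmove(&arr[i], &arr[i + 1], (n - i - 1) * sizeof(arr[0]));
    memset(&arr[n - 1], 0, sizeof(arr[0]));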
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index 090358a7e1bb..dad29b687bd3 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 }
 
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	int __done, __res;
 
@@ -51,7 +51,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
51 : "t"); 51 : "t");
52 52
53 if (unlikely(!__done || __res != 0)) 53 if (unlikely(!__done || __res != 0))
54 __res = fail_fn(count); 54 __res = -1;
55 55
56 return __res; 56 return __res;
57} 57}
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d8e8eefbe24c..34f69cb9350a 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -345,4 +345,11 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 
 #define IO_SPACE_LIMIT 0xffff
 
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+					 unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index e235582f9930..f768f6298419 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -26,7 +26,10 @@
 #include <uapi/asm/mtrr.h>
 
 
-/* The following functions are for use by other drivers */
+/*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
 # ifdef CONFIG_MTRR
 extern u8 mtrr_type_lookup(u64 addr, u64 end);
 extern void mtrr_save_fixed_ranges(void *);
@@ -45,6 +48,7 @@ extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+extern int phys_wc_to_mtrr_index(int handle);
 # else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end)
 {
@@ -80,6 +84,10 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
+static inline int phys_wc_to_mtrr_index(int handle)
+{
+	return -1;
+}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 03f90c8a5a7c..0208c3c2cbc6 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -42,17 +42,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 68a87b0f8e29..2c543fff241b 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -37,17 +37,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
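
The same conversion repeats across ia64, powerpc, sh and both x86 variants
above: the fastpath no longer tail-calls a fail_fn but merely reports
contention, letting the generic mutex code pick which slowpath to run (which
the wait/wound mutex variants in this merge rely on). A condensed sketch of a
caller in kernel/mutex.c under the new contract, not a verbatim quote:

    int __sched mutex_lock_interruptible(struct mutex *lock)
    {
    	int ret;

    	might_sleep();
    	ret = __mutex_fastpath_lock_retval(&lock->count);
    	if (likely(!ret)) {		/* fastpath took the lock */
    		mutex_set_owner(lock);
    		return 0;
    	}
    	/* -1 from the fastpath: the caller chooses the slowpath */
    	return __mutex_lock_interruptible_slowpath(lock);
    }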
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 726bf963c227..3533d4d16f8c 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -51,9 +51,13 @@
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
+#include <asm/pat.h>
 
 #include "mtrr.h"
 
+/* arch_phys_wc_add returns an MTRR register index plus this offset. */
+#define MTRR_TO_PHYS_WC_OFFSET 1000
+
 u32 num_var_ranges;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
@@ -524,6 +528,73 @@ int mtrr_del(int reg, unsigned long base, unsigned long size)
 }
 EXPORT_SYMBOL(mtrr_del);
 
+/**
+ * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If PAT is available, this does nothing.  If PAT is unavailable, it
+ * attempts to add a WC MTRR covering size bytes starting at base and
+ * logs an error if this fails.
+ *
+ * Drivers must store the return value to pass to mtrr_del_wc_if_needed,
+ * but drivers should not try to interpret that return value.
+ */
+int arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+	int ret;
+
+	if (pat_enabled)
+		return 0;  /* Success!  (We don't need to do anything.) */
+
+	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
+	if (ret < 0) {
+		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
+			(void *)base, (void *)(base + size - 1));
+		return ret;
+	}
+	return ret + MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL(arch_phys_wc_add);
+
+/*
+ * arch_phys_wc_del - undoes arch_phys_wc_add
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This cleans up after mtrr_add_wc_if_needed.
+ *
+ * The API guarantees that mtrr_del_wc_if_needed(error code) and
+ * mtrr_del_wc_if_needed(0) do nothing.
+ */
+void arch_phys_wc_del(int handle)
+{
+	if (handle >= 1) {
+		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
+		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
+	}
+}
+EXPORT_SYMBOL(arch_phys_wc_del);
+
+/*
+ * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This will turn the return value from arch_phys_wc_add into an mtrr
+ * index suitable for debugging.
+ *
+ * Note: There is no legitimate use for this function, except possibly
+ * in printk line.  Alas there is an illegitimate use in some ancient
+ * drm ioctls.
+ */
+int phys_wc_to_mtrr_index(int handle)
+{
+	if (handle < MTRR_TO_PHYS_WC_OFFSET)
+		return -1;
+	else
+		return handle - MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+
 /*
  * HACK ALERT!
  * These should be called implicitly, but we can't yet until all the initcall
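
A sketch of how a PCI framebuffer driver consumes this pair, mirroring the
ast and cirrus conversions later in this diff (struct my_card and its field
are hypothetical):

    struct my_card {
    	int fb_wc;	/* opaque handle from arch_phys_wc_add() */
    };

    static int my_card_vram_init(struct my_card *card, struct pci_dev *pdev)
    {
    	card->fb_wc = arch_phys_wc_add(pci_resource_start(pdev, 0),
    				       pci_resource_len(pdev, 0));
    	return 0;	/* losing WC only costs performance, not correctness */
    }

    static void my_card_vram_fini(struct my_card *card)
    {
    	arch_phys_wc_del(card->fb_wc);	/* no-op for error or zero handles */
    }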
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4e22ce3ed73d..48029aa477d9 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CMA) += dma-contiguous.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o
 obj-$(CONFIG_ISA)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c
new file mode 100644
index 000000000000..a73fbf3b8e56
--- /dev/null
+++ b/drivers/base/reservation.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2013 Canonical Ltd
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/reservation.h>
+#include <linux/export.h>
+
+DEFINE_WW_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
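
The point of defining a single ww_class here rather than in each driver:
every buffer shared across devices must take its reservation from the same
class, or the acquire-context back-off cannot order them. A hedged sketch of
a consumer (struct shared_buf is hypothetical):

    #include <linux/reservation.h>

    struct shared_buf {
    	struct ww_mutex resv;
    };

    static void shared_buf_init(struct shared_buf *buf)
    {
    	/* one global class => deadlock-free multi-buffer acquisition
    	 * even when the buffers belong to different drivers */
    	ww_mutex_init(&buf->resv, &reservation_ww_class);
    }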
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 0628d7b65c71..03c1dc1ab552 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -236,14 +236,14 @@ static int ati_configure(void)
 static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
 {
 	pci_save_state(dev);
-	pci_set_power_state(dev, 3);
+	pci_set_power_state(dev, PCI_D3hot);
 
 	return 0;
 }
 
 static int agp_ati_resume(struct pci_dev *dev)
 {
-	pci_set_power_state(dev, 0);
+	pci_set_power_state(dev, PCI_D0);
 	pci_restore_state(dev);
 
 	return ati_configure();
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 2e044338753c..1b192395a90c 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -603,7 +603,8 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
 		vma->vm_ops = kerninfo.vm_ops;
 	} else if (io_remap_pfn_range(vma, vma->vm_start,
 				(kerninfo.aper_base + offset) >> PAGE_SHIFT,
-				size, vma->vm_page_prot)) {
+				size,
+				pgprot_writecombine(vma->vm_page_prot))) {
 		goto out_again;
 	}
 	mutex_unlock(&(agp_fe.agp_mutex));
@@ -618,8 +619,9 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
 		if (kerninfo.vm_ops) {
 			vma->vm_ops = kerninfo.vm_ops;
 		} else if (io_remap_pfn_range(vma, vma->vm_start,
 					      kerninfo.aper_base >> PAGE_SHIFT,
-					      size, vma->vm_page_prot)) {
+					      size,
+					      pgprot_writecombine(vma->vm_page_prot))) {
 			goto out_again;
 		}
 		mutex_unlock(&(agp_fe.agp_mutex));
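
Both hunks switch the AGP aperture mapping from the default protection to
write-combining. The idiom being adopted, as a sketch (demo_aper_base stands
in for a real aperture address):

    static resource_size_t demo_aper_base;	/* hypothetical aperture base */

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	size_t size = vma->vm_end - vma->vm_start;

    	/* WC gives streaming-write performance without MTRR games */
    	return io_remap_pfn_range(vma, vma->vm_start,
    				  demo_aper_base >> PAGE_SHIFT, size,
    				  pgprot_writecombine(vma->vm_page_prot));
    }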
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 62be3ec0da4b..be42a2312dc9 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -399,8 +399,8 @@ static void agp_nvidia_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM
 static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
 {
-	pci_save_state (pdev);
-	pci_set_power_state (pdev, 3);
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
 	return 0;
 }
@@ -408,7 +408,7 @@ static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
 static int agp_nvidia_resume(struct pci_dev *pdev)
 {
 	/* set power state 0 and restore PCI space */
-	pci_set_power_state (pdev, 0);
+	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
 	/* reconfigure AGP hardware again */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b16c50ee769c..a7c54c843291 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -139,6 +139,7 @@ config DRM_I915
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select VIDEO_OUTPUT_CONTROL if ACPI
 	select INPUT if ACPI
+	select THERMAL if ACPI
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
 	help
@@ -213,6 +214,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/rcar-du/Kconfig"
+
 source "drivers/gpu/drm/shmobile/Kconfig"
 
 source "drivers/gpu/drm/omapdrm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1c9f24396002..801bcafa3028 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,8 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o
+		drm_trace_points.o drm_global.o drm_prime.o \
+		drm_rect.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -48,6 +49,7 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 02e52d543e4b..622d4ae7eb9e 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -348,8 +348,24 @@ int ast_gem_create(struct drm_device *dev,
 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
 int ast_bo_unpin(struct ast_bo *bo);
 
-int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
-void ast_bo_unreserve(struct ast_bo *bo);
+static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+static inline void ast_bo_unreserve(struct ast_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
 void ast_ttm_placement(struct ast_bo *bo, int domain);
 int ast_bo_push_sysram(struct ast_bo *bo);
 int ast_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index fbc0823cfa18..7b33e14e44aa 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -51,7 +51,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	struct ast_bo *bo;
 	int src_offset, dst_offset;
 	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
-	int ret;
+	int ret = -EBUSY;
 	bool unmap = false;
 	bool store_for_later = false;
 	int x2, y2;
@@ -65,7 +65,8 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	ret = ast_bo_reserve(bo, true);
+	if (!in_interrupt())
+		ret = ast_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
 			return;
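
Taken together, the two ast_fb.c hunks make the dirty-update path
interrupt-safe: ttm_bo_reserve() may sleep, so in interrupt context the code
never attempts the reserve and instead falls into the existing -EBUSY branch,
which stores the damage for a later flush. Condensed from the patched
function (a fragment, not self-contained):

    int ret = -EBUSY;			/* assume contended by default */

    if (!in_interrupt())		/* reserving may sleep */
    	ret = ast_bo_reserve(bo, true);
    if (ret) {
    	if (ret != -EBUSY)
    		return;
    	store_for_later = true;	/* coalesce the damage, flush later */
    }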
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 09da3393c527..98d670825a1a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -271,26 +271,19 @@ int ast_mm_init(struct ast_private *ast)
 		return ret;
 	}
 
-	ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-				    pci_resource_len(dev->pdev, 0),
-				    DRM_MTRR_WC);
+	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+					pci_resource_len(dev->pdev, 0));
 
 	return 0;
 }
 
 void ast_mm_fini(struct ast_private *ast)
 {
-	struct drm_device *dev = ast->dev;
 	ttm_bo_device_release(&ast->ttm.bdev);
 
 	ast_ttm_global_release(ast);
 
-	if (ast->fb_mtrr >= 0) {
-		drm_mtrr_del(ast->fb_mtrr,
-			     pci_resource_start(dev->pdev, 0),
-			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
-		ast->fb_mtrr = -1;
-	}
+	arch_phys_wc_del(ast->fb_mtrr);
 }
 
 void ast_ttm_placement(struct ast_bo *bo, int domain)
@@ -310,24 +303,6 @@ void ast_ttm_placement(struct ast_bo *bo, int domain)
 	bo->placement.num_busy_placement = c;
 }
 
-int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
-{
-	int ret;
-
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
-	if (ret) {
-		if (ret != -ERESTARTSYS && ret != -EBUSY)
-			DRM_ERROR("reserve failed %p\n", bo);
-		return ret;
-	}
-	return 0;
-}
-
-void ast_bo_unreserve(struct ast_bo *bo)
-{
-	ttm_bo_unreserve(&bo->bo);
-}
-
 int ast_bo_create(struct drm_device *dev, int size, int align,
 		  uint32_t flags, struct ast_bo **pastbo)
 {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 7ca059596887..bae55609e6c3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -240,8 +240,25 @@ void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
 int cirrus_bo_create(struct drm_device *dev, int size, int align,
 		     uint32_t flags, struct cirrus_bo **pcirrusbo);
 int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
-int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
-void cirrus_bo_unreserve(struct cirrus_bo *bo);
+
+static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
 #endif				/* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 3541b567bbd8..b27e95666fab 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -25,7 +25,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
25 struct cirrus_bo *bo; 25 struct cirrus_bo *bo;
26 int src_offset, dst_offset; 26 int src_offset, dst_offset;
27 int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8; 27 int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
28 int ret; 28 int ret = -EBUSY;
29 bool unmap = false; 29 bool unmap = false;
30 bool store_for_later = false; 30 bool store_for_later = false;
31 int x2, y2; 31 int x2, y2;
@@ -39,7 +39,8 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	ret = cirrus_bo_reserve(bo, true);
+	if (!in_interrupt())
+		ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
 			return;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ed8cfc740c9..0047012045c2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -271,9 +271,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 		return ret;
 	}
 
-	cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-				       pci_resource_len(dev->pdev, 0),
-				       DRM_MTRR_WC);
+	cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+					   pci_resource_len(dev->pdev, 0));
 
 	cirrus->mm_inited = true;
 	return 0;
@@ -281,8 +280,6 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
-	struct drm_device *dev = cirrus->dev;
-
 	if (!cirrus->mm_inited)
 		return;
 
@@ -290,12 +287,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 
 	cirrus_ttm_global_release(cirrus);
 
-	if (cirrus->fb_mtrr >= 0) {
-		drm_mtrr_del(cirrus->fb_mtrr,
-			     pci_resource_start(dev->pdev, 0),
-			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
-		cirrus->fb_mtrr = -1;
-	}
+	arch_phys_wc_del(cirrus->fb_mtrr);
+	cirrus->fb_mtrr = 0;
 }
 
 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
@@ -315,24 +308,6 @@ void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
 	bo->placement.num_busy_placement = c;
 }
 
-int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
-{
-	int ret;
-
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
-	if (ret) {
-		if (ret != -ERESTARTSYS && ret != -EBUSY)
-			DRM_ERROR("reserve failed %p\n", bo);
-		return ret;
-	}
-	return 0;
-}
-
-void cirrus_bo_unreserve(struct cirrus_bo *bo)
-{
-	ttm_bo_unreserve(&bo->bo);
-}
-
 int cirrus_bo_create(struct drm_device *dev, int size, int align,
 		     uint32_t flags, struct cirrus_bo **pcirrusbo)
 {
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 0128147265f3..5a4dbb410b71 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -210,12 +210,16 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	if (drm_core_has_MTRR(dev)) {
 		if (map->type == _DRM_FRAME_BUFFER ||
 		    (map->flags & _DRM_WRITE_COMBINING)) {
-			map->mtrr = mtrr_add(map->offset, map->size,
-					     MTRR_TYPE_WRCOMB, 1);
+			map->mtrr =
+				arch_phys_wc_add(map->offset, map->size);
 		}
 	}
 	if (map->type == _DRM_REGISTERS) {
-		map->handle = ioremap(map->offset, map->size);
+		if (map->flags & _DRM_WRITE_COMBINING)
+			map->handle = ioremap_wc(map->offset,
+						 map->size);
+		else
+			map->handle = ioremap(map->offset, map->size);
 		if (!map->handle) {
 			kfree(map);
 			return -ENOMEM;
@@ -410,6 +414,15 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
 
 	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
 	map->handle = (void *)(unsigned long)maplist->user_token;
+
+	/*
+	 * It appears that there are no users of this value whatsoever --
+	 * drmAddMap just discards it.  Let's not encourage its use.
+	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
+	 *  it's not a real mtrr index anymore.)
+	 */
+	map->mtrr = -1;
+
 	return 0;
 }
 
@@ -451,11 +464,8 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 		iounmap(map->handle);
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
-		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
-			int retcode;
-			retcode = mtrr_del(map->mtrr, map->offset, map->size);
-			DRM_DEBUG("mtrr_del=%d\n", retcode);
-		}
+		if (drm_core_has_MTRR(dev))
+			arch_phys_wc_del(map->mtrr);
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e7e92429d10f..fc83bb9eb514 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -29,6 +29,7 @@
  *	Dave Airlie <airlied@linux.ie>
  *	Jesse Barnes <jesse.barnes@intel.com>
  */
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -91,7 +92,7 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
 
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)				\
-	char *fnname(int val)					\
+	const char *fnname(int val)				\
 	{							\
 		int i;						\
 		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
@@ -104,7 +105,7 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
 /*
  * Global properties
  */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
 {	{ DRM_MODE_DPMS_ON, "On" },
 	{ DRM_MODE_DPMS_STANDBY, "Standby" },
 	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
@@ -116,7 +117,7 @@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
 /*
  * Optional properties
  */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 {
 	{ DRM_MODE_SCALE_NONE, "None" },
 	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
@@ -124,7 +125,7 @@ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
 {
 	{ DRM_MODE_DITHERING_OFF, "Off" },
 	{ DRM_MODE_DITHERING_ON, "On" },
@@ -134,7 +135,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
 /*
  * Non-global properties, but "required" for certain connectors.
  */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -143,7 +144,7 @@ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
 
 DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
 
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -153,7 +154,7 @@ static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
 DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
 		 drm_dvi_i_subconnector_enum_list)
 
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -164,7 +165,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] =
 
 DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
 
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -176,7 +177,7 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
 		 drm_tv_subconnector_enum_list)
 
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 	{ DRM_MODE_DIRTY_OFF, "Off" },
 	{ DRM_MODE_DIRTY_ON, "On" },
 	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
@@ -184,7 +185,7 @@ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 
 struct drm_conn_prop_enum_list {
 	int type;
-	char *name;
+	const char *name;
 	int count;
 };
 
@@ -210,7 +211,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
 };
 
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 {	{ DRM_MODE_ENCODER_NONE, "None" },
 	{ DRM_MODE_ENCODER_DAC, "DAC" },
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
@@ -219,7 +220,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
-char *drm_get_encoder_name(struct drm_encoder *encoder)
+const char *drm_get_encoder_name(const struct drm_encoder *encoder)
 {
 	static char buf[32];
 
@@ -230,7 +231,7 @@ char *drm_get_encoder_name(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(drm_get_encoder_name);
 
-char *drm_get_connector_name(struct drm_connector *connector)
+const char *drm_get_connector_name(const struct drm_connector *connector)
 {
 	static char buf[32];
 
@@ -241,7 +242,7 @@ char *drm_get_connector_name(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_get_connector_name);
 
-char *drm_get_connector_status_name(enum drm_connector_status status)
+const char *drm_get_connector_status_name(enum drm_connector_status status)
 {
 	if (status == connector_status_connected)
 		return "connected";
@@ -252,6 +253,28 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 }
 EXPORT_SYMBOL(drm_get_connector_status_name);
 
+static char printable_char(int c)
+{
+	return isascii(c) && isprint(c) ? c : '?';
+}
+
+const char *drm_get_format_name(uint32_t format)
+{
+	static char buf[32];
+
+	snprintf(buf, sizeof(buf),
+		 "%c%c%c%c %s-endian (0x%08x)",
+		 printable_char(format & 0xff),
+		 printable_char((format >> 8) & 0xff),
+		 printable_char((format >> 16) & 0xff),
+		 printable_char((format >> 24) & 0x7f),
+		 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+		 format);
+
+	return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
 /**
  * drm_mode_object_get - allocate a new modeset identifier
  * @dev: DRM device
@@ -569,16 +592,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	}
 
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		if (plane->fb == fb) {
-			/* should turn off the crtc */
-			ret = plane->funcs->disable_plane(plane);
-			if (ret)
-				DRM_ERROR("failed to disable plane with busy fb\n");
-			/* disconnect the plane from the fb and crtc: */
-			__drm_framebuffer_unreference(plane->fb);
-			plane->fb = NULL;
-			plane->crtc = NULL;
-		}
+		if (plane->fb == fb)
+			drm_plane_force_disable(plane);
 	}
 	drm_modeset_unlock_all(dev);
 }
@@ -593,7 +608,7 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
  * @crtc: CRTC object to init
  * @funcs: callbacks for the new CRTC
  *
- * Inits a new object created as base part of an driver crtc object.
+ * Inits a new object created as base part of a driver crtc object.
  *
  * RETURNS:
  * Zero on success, error code on failure.
@@ -628,11 +643,12 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 EXPORT_SYMBOL(drm_crtc_init);
 
 /**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * drm_crtc_cleanup - Clean up the core crtc usage
  * @crtc: CRTC to cleanup
  *
- * Cleanup @crtc. Removes from drm modesetting space
- * does NOT free object, caller does that.
+ * This function cleans up @crtc and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the crtc structure itself,
+ * this is the responsibility of the caller.
  */
 void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
@@ -657,7 +673,7 @@ EXPORT_SYMBOL(drm_crtc_cleanup);
 void drm_mode_probed_add(struct drm_connector *connector,
 			 struct drm_display_mode *mode)
 {
-	list_add(&mode->head, &connector->probed_modes);
+	list_add_tail(&mode->head, &connector->probed_modes);
 }
 EXPORT_SYMBOL(drm_mode_probed_add);
 
@@ -803,6 +819,21 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+/**
+ * drm_plane_init - Initialise a new plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @priv: plane is private (hidden from userspace)?
+ *
+ * Inits a new object created as base part of a driver plane object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
 		   const struct drm_plane_funcs *funcs,
@@ -851,6 +882,14 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 }
 EXPORT_SYMBOL(drm_plane_init);
 
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
 void drm_plane_cleanup(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
@@ -868,6 +907,32 @@ void drm_plane_cleanup(struct drm_plane *plane)
 EXPORT_SYMBOL(drm_plane_cleanup);
 
 /**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+	int ret;
+
+	if (!plane->fb)
+		return;
+
+	ret = plane->funcs->disable_plane(plane);
+	if (ret)
+		DRM_ERROR("failed to disable plane with busy fb\n");
+	/* disconnect the plane from the fb and crtc: */
+	__drm_framebuffer_unreference(plane->fb);
+	plane->fb = NULL;
+	plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
  * drm_mode_create - create a new display mode
  * @dev: DRM device
  *
@@ -1740,7 +1805,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
 
 	plane_resp->plane_id = plane->base.id;
 	plane_resp->possible_crtcs = plane->possible_crtcs;
-	plane_resp->gamma_size = plane->gamma_size;
+	plane_resp->gamma_size = 0;
 
 	/*
 	 * This ioctl is called twice, once to determine how much space is
@@ -1834,7 +1899,8 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
 		if (fb->pixel_format == plane->format_types[i])
 			break;
 	if (i == plane->format_count) {
-		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+		DRM_DEBUG_KMS("Invalid pixel format %s\n",
+			      drm_get_format_name(fb->pixel_format));
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1906,18 +1972,31 @@ out:
 int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
 	struct drm_crtc *crtc = set->crtc;
-	struct drm_framebuffer *fb, *old_fb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *tmp;
 	int ret;
 
-	old_fb = crtc->fb;
+	/*
+	 * NOTE: ->set_config can also disable other crtcs (if we steal all
+	 * connectors from it), hence we need to refcount the fbs across all
+	 * crtcs. Atomic modeset will have saner semantics ...
+	 */
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+		tmp->old_fb = tmp->fb;
+
 	fb = set->fb;
 
 	ret = crtc->funcs->set_config(set);
 	if (ret == 0) {
-		if (old_fb)
-			drm_framebuffer_unreference(old_fb);
-		if (fb)
-			drm_framebuffer_reference(fb);
+		/* crtc->fb must be updated by ->set_config, enforces this. */
+		WARN_ON(fb != crtc->fb);
+	}
+
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+		if (tmp->fb)
+			drm_framebuffer_reference(tmp->fb);
+		if (tmp->old_fb)
+			drm_framebuffer_unreference(tmp->old_fb);
 	}
 
 	return ret;
@@ -2099,10 +2178,10 @@ out:
 	return ret;
 }
 
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv)
+static int drm_mode_cursor_common(struct drm_device *dev,
+				  struct drm_mode_cursor2 *req,
+				  struct drm_file *file_priv)
 {
-	struct drm_mode_cursor *req = data;
 	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	int ret = 0;
@@ -2122,13 +2201,17 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 
 	mutex_lock(&crtc->mutex);
 	if (req->flags & DRM_MODE_CURSOR_BO) {
-		if (!crtc->funcs->cursor_set) {
+		if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
 			ret = -ENXIO;
 			goto out;
 		}
 		/* Turns off the cursor if handle is 0 */
-		ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
-					      req->width, req->height);
+		if (crtc->funcs->cursor_set2)
+			ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+						  req->width, req->height, req->hot_x, req->hot_y);
+		else
+			ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+						  req->width, req->height);
 	}
 
 	if (req->flags & DRM_MODE_CURSOR_MOVE) {
@@ -2143,6 +2226,25 @@ out:
2143 mutex_unlock(&crtc->mutex); 2226 mutex_unlock(&crtc->mutex);
2144 2227
2145 return ret; 2228 return ret;
2229
2230}
2231int drm_mode_cursor_ioctl(struct drm_device *dev,
2232 void *data, struct drm_file *file_priv)
2233{
2234 struct drm_mode_cursor *req = data;
2235 struct drm_mode_cursor2 new_req;
2236
2237 memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
2238 new_req.hot_x = new_req.hot_y = 0;
2239
2240 return drm_mode_cursor_common(dev, &new_req, file_priv);
2241}
2242
2243int drm_mode_cursor2_ioctl(struct drm_device *dev,
2244 void *data, struct drm_file *file_priv)
2245{
2246 struct drm_mode_cursor2 *req = data;
2247 return drm_mode_cursor_common(dev, req, file_priv);
2146} 2248}
2147 2249
2148/* Original addfb only supported RGB formats, so figure out which one */ 2250/* Original addfb only supported RGB formats, so figure out which one */
@@ -2312,7 +2414,8 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
2312 2414
2313 ret = format_check(r); 2415 ret = format_check(r);
2314 if (ret) { 2416 if (ret) {
2315 DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format); 2417 DRM_DEBUG_KMS("bad framebuffer format %s\n",
2418 drm_get_format_name(r->pixel_format));
2316 return ret; 2419 return ret;
2317 } 2420 }
2318 2421
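
With drm_mode_cursor_common() in place, the legacy ioctl is just the new one with a zero hotspot, and drivers opt into hotspot support by providing ->cursor_set2. A sketch of the new hook with a hypothetical foo_ driver prefix (the signature matches the call sites above):

    static int foo_crtc_cursor_set2(struct drm_crtc *crtc,
                                    struct drm_file *file_priv,
                                    uint32_t handle,
                                    uint32_t width, uint32_t height,
                                    int32_t hot_x, int32_t hot_y)
    {
        /* handle == 0 still means "turn the cursor off" */
        if (!handle)
            return foo_cursor_hide(crtc);             /* hypothetical */

        return foo_cursor_show(crtc, file_priv, handle, /* hypothetical */
                               width, height, hot_x, hot_y);
    }

    static const struct drm_crtc_funcs foo_crtc_funcs = {
        .cursor_set2 = foo_crtc_cursor_set2,
        /* .cursor_set may stay NULL; the core prefers cursor_set2 */
    };
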
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ed1334e27c33..738a4294d820 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -189,13 +189,14 @@ prune:
189 if (list_empty(&connector->modes)) 189 if (list_empty(&connector->modes))
190 return 0; 190 return 0;
191 191
192 list_for_each_entry(mode, &connector->modes, head)
193 mode->vrefresh = drm_mode_vrefresh(mode);
194
192 drm_mode_sort(&connector->modes); 195 drm_mode_sort(&connector->modes);
193 196
194 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, 197 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
195 drm_get_connector_name(connector)); 198 drm_get_connector_name(connector));
196 list_for_each_entry(mode, &connector->modes, head) { 199 list_for_each_entry(mode, &connector->modes, head) {
197 mode->vrefresh = drm_mode_vrefresh(mode);
198
199 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 200 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
200 drm_mode_debug_printmodeline(mode); 201 drm_mode_debug_printmodeline(mode);
201 } 202 }
@@ -564,14 +565,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
564 565
565 DRM_DEBUG_KMS("\n"); 566 DRM_DEBUG_KMS("\n");
566 567
567 if (!set) 568 BUG_ON(!set);
568 return -EINVAL; 569 BUG_ON(!set->crtc);
569 570 BUG_ON(!set->crtc->helper_private);
570 if (!set->crtc)
571 return -EINVAL;
572 571
573 if (!set->crtc->helper_private) 572 /* Enforce sane interface api - has been abused by the fb helper. */
574 return -EINVAL; 573 BUG_ON(!set->mode && set->fb);
574 BUG_ON(set->fb && set->num_connectors == 0);
575 575
576 crtc_funcs = set->crtc->helper_private; 576 crtc_funcs = set->crtc->helper_private;
577 577
@@ -645,11 +645,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
645 mode_changed = true; 645 mode_changed = true;
646 } else if (set->fb == NULL) { 646 } else if (set->fb == NULL) {
647 mode_changed = true; 647 mode_changed = true;
648 } else if (set->fb->depth != set->crtc->fb->depth) {
649 mode_changed = true;
650 } else if (set->fb->bits_per_pixel !=
651 set->crtc->fb->bits_per_pixel) {
652 mode_changed = true;
653 } else if (set->fb->pixel_format != 648 } else if (set->fb->pixel_format !=
654 set->crtc->fb->pixel_format) { 649 set->crtc->fb->pixel_format) {
655 mode_changed = true; 650 mode_changed = true;
@@ -759,12 +754,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
759 ret = -EINVAL; 754 ret = -EINVAL;
760 goto fail; 755 goto fail;
761 } 756 }
762 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
763 for (i = 0; i < set->num_connectors; i++) {
764 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
765 drm_get_connector_name(set->connectors[i]));
766 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
767 }
768 } 757 }
769 drm_helper_disable_unused_functions(dev); 758 drm_helper_disable_unused_functions(dev);
770 } else if (fb_changed) { 759 } else if (fb_changed) {
@@ -782,6 +771,22 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
782 } 771 }
783 } 772 }
784 773
774 /*
775 * crtc set_config helpers implicitly set the crtc and all connected
776 * encoders to DPMS on for a full mode set. But for just an fb update it
777 * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
778 * unconditionally. This will also ensure driver internal dpms state is
779 * consistent again.
780 */
781 if (set->crtc->enabled) {
782 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
783 for (i = 0; i < set->num_connectors; i++) {
784 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
785 drm_get_connector_name(set->connectors[i]));
786 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
787 }
788 }
789
785 kfree(save_connectors); 790 kfree(save_connectors);
786 kfree(save_encoders); 791 kfree(save_encoders);
787 kfree(save_crtcs); 792 kfree(save_crtcs);
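
Turning the -EINVAL returns into BUG_ON() makes the helper's contract explicit, and the two new assertions additionally outlaw an fb without a mode and an fb without connectors. Under those rules a caller passes one of three shapes (set is a hypothetical struct drm_mode_set):

    /* full modeset: mode, fb and at least one connector */
    set.mode = &mode;  set.fb = fb;      set.num_connectors = 1;

    /* fb/pan-only update: keep the mode, swap the fb */
    set.mode = &mode;  set.fb = new_fb;  set.num_connectors = 1;

    /* disable: neither mode nor fb */
    set.mode = NULL;   set.fb = NULL;    set.num_connectors = 0;
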
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9cc247f55502..99fcd7c32ea2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -166,6 +166,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
166 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 166 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
167 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 167 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
168 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 168 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
169 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
169}; 170};
170 171
171#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 172#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9e62bbedb5ad..95d6f4b6967c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -968,6 +968,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
968 u8 csum = 0; 968 u8 csum = 0;
969 struct edid *edid = (struct edid *)raw_edid; 969 struct edid *edid = (struct edid *)raw_edid;
970 970
971 if (WARN_ON(!raw_edid))
972 return false;
973
971 if (edid_fixup > 8 || edid_fixup < 0) 974 if (edid_fixup > 8 || edid_fixup < 0)
972 edid_fixup = 6; 975 edid_fixup = 6;
973 976
@@ -1010,15 +1013,15 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
1010 break; 1013 break;
1011 } 1014 }
1012 1015
1013 return 1; 1016 return true;
1014 1017
1015bad: 1018bad:
1016 if (raw_edid && print_bad_edid) { 1019 if (print_bad_edid) {
1017 printk(KERN_ERR "Raw EDID:\n"); 1020 printk(KERN_ERR "Raw EDID:\n");
1018 print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, 1021 print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
1019 raw_edid, EDID_LENGTH, false); 1022 raw_edid, EDID_LENGTH, false);
1020 } 1023 }
1021 return 0; 1024 return false;
1022} 1025}
1023EXPORT_SYMBOL(drm_edid_block_valid); 1026EXPORT_SYMBOL(drm_edid_block_valid);
1024 1027
@@ -1706,11 +1709,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
1706 return NULL; 1709 return NULL;
1707 1710
1708 if (pt->misc & DRM_EDID_PT_STEREO) { 1711 if (pt->misc & DRM_EDID_PT_STEREO) {
1709 printk(KERN_WARNING "stereo mode not supported\n"); 1712 DRM_DEBUG_KMS("stereo mode not supported\n");
1710 return NULL; 1713 return NULL;
1711 } 1714 }
1712 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { 1715 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
1713 printk(KERN_WARNING "composite sync not supported\n"); 1716 DRM_DEBUG_KMS("composite sync not supported\n");
1714 } 1717 }
1715 1718
1716 /* it is incorrect if hsync/vsync width is zero */ 1719 /* it is incorrect if hsync/vsync width is zero */
@@ -2321,6 +2324,31 @@ u8 *drm_find_cea_extension(struct edid *edid)
2321} 2324}
2322EXPORT_SYMBOL(drm_find_cea_extension); 2325EXPORT_SYMBOL(drm_find_cea_extension);
2323 2326
2327/*
2328 * Calculate the alternate clock for the CEA mode
2329 * (60Hz vs. 59.94Hz etc.)
2330 */
2331static unsigned int
2332cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
2333{
2334 unsigned int clock = cea_mode->clock;
2335
2336 if (cea_mode->vrefresh % 6 != 0)
2337 return clock;
2338
2339 /*
2340 * edid_cea_modes contains the 59.94Hz
2341 * variant for 240 and 480 line modes,
2342 * and the 60Hz variant otherwise.
2343 */
2344 if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
2345 clock = clock * 1001 / 1000;
2346 else
2347 clock = DIV_ROUND_UP(clock * 1000, 1001);
2348
2349 return clock;
2350}
2351
2324/** 2352/**
2325 * drm_match_cea_mode - look for a CEA mode matching given mode 2353 * drm_match_cea_mode - look for a CEA mode matching given mode
2326 * @to_match: display mode 2354 * @to_match: display mode
@@ -2339,21 +2367,9 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2339 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; 2367 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
2340 unsigned int clock1, clock2; 2368 unsigned int clock1, clock2;
2341 2369
2342 clock1 = clock2 = cea_mode->clock;
2343
2344 /* Check both 60Hz and 59.94Hz */ 2370 /* Check both 60Hz and 59.94Hz */
2345 if (cea_mode->vrefresh % 6 == 0) { 2371 clock1 = cea_mode->clock;
2346 /* 2372 clock2 = cea_mode_alternate_clock(cea_mode);
2347 * edid_cea_modes contains the 59.94Hz
2348 * variant for 240 and 480 line modes,
2349 * and the 60Hz variant otherwise.
2350 */
2351 if (cea_mode->vdisplay == 240 ||
2352 cea_mode->vdisplay == 480)
2353 clock1 = clock1 * 1001 / 1000;
2354 else
2355 clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
2356 }
2357 2373
2358 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || 2374 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2359 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && 2375 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
@@ -2364,6 +2380,66 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2364} 2380}
2365EXPORT_SYMBOL(drm_match_cea_mode); 2381EXPORT_SYMBOL(drm_match_cea_mode);
2366 2382
2383static int
2384add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2385{
2386 struct drm_device *dev = connector->dev;
2387 struct drm_display_mode *mode, *tmp;
2388 LIST_HEAD(list);
2389 int modes = 0;
2390
2391 /* Don't add CEA modes if the CEA extension block is missing */
2392 if (!drm_find_cea_extension(edid))
2393 return 0;
2394
2395 /*
2396 * Go through all probed modes and create a new mode
2397 * with the alternate clock for certain CEA modes.
2398 */
2399 list_for_each_entry(mode, &connector->probed_modes, head) {
2400 const struct drm_display_mode *cea_mode;
2401 struct drm_display_mode *newmode;
2402 u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
2403 unsigned int clock1, clock2;
2404
2405 if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
2406 continue;
2407
2408 cea_mode = &edid_cea_modes[cea_mode_idx];
2409
2410 clock1 = cea_mode->clock;
2411 clock2 = cea_mode_alternate_clock(cea_mode);
2412
2413 if (clock1 == clock2)
2414 continue;
2415
2416 if (mode->clock != clock1 && mode->clock != clock2)
2417 continue;
2418
2419 newmode = drm_mode_duplicate(dev, cea_mode);
2420 if (!newmode)
2421 continue;
2422
2423 /*
2424 * The current mode could be either variant. Make
2425 * sure to pick the "other" clock for the new mode.
2426 */
2427 if (mode->clock != clock1)
2428 newmode->clock = clock1;
2429 else
2430 newmode->clock = clock2;
2431
2432 list_add_tail(&newmode->head, &list);
2433 }
2434
2435 list_for_each_entry_safe(mode, tmp, &list, head) {
2436 list_del(&mode->head);
2437 drm_mode_probed_add(connector, mode);
2438 modes++;
2439 }
2440
2441 return modes;
2442}
2367 2443
2368static int 2444static int
2369do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) 2445do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
@@ -2946,6 +3022,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
2946 if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) 3022 if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
2947 num_modes += add_inferred_modes(connector, edid); 3023 num_modes += add_inferred_modes(connector, edid);
2948 num_modes += add_cea_modes(connector, edid); 3024 num_modes += add_cea_modes(connector, edid);
3025 num_modes += add_alternate_cea_modes(connector, edid);
2949 3026
2950 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 3027 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
2951 edid_fixup_preferred(connector, quirks); 3028 edid_fixup_preferred(connector, quirks);
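
A worked example of cea_mode_alternate_clock(): the table stores 1080p with its 60 Hz clock and the 240/480-line modes with their 59.94 Hz clock, so the helper derives the missing variant in each case. The arithmetic, assuming the standard CEA-861 clocks:

    /* 1920x1080@60: table stores 148500 kHz, derive the 59.94 Hz variant */
    DIV_ROUND_UP(148500 * 1000, 1001);   /* = 148352 kHz */

    /* 720x480@59.94: table stores 27000 kHz, derive the 60 Hz variant */
    27000 * 1001 / 1000;                 /* = 27027 kHz */
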
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index fa445dd4dc00..a4f5ce14dc1c 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -186,12 +186,11 @@ static u8 *edid_load(struct drm_connector *connector, char *name,
186 goto relfw_out; 186 goto relfw_out;
187 } 187 }
188 188
189 edid = kmalloc(fwsize, GFP_KERNEL); 189 edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
190 if (edid == NULL) { 190 if (edid == NULL) {
191 err = -ENOMEM; 191 err = -ENOMEM;
192 goto relfw_out; 192 goto relfw_out;
193 } 193 }
194 memcpy(edid, fwdata, fwsize);
195 194
196 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { 195 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
197 connector->bad_edid_counter++; 196 connector->bad_edid_counter++;
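
The kmemdup() conversion is a pure simplification; it is exactly the allocate-and-copy pair it replaces:

    /* before */
    edid = kmalloc(fwsize, GFP_KERNEL);
    if (edid)
        memcpy(edid, fwdata, fwsize);

    /* after */
    edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
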
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b78cbe74dadf..3d13ca6e257f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -168,6 +168,9 @@ static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_h
168 uint16_t *r_base, *g_base, *b_base; 168 uint16_t *r_base, *g_base, *b_base;
169 int i; 169 int i;
170 170
171 if (helper->funcs->gamma_get == NULL)
172 return;
173
171 r_base = crtc->gamma_store; 174 r_base = crtc->gamma_store;
172 g_base = r_base + crtc->gamma_size; 175 g_base = r_base + crtc->gamma_size;
173 b_base = g_base + crtc->gamma_size; 176 b_base = g_base + crtc->gamma_size;
@@ -284,13 +287,27 @@ EXPORT_SYMBOL(drm_fb_helper_debug_leave);
284 */ 287 */
285bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) 288bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
286{ 289{
290 struct drm_device *dev = fb_helper->dev;
291 struct drm_plane *plane;
287 bool error = false; 292 bool error = false;
288 int i, ret; 293 int i;
294
295 drm_warn_on_modeset_not_all_locked(dev);
289 296
290 drm_warn_on_modeset_not_all_locked(fb_helper->dev); 297 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
298 drm_plane_force_disable(plane);
291 299
292 for (i = 0; i < fb_helper->crtc_count; i++) { 300 for (i = 0; i < fb_helper->crtc_count; i++) {
293 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 301 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
302 struct drm_crtc *crtc = mode_set->crtc;
303 int ret;
304
305 if (crtc->funcs->cursor_set) {
306 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
307 if (ret)
308 error = true;
309 }
310
294 ret = drm_mode_set_config_internal(mode_set); 311 ret = drm_mode_set_config_internal(mode_set);
295 if (ret) 312 if (ret)
296 error = true; 313 error = true;
@@ -583,6 +600,14 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
583 return 0; 600 return 0;
584 } 601 }
585 602
603 /*
604 * The driver really shouldn't advertise pseudo/directcolor
605 * visuals if it can't deal with the palette.
606 */
607 if (WARN_ON(!fb_helper->funcs->gamma_set ||
608 !fb_helper->funcs->gamma_get))
609 return -EINVAL;
610
586 pindex = regno; 611 pindex = regno;
587 612
588 if (fb->bits_per_pixel == 16) { 613 if (fb->bits_per_pixel == 16) {
@@ -626,12 +651,19 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
626int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) 651int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
627{ 652{
628 struct drm_fb_helper *fb_helper = info->par; 653 struct drm_fb_helper *fb_helper = info->par;
654 struct drm_device *dev = fb_helper->dev;
629 struct drm_crtc_helper_funcs *crtc_funcs; 655 struct drm_crtc_helper_funcs *crtc_funcs;
630 u16 *red, *green, *blue, *transp; 656 u16 *red, *green, *blue, *transp;
631 struct drm_crtc *crtc; 657 struct drm_crtc *crtc;
632 int i, j, rc = 0; 658 int i, j, rc = 0;
633 int start; 659 int start;
634 660
661 drm_modeset_lock_all(dev);
662 if (!drm_fb_helper_is_bound(fb_helper)) {
663 drm_modeset_unlock_all(dev);
664 return -EBUSY;
665 }
666
635 for (i = 0; i < fb_helper->crtc_count; i++) { 667 for (i = 0; i < fb_helper->crtc_count; i++) {
636 crtc = fb_helper->crtc_info[i].mode_set.crtc; 668 crtc = fb_helper->crtc_info[i].mode_set.crtc;
637 crtc_funcs = crtc->helper_private; 669 crtc_funcs = crtc->helper_private;
@@ -654,10 +686,13 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
654 686
655 rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); 687 rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
656 if (rc) 688 if (rc)
657 return rc; 689 goto out;
658 } 690 }
659 crtc_funcs->load_lut(crtc); 691 if (crtc_funcs->load_lut)
692 crtc_funcs->load_lut(crtc);
660 } 693 }
694 out:
695 drm_modeset_unlock_all(dev);
661 return rc; 696 return rc;
662} 697}
663EXPORT_SYMBOL(drm_fb_helper_setcmap); 698EXPORT_SYMBOL(drm_fb_helper_setcmap);
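
The setcmap change follows the same shape as the other fbdev entry points fixed in this series: take every modeset lock, back off with -EBUSY when fbdev no longer owns the display, and route all exits through one unlock point. The skeleton, using only functions from this patch:

    drm_modeset_lock_all(dev);
    if (!drm_fb_helper_is_bound(fb_helper)) {
        drm_modeset_unlock_all(dev);
        return -EBUSY;
    }

    /* ... per-crtc work; any failure does "goto out" ... */

out:
    drm_modeset_unlock_all(dev);
    return rc;
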
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 429e07d0b0f1..3a24385e0368 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,6 +271,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
271 priv->uid = current_euid(); 271 priv->uid = current_euid();
272 priv->pid = get_pid(task_pid(current)); 272 priv->pid = get_pid(task_pid(current));
273 priv->minor = idr_find(&drm_minors_idr, minor_id); 273 priv->minor = idr_find(&drm_minors_idr, minor_id);
274 if (!priv->minor) {
275 ret = -ENODEV;
276 goto out_put_pid;
277 }
278
274 priv->ioctl_count = 0; 279 priv->ioctl_count = 0;
275 /* for compatibility root is always authenticated */ 280 /* for compatibility root is always authenticated */
276 priv->authenticated = capable(CAP_SYS_ADMIN); 281 priv->authenticated = capable(CAP_SYS_ADMIN);
@@ -292,7 +297,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
292 if (dev->driver->open) { 297 if (dev->driver->open) {
293 ret = dev->driver->open(dev, priv); 298 ret = dev->driver->open(dev, priv);
294 if (ret < 0) 299 if (ret < 0)
295 goto out_free; 300 goto out_prime_destroy;
296 } 301 }
297 302
298 303
@@ -304,7 +309,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
304 if (!priv->minor->master) { 309 if (!priv->minor->master) {
305 mutex_unlock(&dev->struct_mutex); 310 mutex_unlock(&dev->struct_mutex);
306 ret = -ENOMEM; 311 ret = -ENOMEM;
307 goto out_free; 312 goto out_close;
308 } 313 }
309 314
310 priv->is_master = 1; 315 priv->is_master = 1;
@@ -322,7 +327,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
322 drm_master_put(&priv->minor->master); 327 drm_master_put(&priv->minor->master);
323 drm_master_put(&priv->master); 328 drm_master_put(&priv->master);
324 mutex_unlock(&dev->struct_mutex); 329 mutex_unlock(&dev->struct_mutex);
325 goto out_free; 330 goto out_close;
326 } 331 }
327 } 332 }
328 mutex_lock(&dev->struct_mutex); 333 mutex_lock(&dev->struct_mutex);
@@ -333,7 +338,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
333 drm_master_put(&priv->minor->master); 338 drm_master_put(&priv->minor->master);
334 drm_master_put(&priv->master); 339 drm_master_put(&priv->master);
335 mutex_unlock(&dev->struct_mutex); 340 mutex_unlock(&dev->struct_mutex);
336 goto out_free; 341 goto out_close;
337 } 342 }
338 } 343 }
339 mutex_unlock(&dev->struct_mutex); 344 mutex_unlock(&dev->struct_mutex);
@@ -367,7 +372,17 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
367#endif 372#endif
368 373
369 return 0; 374 return 0;
370 out_free: 375
376out_close:
377 if (dev->driver->postclose)
378 dev->driver->postclose(dev, priv);
379out_prime_destroy:
380 if (drm_core_check_feature(dev, DRIVER_PRIME))
381 drm_prime_destroy_file_private(&priv->prime);
382 if (dev->driver->driver_features & DRIVER_GEM)
383 drm_gem_release(dev, priv);
384out_put_pid:
385 put_pid(priv->pid);
371 kfree(priv); 386 kfree(priv);
372 filp->private_data = NULL; 387 filp->private_data = NULL;
373 return ret; 388 return ret;
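
The relabelled error path restores the usual kernel unwind idiom: every failure jumps to a label that tears down only what has already been set up, so the labels appear in reverse order of initialization. In miniature (step_a/step_b/undo_a are placeholders):

    ret = step_a();
    if (ret)
        goto err;

    ret = step_b();
    if (ret)
        goto err_undo_a;

    return 0;

err_undo_a:
    undo_a();
err:
    return ret;
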
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index cf919e36e8ae..603f256152ef 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -108,12 +108,8 @@ drm_gem_init(struct drm_device *dev)
108 return -ENOMEM; 108 return -ENOMEM;
109 } 109 }
110 110
111 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, 111 drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
112 DRM_FILE_PAGE_OFFSET_SIZE)) { 112 DRM_FILE_PAGE_OFFSET_SIZE);
113 drm_ht_remove(&mm->offset_hash);
114 kfree(mm);
115 return -ENOMEM;
116 }
117 113
118 return 0; 114 return 0;
119} 115}
@@ -453,25 +449,21 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
453 spin_lock(&dev->object_name_lock); 449 spin_lock(&dev->object_name_lock);
454 if (!obj->name) { 450 if (!obj->name) {
455 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 451 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
456 obj->name = ret;
457 args->name = (uint64_t) obj->name;
458 spin_unlock(&dev->object_name_lock);
459 idr_preload_end();
460
461 if (ret < 0) 452 if (ret < 0)
462 goto err; 453 goto err;
463 ret = 0; 454
455 obj->name = ret;
464 456
465 /* Allocate a reference for the name table. */ 457 /* Allocate a reference for the name table. */
466 drm_gem_object_reference(obj); 458 drm_gem_object_reference(obj);
467 } else {
468 args->name = (uint64_t) obj->name;
469 spin_unlock(&dev->object_name_lock);
470 idr_preload_end();
471 ret = 0;
472 } 459 }
473 460
461 args->name = (uint64_t) obj->name;
462 ret = 0;
463
474err: 464err:
465 spin_unlock(&dev->object_name_lock);
466 idr_preload_end();
475 drm_gem_object_unreference_unlocked(obj); 467 drm_gem_object_unreference_unlocked(obj);
476 return ret; 468 return ret;
477} 469}
@@ -644,6 +636,59 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
644} 636}
645EXPORT_SYMBOL(drm_gem_vm_close); 637EXPORT_SYMBOL(drm_gem_vm_close);
646 638
639/**
640 * drm_gem_mmap_obj - memory map a GEM object
641 * @obj: the GEM object to map
642 * @obj_size: the object size to be mapped, in bytes
643 * @vma: VMA for the area to be mapped
644 *
645 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
646 * provided by the driver. Depending on their requirements, drivers can either
647 * provide a fault handler in their gem_vm_ops (in which case any accesses to
648 * the object will be trapped, to perform migration, GTT binding, surface
649 * register allocation, or performance monitoring), or mmap the buffer memory
650 * synchronously after calling drm_gem_mmap_obj.
651 *
652 * This function is mainly intended to implement the DMABUF mmap operation, when
653 * the GEM object is not looked up based on its fake offset. To implement the
654 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
655 *
656 * NOTE: This function must be called with dev->struct_mutex held.
657 *
658 * Return 0 on success, or -EINVAL if the object size is smaller than the
659 * VMA size or if no gem_vm_ops are provided.
660 */
661int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
662 struct vm_area_struct *vma)
663{
664 struct drm_device *dev = obj->dev;
665
666 lockdep_assert_held(&dev->struct_mutex);
667
668 /* Check for valid size. */
669 if (obj_size < vma->vm_end - vma->vm_start)
670 return -EINVAL;
671
672 if (!dev->driver->gem_vm_ops)
673 return -EINVAL;
674
675 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
676 vma->vm_ops = dev->driver->gem_vm_ops;
677 vma->vm_private_data = obj;
678 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
679
680 /* Take a ref for this mapping of the object, so that the fault
681 * handler can dereference the mmap offset's pointer to the object.
682 * This reference is cleaned up by the corresponding vm_close
683 * (which should happen whether the vma was created by this call, or
684 * by a vm_open due to mremap or partial unmap or whatever).
685 */
686 drm_gem_object_reference(obj);
687
688 drm_vm_open_locked(dev, vma);
689 return 0;
690}
691EXPORT_SYMBOL(drm_gem_mmap_obj);
647 692
648/** 693/**
649 * drm_gem_mmap - memory map routine for GEM objects 694 * drm_gem_mmap - memory map routine for GEM objects
@@ -653,11 +698,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
653 * If a driver supports GEM object mapping, mmap calls on the DRM file 698 * If a driver supports GEM object mapping, mmap calls on the DRM file
654 * descriptor will end up here. 699 * descriptor will end up here.
655 * 700 *
656 * If we find the object based on the offset passed in (vma->vm_pgoff will 701 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
657 * contain the fake offset we created when the GTT map ioctl was called on 702 * contain the fake offset we created when the GTT map ioctl was called on
658 * the object), we set up the driver fault handler so that any accesses 703 * the object) and map it with a call to drm_gem_mmap_obj().
659 * to the object can be trapped, to perform migration, GTT binding, surface
660 * register allocation, or performance monitoring.
661 */ 704 */
662int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 705int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
663{ 706{
@@ -665,7 +708,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
665 struct drm_device *dev = priv->minor->dev; 708 struct drm_device *dev = priv->minor->dev;
666 struct drm_gem_mm *mm = dev->mm_private; 709 struct drm_gem_mm *mm = dev->mm_private;
667 struct drm_local_map *map = NULL; 710 struct drm_local_map *map = NULL;
668 struct drm_gem_object *obj;
669 struct drm_hash_item *hash; 711 struct drm_hash_item *hash;
670 int ret = 0; 712 int ret = 0;
671 713
@@ -686,32 +728,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
686 goto out_unlock; 728 goto out_unlock;
687 } 729 }
688 730
689 /* Check for valid size. */ 731 ret = drm_gem_mmap_obj(map->handle, map->size, vma);
690 if (map->size < vma->vm_end - vma->vm_start) {
691 ret = -EINVAL;
692 goto out_unlock;
693 }
694
695 obj = map->handle;
696 if (!obj->dev->driver->gem_vm_ops) {
697 ret = -EINVAL;
698 goto out_unlock;
699 }
700
701 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
702 vma->vm_ops = obj->dev->driver->gem_vm_ops;
703 vma->vm_private_data = map->handle;
704 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
705
706 /* Take a ref for this mapping of the object, so that the fault
707 * handler can dereference the mmap offset's pointer to the object.
708 * This reference is cleaned up by the corresponding vm_close
709 * (which should happen whether the vma was created by this call, or
710 * by a vm_open due to mremap or partial unmap or whatever).
711 */
712 drm_gem_object_reference(obj);
713
714 drm_vm_open_locked(dev, vma);
715 732
716out_unlock: 733out_unlock:
717 mutex_unlock(&dev->struct_mutex); 734 mutex_unlock(&dev->struct_mutex);
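
Splitting drm_gem_mmap_obj() out lets dma-buf exporters map a GEM object without the fake-offset lookup that drm_gem_mmap() performs. A sketch of a dma-buf ->mmap built on it (foo_ is a placeholder; the CMA helper below does essentially this):

    static int foo_dmabuf_mmap(struct dma_buf *buf,
                               struct vm_area_struct *vma)
    {
        struct drm_gem_object *obj = buf->priv;
        struct drm_device *dev = obj->dev;
        int ret;

        /* drm_gem_mmap_obj() must run under struct_mutex */
        mutex_lock(&dev->struct_mutex);
        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
    }
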
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0a7e011509bd..ce063970d68c 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <linux/dma-buf.h>
24#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
25 26
26#include <drm/drmP.h> 27#include <drm/drmP.h>
@@ -32,11 +33,44 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
32 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; 33 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
33} 34}
34 35
35static void drm_gem_cma_buf_destroy(struct drm_device *drm, 36/*
36 struct drm_gem_cma_object *cma_obj) 37 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
38 * @drm: The drm device
39 * @size: The GEM object size
40 *
41 * This function creates and initializes a GEM CMA object of the given size, but
42 * doesn't allocate any memory to back the object.
43 *
44 * Return a struct drm_gem_cma_object * on success or an ERR_PTR() value on failure.
45 */
46static struct drm_gem_cma_object *
47__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
37{ 48{
38 dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr, 49 struct drm_gem_cma_object *cma_obj;
39 cma_obj->paddr); 50 struct drm_gem_object *gem_obj;
51 int ret;
52
53 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
54 if (!cma_obj)
55 return ERR_PTR(-ENOMEM);
56
57 gem_obj = &cma_obj->base;
58
59 ret = drm_gem_object_init(drm, gem_obj, size);
60 if (ret)
61 goto error;
62
63 ret = drm_gem_create_mmap_offset(gem_obj);
64 if (ret) {
65 drm_gem_object_release(gem_obj);
66 goto error;
67 }
68
69 return cma_obj;
70
71error:
72 kfree(cma_obj);
73 return ERR_PTR(ret);
40} 74}
41 75
42/* 76/*
@@ -49,44 +83,42 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
49 unsigned int size) 83 unsigned int size)
50{ 84{
51 struct drm_gem_cma_object *cma_obj; 85 struct drm_gem_cma_object *cma_obj;
52 struct drm_gem_object *gem_obj; 86 struct sg_table *sgt = NULL;
53 int ret; 87 int ret;
54 88
55 size = round_up(size, PAGE_SIZE); 89 size = round_up(size, PAGE_SIZE);
56 90
57 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); 91 cma_obj = __drm_gem_cma_create(drm, size);
58 if (!cma_obj) 92 if (IS_ERR(cma_obj))
59 return ERR_PTR(-ENOMEM); 93 return cma_obj;
60 94
61 cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, 95 cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
62 &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); 96 &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
63 if (!cma_obj->vaddr) { 97 if (!cma_obj->vaddr) {
64 dev_err(drm->dev, "failed to allocate buffer with size %d\n", size); 98 dev_err(drm->dev, "failed to allocate buffer with size %d\n",
99 size);
65 ret = -ENOMEM; 100 ret = -ENOMEM;
66 goto err_dma_alloc; 101 goto error;
67 } 102 }
68 103
69 gem_obj = &cma_obj->base; 104 sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
105 if (sgt == NULL) {
106 ret = -ENOMEM;
107 goto error;
108 }
70 109
71 ret = drm_gem_object_init(drm, gem_obj, size); 110 ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
72 if (ret) 111 cma_obj->paddr, size);
73 goto err_obj_init; 112 if (ret < 0)
113 goto error;
74 114
75 ret = drm_gem_create_mmap_offset(gem_obj); 115 cma_obj->sgt = sgt;
76 if (ret)
77 goto err_create_mmap_offset;
78 116
79 return cma_obj; 117 return cma_obj;
80 118
81err_create_mmap_offset: 119error:
82 drm_gem_object_release(gem_obj); 120 kfree(sgt);
83 121 drm_gem_cma_free_object(&cma_obj->base);
84err_obj_init:
85 drm_gem_cma_buf_destroy(drm, cma_obj);
86
87err_dma_alloc:
88 kfree(cma_obj);
89
90 return ERR_PTR(ret); 122 return ERR_PTR(ret);
91} 123}
92EXPORT_SYMBOL_GPL(drm_gem_cma_create); 124EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -143,11 +175,20 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
143 if (gem_obj->map_list.map) 175 if (gem_obj->map_list.map)
144 drm_gem_free_mmap_offset(gem_obj); 176 drm_gem_free_mmap_offset(gem_obj);
145 177
146 drm_gem_object_release(gem_obj);
147
148 cma_obj = to_drm_gem_cma_obj(gem_obj); 178 cma_obj = to_drm_gem_cma_obj(gem_obj);
149 179
150 drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj); 180 if (cma_obj->vaddr) {
181 dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
182 cma_obj->vaddr, cma_obj->paddr);
183 if (cma_obj->sgt) {
184 sg_free_table(cma_obj->sgt);
185 kfree(cma_obj->sgt);
186 }
187 } else if (gem_obj->import_attach) {
188 drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
189 }
190
191 drm_gem_object_release(gem_obj);
151 192
152 kfree(cma_obj); 193 kfree(cma_obj);
153} 194}
@@ -174,10 +215,7 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
174 215
175 cma_obj = drm_gem_cma_create_with_handle(file_priv, dev, 216 cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
176 args->size, &args->handle); 217 args->size, &args->handle);
177 if (IS_ERR(cma_obj)) 218 return PTR_RET(cma_obj);
178 return PTR_ERR(cma_obj);
179
180 return 0;
181} 219}
182EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); 220EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
183 221
@@ -215,13 +253,26 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = {
215}; 253};
216EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops); 254EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
217 255
256static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
257 struct vm_area_struct *vma)
258{
259 int ret;
260
261 ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
262 vma->vm_end - vma->vm_start, vma->vm_page_prot);
263 if (ret)
264 drm_gem_vm_close(vma);
265
266 return ret;
267}
268
218/* 269/*
219 * drm_gem_cma_mmap - (struct file_operation)->mmap callback function 270 * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
220 */ 271 */
221int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) 272int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
222{ 273{
223 struct drm_gem_object *gem_obj;
224 struct drm_gem_cma_object *cma_obj; 274 struct drm_gem_cma_object *cma_obj;
275 struct drm_gem_object *gem_obj;
225 int ret; 276 int ret;
226 277
227 ret = drm_gem_mmap(filp, vma); 278 ret = drm_gem_mmap(filp, vma);
@@ -231,12 +282,7 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
231 gem_obj = vma->vm_private_data; 282 gem_obj = vma->vm_private_data;
232 cma_obj = to_drm_gem_cma_obj(gem_obj); 283 cma_obj = to_drm_gem_cma_obj(gem_obj);
233 284
234 ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT, 285 return drm_gem_cma_mmap_obj(cma_obj, vma);
235 vma->vm_end - vma->vm_start, vma->vm_page_prot);
236 if (ret)
237 drm_gem_vm_close(vma);
238
239 return ret;
240} 286}
241EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); 287EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
242 288
@@ -270,3 +316,289 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
270} 316}
271EXPORT_SYMBOL_GPL(drm_gem_cma_describe); 317EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
272#endif 318#endif
319
320/* -----------------------------------------------------------------------------
321 * DMA-BUF
322 */
323
324struct drm_gem_cma_dmabuf_attachment {
325 struct sg_table sgt;
326 enum dma_data_direction dir;
327};
328
329static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
330 struct dma_buf_attachment *attach)
331{
332 struct drm_gem_cma_dmabuf_attachment *cma_attach;
333
334 cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
335 if (!cma_attach)
336 return -ENOMEM;
337
338 cma_attach->dir = DMA_NONE;
339 attach->priv = cma_attach;
340
341 return 0;
342}
343
344static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
345 struct dma_buf_attachment *attach)
346{
347 struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
348 struct sg_table *sgt;
349
350 if (cma_attach == NULL)
351 return;
352
353 sgt = &cma_attach->sgt;
354
355 if (cma_attach->dir != DMA_NONE)
356 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
357 cma_attach->dir);
358
359 sg_free_table(sgt);
360 kfree(cma_attach);
361 attach->priv = NULL;
362}
363
364static struct sg_table *
365drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
366 enum dma_data_direction dir)
367{
368 struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
369 struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
370 struct drm_device *drm = cma_obj->base.dev;
371 struct scatterlist *rd, *wr;
372 struct sg_table *sgt;
373 unsigned int i;
374 int nents, ret;
375
376 DRM_DEBUG_PRIME("\n");
377
378 if (WARN_ON(dir == DMA_NONE))
379 return ERR_PTR(-EINVAL);
380
381 /* Return the cached mapping when possible. */
382 if (cma_attach->dir == dir)
383 return &cma_attach->sgt;
384
385 /* Two mappings with different directions for the same attachment are
386 * not allowed.
387 */
388 if (WARN_ON(cma_attach->dir != DMA_NONE))
389 return ERR_PTR(-EBUSY);
390
391 sgt = &cma_attach->sgt;
392
393 ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
394 if (ret) {
395 DRM_ERROR("failed to alloc sgt.\n");
396 return ERR_PTR(-ENOMEM);
397 }
398
399 mutex_lock(&drm->struct_mutex);
400
401 rd = cma_obj->sgt->sgl;
402 wr = sgt->sgl;
403 for (i = 0; i < sgt->orig_nents; ++i) {
404 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
405 rd = sg_next(rd);
406 wr = sg_next(wr);
407 }
408
409 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
410 if (!nents) {
411 DRM_ERROR("failed to map sgl with iommu.\n");
412 sg_free_table(sgt);
413 sgt = ERR_PTR(-EIO);
414 goto done;
415 }
416
417 cma_attach->dir = dir;
418 attach->priv = cma_attach;
419
420 DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
421
422done:
423 mutex_unlock(&drm->struct_mutex);
424 return sgt;
425}
426
427static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
428 struct sg_table *sgt,
429 enum dma_data_direction dir)
430{
431 /* Nothing to do. */
432}
433
434static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
435{
436 struct drm_gem_cma_object *cma_obj = dmabuf->priv;
437
438 DRM_DEBUG_PRIME("%s\n", __FILE__);
439
440 /*
441 * A drm_gem_cma_dmabuf_release() call means that the file object's
442 * f_count has dropped to zero, so release the GEM object reference
443 * that was taken when the buffer was exported in
444 * drm_prime_handle_to_fd().
445 */
446 if (cma_obj->base.export_dma_buf == dmabuf) {
447 cma_obj->base.export_dma_buf = NULL;
448
449 /*
450 * drop this gem object refcount to release allocated buffer
451 * and resources.
452 */
453 drm_gem_object_unreference_unlocked(&cma_obj->base);
454 }
455}
456
457static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
458 unsigned long page_num)
459{
460 /* TODO */
461
462 return NULL;
463}
464
465static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
466 unsigned long page_num, void *addr)
467{
468 /* TODO */
469}
470
471static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
472 unsigned long page_num)
473{
474 /* TODO */
475
476 return NULL;
477}
478
479static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
480 unsigned long page_num, void *addr)
481{
482 /* TODO */
483}
484
485static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
486 struct vm_area_struct *vma)
487{
488 struct drm_gem_cma_object *cma_obj = dmabuf->priv;
489 struct drm_gem_object *gem_obj = &cma_obj->base;
490 struct drm_device *dev = gem_obj->dev;
491 int ret;
492
493 mutex_lock(&dev->struct_mutex);
494 ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
495 mutex_unlock(&dev->struct_mutex);
496 if (ret < 0)
497 return ret;
498
499 return drm_gem_cma_mmap_obj(cma_obj, vma);
500}
501
502static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
503{
504 struct drm_gem_cma_object *cma_obj = dmabuf->priv;
505
506 return cma_obj->vaddr;
507}
508
509static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
510 .attach = drm_gem_cma_dmabuf_attach,
511 .detach = drm_gem_cma_dmabuf_detach,
512 .map_dma_buf = drm_gem_cma_dmabuf_map,
513 .unmap_dma_buf = drm_gem_cma_dmabuf_unmap,
514 .kmap = drm_gem_cma_dmabuf_kmap,
515 .kmap_atomic = drm_gem_cma_dmabuf_kmap_atomic,
516 .kunmap = drm_gem_cma_dmabuf_kunmap,
517 .kunmap_atomic = drm_gem_cma_dmabuf_kunmap_atomic,
518 .mmap = drm_gem_cma_dmabuf_mmap,
519 .vmap = drm_gem_cma_dmabuf_vmap,
520 .release = drm_gem_cma_dmabuf_release,
521};
522
523struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
524 struct drm_gem_object *obj, int flags)
525{
526 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
527
528 return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
529 cma_obj->base.size, flags);
530}
531EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
532
533struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
534 struct dma_buf *dma_buf)
535{
536 struct drm_gem_cma_object *cma_obj;
537 struct dma_buf_attachment *attach;
538 struct sg_table *sgt;
539 int ret;
540
541 DRM_DEBUG_PRIME("%s\n", __FILE__);
542
543 /* is this one of own objects? */
544 if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
545 struct drm_gem_object *obj;
546
547 cma_obj = dma_buf->priv;
548 obj = &cma_obj->base;
549
550 /* is it from our device? */
551 if (obj->dev == drm) {
552 /*
553 * Importing a dmabuf exported from our own GEM increases the
554 * refcount on the GEM itself instead of the dmabuf's f_count.
555 */
556 drm_gem_object_reference(obj);
557 dma_buf_put(dma_buf);
558 return obj;
559 }
560 }
561
562 /* Create a CMA GEM buffer. */
563 cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
564 if (IS_ERR(cma_obj))
565 return ERR_PTR(PTR_ERR(cma_obj));
566
567 /* Attach to the buffer and map it. Make sure the mapping is contiguous
568 * on the device memory bus, as that's all we support.
569 */
570 attach = dma_buf_attach(dma_buf, drm->dev);
571 if (IS_ERR(attach)) {
572 ret = -EINVAL;
573 goto error_gem_free;
574 }
575
576 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
577 if (IS_ERR_OR_NULL(sgt)) {
578 ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
579 goto error_buf_detach;
580 }
581
582 if (sgt->nents != 1) {
583 ret = -EINVAL;
584 goto error_buf_unmap;
585 }
586
587 cma_obj->base.import_attach = attach;
588 cma_obj->paddr = sg_dma_address(sgt->sgl);
589 cma_obj->sgt = sgt;
590
591 DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
592 dma_buf->size);
593
594 return &cma_obj->base;
595
596error_buf_unmap:
597 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
598error_buf_detach:
599 dma_buf_detach(dma_buf, attach);
600error_gem_free:
601 drm_gem_cma_free_object(&cma_obj->base);
602 return ERR_PTR(ret);
603}
604EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
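
A CMA-based driver opts into this PRIME support by pointing its export/import hooks at the new helpers; attach caching and the single-segment check on import are handled here. A sketch with a hypothetical foo_driver:

    static struct drm_driver foo_driver = {
        .driver_features  = DRIVER_GEM | DRIVER_PRIME,
        .gem_free_object  = drm_gem_cma_free_object,
        .gem_vm_ops       = &drm_gem_cma_vm_ops,
        .gem_prime_export = drm_gem_cma_dmabuf_export,
        .gem_prime_import = drm_gem_cma_dmabuf_import,
        /* ... */
    };
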
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index e77bd8b57df2..ffd7a7ba70d4 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -38,6 +38,9 @@
38 38
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/export.h> 40#include <linux/export.h>
41#ifdef CONFIG_X86
42#include <asm/mtrr.h>
43#endif
41 44
42/** 45/**
43 * Get the bus id. 46 * Get the bus id.
@@ -181,7 +184,17 @@ int drm_getmap(struct drm_device *dev, void *data,
181 map->type = r_list->map->type; 184 map->type = r_list->map->type;
182 map->flags = r_list->map->flags; 185 map->flags = r_list->map->flags;
183 map->handle = (void *)(unsigned long) r_list->user_token; 186 map->handle = (void *)(unsigned long) r_list->user_token;
184 map->mtrr = r_list->map->mtrr; 187
188#ifdef CONFIG_X86
189 /*
190 * There appears to be exactly one user of the mtrr index: dritest.
191 * It's easy enough to keep it working on non-PAT systems.
192 */
193 map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
194#else
195 map->mtrr = -1;
196#endif
197
185 mutex_unlock(&dev->struct_mutex); 198 mutex_unlock(&dev->struct_mutex);
186 199
187 return 0; 200 return 0;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 07cf99cc8862..543b9b3171d3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -669,7 +669,7 @@ int drm_mm_clean(struct drm_mm * mm)
669} 669}
670EXPORT_SYMBOL(drm_mm_clean); 670EXPORT_SYMBOL(drm_mm_clean);
671 671
672int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 672void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
673{ 673{
674 INIT_LIST_HEAD(&mm->hole_stack); 674 INIT_LIST_HEAD(&mm->hole_stack);
675 INIT_LIST_HEAD(&mm->unused_nodes); 675 INIT_LIST_HEAD(&mm->unused_nodes);
@@ -690,8 +690,6 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
690 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); 690 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
691 691
692 mm->color_adjust = NULL; 692 mm->color_adjust = NULL;
693
694 return 0;
695} 693}
696EXPORT_SYMBOL(drm_mm_init); 694EXPORT_SYMBOL(drm_mm_init);
697 695
@@ -699,8 +697,8 @@ void drm_mm_takedown(struct drm_mm * mm)
699{ 697{
700 struct drm_mm_node *entry, *next; 698 struct drm_mm_node *entry, *next;
701 699
702 if (!list_empty(&mm->head_node.node_list)) { 700 if (WARN(!list_empty(&mm->head_node.node_list),
703 DRM_ERROR("Memory manager not clean. Delaying takedown\n"); 701 "Memory manager not clean. Delaying takedown\n")) {
704 return; 702 return;
705 } 703 }
706 704
@@ -716,36 +714,37 @@ void drm_mm_takedown(struct drm_mm * mm)
716} 714}
717EXPORT_SYMBOL(drm_mm_takedown); 715EXPORT_SYMBOL(drm_mm_takedown);
718 716
719void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) 717static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
718 const char *prefix)
720{ 719{
721 struct drm_mm_node *entry;
722 unsigned long total_used = 0, total_free = 0, total = 0;
723 unsigned long hole_start, hole_end, hole_size; 720 unsigned long hole_start, hole_end, hole_size;
724 721
725 hole_start = drm_mm_hole_node_start(&mm->head_node); 722 if (entry->hole_follows) {
726 hole_end = drm_mm_hole_node_end(&mm->head_node); 723 hole_start = drm_mm_hole_node_start(entry);
727 hole_size = hole_end - hole_start; 724 hole_end = drm_mm_hole_node_end(entry);
728 if (hole_size) 725 hole_size = hole_end - hole_start;
729 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", 726 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
730 prefix, hole_start, hole_end, 727 prefix, hole_start, hole_end,
731 hole_size); 728 hole_size);
732 total_free += hole_size; 729 return hole_size;
730 }
731
732 return 0;
733}
734
735void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
736{
737 struct drm_mm_node *entry;
738 unsigned long total_used = 0, total_free = 0, total = 0;
739
740 total_free += drm_mm_debug_hole(&mm->head_node, prefix);
733 741
734 drm_mm_for_each_node(entry, mm) { 742 drm_mm_for_each_node(entry, mm) {
735 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", 743 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
736 prefix, entry->start, entry->start + entry->size, 744 prefix, entry->start, entry->start + entry->size,
737 entry->size); 745 entry->size);
738 total_used += entry->size; 746 total_used += entry->size;
739 747 total_free += drm_mm_debug_hole(entry, prefix);
740 if (entry->hole_follows) {
741 hole_start = drm_mm_hole_node_start(entry);
742 hole_end = drm_mm_hole_node_end(entry);
743 hole_size = hole_end - hole_start;
744 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
745 prefix, hole_start, hole_end,
746 hole_size);
747 total_free += hole_size;
748 }
749 } 748 }
750 total = total_free + total_used; 749 total = total_free + total_used;
751 750
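
Since drm_mm_init() can no longer fail, its callers lose a branch; the drm_gem_init() hunk earlier in this diff shows exactly this pattern:

    /* before */
    if (drm_mm_init(&mm->offset_manager, start, size))
        goto err;

    /* after */
    drm_mm_init(&mm->offset_manager, start, size);
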
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a371ff865a88..a6729bfe6860 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -535,6 +535,8 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
535 dmode->flags |= DRM_MODE_FLAG_INTERLACE; 535 dmode->flags |= DRM_MODE_FLAG_INTERLACE;
536 if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN) 536 if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
537 dmode->flags |= DRM_MODE_FLAG_DBLSCAN; 537 dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
538 if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
539 dmode->flags |= DRM_MODE_FLAG_DBLCLK;
538 drm_mode_set_name(dmode); 540 drm_mode_set_name(dmode);
539 541
540 return 0; 542 return 0;
@@ -787,16 +789,17 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
787 * LOCKING: 789 * LOCKING:
788 * None. 790 * None.
789 * 791 *
790 * Copy an existing mode into another mode, preserving the object id 792 * Copy an existing mode into another mode, preserving the object id and
791 * of the destination mode. 793 * list head of the destination mode.
792 */ 794 */
793void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src) 795void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
794{ 796{
795 int id = dst->base.id; 797 int id = dst->base.id;
798 struct list_head head = dst->head;
796 799
797 *dst = *src; 800 *dst = *src;
798 dst->base.id = id; 801 dst->base.id = id;
799 INIT_LIST_HEAD(&dst->head); 802 dst->head = head;
800} 803}
801EXPORT_SYMBOL(drm_mode_copy); 804EXPORT_SYMBOL(drm_mode_copy);
802 805
@@ -1017,6 +1020,11 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
1017 diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay; 1020 diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
1018 if (diff) 1021 if (diff)
1019 return diff; 1022 return diff;
1023
1024 diff = b->vrefresh - a->vrefresh;
1025 if (diff)
1026 return diff;
1027
1020 diff = b->clock - a->clock; 1028 diff = b->clock - a->clock;
1021 return diff; 1029 return diff;
1022} 1030}
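
The drm_mode_copy() change is subtle: re-initializing dst->head silently unlinked a destination mode that was already on a connector's mode list; preserving the head keeps the list intact. A sketch of the case it fixes:

    /* dst is already linked on connector->modes */
    drm_mode_copy(dst, src);

    /* dst->base.id and dst->head survive, so the list walk is safe */
    list_for_each_entry(mode, &connector->modes, head)
        drm_mode_debug_printmodeline(mode);
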
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 14194b6ef644..80c0b2b29801 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -278,10 +278,10 @@ static int drm_pci_agp_init(struct drm_device *dev)
278 } 278 }
279 if (drm_core_has_MTRR(dev)) { 279 if (drm_core_has_MTRR(dev)) {
280 if (dev->agp) 280 if (dev->agp)
281 dev->agp->agp_mtrr = 281 dev->agp->agp_mtrr = arch_phys_wc_add(
282 mtrr_add(dev->agp->agp_info.aper_base, 282 dev->agp->agp_info.aper_base,
283 dev->agp->agp_info.aper_size * 283 dev->agp->agp_info.aper_size *
284 1024 * 1024, MTRR_TYPE_WRCOMB, 1); 284 1024 * 1024);
285 } 285 }
286 } 286 }
287 return 0; 287 return 0;
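
arch_phys_wc_add() is the PAT-aware replacement for open-coded mtrr_add(): it adds a write-combining MTRR only on non-PAT x86 systems, returns a handle (a no-op value under PAT), and compiles away on other architectures. The pairing, sketched with placeholder base/size values:

    int wc_handle;

    wc_handle = arch_phys_wc_add(aper_base, aper_size);
    /* ... map the aperture with ioremap_wc() ... */
    arch_phys_wc_del(wc_handle);
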
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 5b7b9110254b..1e0de41f085c 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,20 +62,124 @@ struct drm_prime_member {
62 struct dma_buf *dma_buf; 62 struct dma_buf *dma_buf;
63 uint32_t handle; 63 uint32_t handle;
64}; 64};
65static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); 65
66struct drm_prime_attachment {
67 struct sg_table *sgt;
68 enum dma_data_direction dir;
69};
70
71static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
72{
73 struct drm_prime_member *member;
74
75 member = kmalloc(sizeof(*member), GFP_KERNEL);
76 if (!member)
77 return -ENOMEM;
78
79 get_dma_buf(dma_buf);
80 member->dma_buf = dma_buf;
81 member->handle = handle;
82 list_add(&member->entry, &prime_fpriv->head);
83 return 0;
84}
85
86static int drm_gem_map_attach(struct dma_buf *dma_buf,
87 struct device *target_dev,
88 struct dma_buf_attachment *attach)
89{
90 struct drm_prime_attachment *prime_attach;
91 struct drm_gem_object *obj = dma_buf->priv;
92 struct drm_device *dev = obj->dev;
93
94 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
95 if (!prime_attach)
96 return -ENOMEM;
97
98 prime_attach->dir = DMA_NONE;
99 attach->priv = prime_attach;
100
101 if (!dev->driver->gem_prime_pin)
102 return 0;
103
104 return dev->driver->gem_prime_pin(obj);
105}
106
107static void drm_gem_map_detach(struct dma_buf *dma_buf,
108 struct dma_buf_attachment *attach)
109{
110 struct drm_prime_attachment *prime_attach = attach->priv;
111 struct drm_gem_object *obj = dma_buf->priv;
112 struct drm_device *dev = obj->dev;
113 struct sg_table *sgt;
114
115 if (dev->driver->gem_prime_unpin)
116 dev->driver->gem_prime_unpin(obj);
117
118 if (!prime_attach)
119 return;
120
121 sgt = prime_attach->sgt;
122
123 if (prime_attach->dir != DMA_NONE)
124 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
125 prime_attach->dir);
126
127 sg_free_table(sgt);
128 kfree(sgt);
129 kfree(prime_attach);
130 attach->priv = NULL;
131}
132
133static void drm_prime_remove_buf_handle_locked(
134 struct drm_prime_file_private *prime_fpriv,
135 struct dma_buf *dma_buf)
136{
137 struct drm_prime_member *member, *safe;
138
139 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
140 if (member->dma_buf == dma_buf) {
141 dma_buf_put(dma_buf);
142 list_del(&member->entry);
143 kfree(member);
144 }
145 }
146}
66 147
67static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, 148static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
68 enum dma_data_direction dir) 149 enum dma_data_direction dir)
69{ 150{
151 struct drm_prime_attachment *prime_attach = attach->priv;
70 struct drm_gem_object *obj = attach->dmabuf->priv; 152 struct drm_gem_object *obj = attach->dmabuf->priv;
71 struct sg_table *sgt; 153 struct sg_table *sgt;
72 154
155 if (WARN_ON(dir == DMA_NONE || !prime_attach))
156 return ERR_PTR(-EINVAL);
157
158 /* return the cached mapping when possible */
159 if (prime_attach->dir == dir)
160 return prime_attach->sgt;
161
162 /*
163 * two mappings with different directions for the same attachment are
164 * not allowed
165 */
166 if (WARN_ON(prime_attach->dir != DMA_NONE))
167 return ERR_PTR(-EBUSY);
168
73 mutex_lock(&obj->dev->struct_mutex); 169 mutex_lock(&obj->dev->struct_mutex);
74 170
75 sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 171 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
76 172
77 if (!IS_ERR_OR_NULL(sgt)) 173 if (!IS_ERR(sgt)) {
78 dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); 174 if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
175 sg_free_table(sgt);
176 kfree(sgt);
177 sgt = ERR_PTR(-ENOMEM);
178 } else {
179 prime_attach->sgt = sgt;
180 prime_attach->dir = dir;
181 }
182 }
79 183
80 mutex_unlock(&obj->dev->struct_mutex); 184 mutex_unlock(&obj->dev->struct_mutex);
81 return sgt; 185 return sgt;
@@ -84,9 +188,7 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
84static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, 188static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
85 struct sg_table *sgt, enum dma_data_direction dir) 189 struct sg_table *sgt, enum dma_data_direction dir)
86{ 190{
87 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 191 /* nothing to be done here */
88 sg_free_table(sgt);
89 kfree(sgt);
90} 192}
91 193
92static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) 194static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
@@ -146,6 +248,8 @@ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
146} 248}
147 249
148static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { 250static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
251 .attach = drm_gem_map_attach,
252 .detach = drm_gem_map_detach,
149 .map_dma_buf = drm_gem_map_dma_buf, 253 .map_dma_buf = drm_gem_map_dma_buf,
150 .unmap_dma_buf = drm_gem_unmap_dma_buf, 254 .unmap_dma_buf = drm_gem_unmap_dma_buf,
151 .release = drm_gem_dmabuf_release, 255 .release = drm_gem_dmabuf_release,
@@ -185,11 +289,6 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
185struct dma_buf *drm_gem_prime_export(struct drm_device *dev, 289struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
186 struct drm_gem_object *obj, int flags) 290 struct drm_gem_object *obj, int flags)
187{ 291{
188 if (dev->driver->gem_prime_pin) {
189 int ret = dev->driver->gem_prime_pin(obj);
190 if (ret)
191 return ERR_PTR(ret);
192 }
193 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); 292 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
194} 293}
195EXPORT_SYMBOL(drm_gem_prime_export); 294EXPORT_SYMBOL(drm_gem_prime_export);
@@ -235,15 +334,34 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
235 ret = drm_prime_add_buf_handle(&file_priv->prime, 334 ret = drm_prime_add_buf_handle(&file_priv->prime,
236 obj->export_dma_buf, handle); 335 obj->export_dma_buf, handle);
237 if (ret) 336 if (ret)
238 goto out; 337 goto fail_put_dmabuf;
338
339 ret = dma_buf_fd(buf, flags);
340 if (ret < 0)
341 goto fail_rm_handle;
239 342
240 *prime_fd = dma_buf_fd(buf, flags); 343 *prime_fd = ret;
241 mutex_unlock(&file_priv->prime.lock); 344 mutex_unlock(&file_priv->prime.lock);
242 return 0; 345 return 0;
243 346
244out_have_obj: 347out_have_obj:
245 get_dma_buf(dmabuf); 348 get_dma_buf(dmabuf);
246 *prime_fd = dma_buf_fd(dmabuf, flags); 349 ret = dma_buf_fd(dmabuf, flags);
350 if (ret < 0) {
351 dma_buf_put(dmabuf);
352 } else {
353 *prime_fd = ret;
354 ret = 0;
355 }
356
357 goto out;
358
359fail_rm_handle:
360 drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
361fail_put_dmabuf:
 362 /* clear the pointer so it is not checked again when releasing the dma_buf */
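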
363 obj->export_dma_buf = NULL;
364 dma_buf_put(buf);
247out: 365out:
248 drm_gem_object_unreference_unlocked(obj); 366 drm_gem_object_unreference_unlocked(obj);
249 mutex_unlock(&file_priv->prime.lock); 367 mutex_unlock(&file_priv->prime.lock);
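The reworked error paths unwind strictly in reverse order of acquisition: a failed dma_buf_fd() removes the handle that was just added and then drops the buffer reference, while a failed handle add only drops the reference. The fail_rm_handle/fail_put_dmabuf labels come from the diff; the helpers in this standalone sketch are hypothetical stubs that exist only to show the goto structure:

    #include <stdio.h>

    static int add_handle(void)     { return 0; }  /* stub: succeeds */
    static int install_fd(void)     { return -1; } /* stub: forces failure */
    static void remove_handle(void) { puts("undo: remove handle"); }
    static void put_buf(void)       { puts("undo: drop buffer reference"); }

    static int export_fd(void)
    {
            int ret = add_handle();
            if (ret)
                    goto fail_put_buf;   /* the handle was never added */

            ret = install_fd();
            if (ret < 0)
                    goto fail_rm_handle; /* undo in reverse order */

            return 0;

    fail_rm_handle:
            remove_handle();
    fail_put_buf:
            put_buf();
            return ret;
    }

    int main(void)
    {
            return export_fd() ? 1 : 0;
    }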
@@ -276,7 +394,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
276 394
277 attach = dma_buf_attach(dma_buf, dev->dev); 395 attach = dma_buf_attach(dma_buf, dev->dev);
278 if (IS_ERR(attach)) 396 if (IS_ERR(attach))
279 return ERR_PTR(PTR_ERR(attach)); 397 return ERR_CAST(attach);
280 398
281 get_dma_buf(dma_buf); 399 get_dma_buf(dma_buf);
282 400
@@ -412,8 +530,10 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
412 int ret; 530 int ret;
413 531
414 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 532 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
415 if (!sg) 533 if (!sg) {
534 ret = -ENOMEM;
416 goto out; 535 goto out;
536 }
417 537
418 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, 538 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
419 nr_pages << PAGE_SHIFT, GFP_KERNEL); 539 nr_pages << PAGE_SHIFT, GFP_KERNEL);
@@ -423,7 +543,7 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
423 return sg; 543 return sg;
424out: 544out:
425 kfree(sg); 545 kfree(sg);
426 return NULL; 546 return ERR_PTR(ret);
427} 547}
428EXPORT_SYMBOL(drm_prime_pages_to_sg); 548EXPORT_SYMBOL(drm_prime_pages_to_sg);
429 549
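Since drm_prime_pages_to_sg() now reports failure as ERR_PTR(-errno) instead of NULL, its callers have to switch from a NULL test to IS_ERR(). A caller-side sketch (the surrounding driver variables are hypothetical; IS_ERR() and PTR_ERR() are the standard kernel helpers):

    sgt = drm_prime_pages_to_sg(pages, nr_pages);
    if (IS_ERR(sgt))
            return PTR_ERR(sgt);    /* was: if (!sgt) return -ENOMEM; */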
@@ -492,21 +612,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
492} 612}
493EXPORT_SYMBOL(drm_prime_destroy_file_private); 613EXPORT_SYMBOL(drm_prime_destroy_file_private);
494 614
495static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
496{
497 struct drm_prime_member *member;
498
499 member = kmalloc(sizeof(*member), GFP_KERNEL);
500 if (!member)
501 return -ENOMEM;
502
503 get_dma_buf(dma_buf);
504 member->dma_buf = dma_buf;
505 member->handle = handle;
506 list_add(&member->entry, &prime_fpriv->head);
507 return 0;
508}
509
510int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) 615int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
511{ 616{
512 struct drm_prime_member *member; 617 struct drm_prime_member *member;
@@ -523,16 +628,8 @@ EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
523 628
524void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) 629void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
525{ 630{
526 struct drm_prime_member *member, *safe;
527
528 mutex_lock(&prime_fpriv->lock); 631 mutex_lock(&prime_fpriv->lock);
529 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { 632 drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
530 if (member->dma_buf == dma_buf) {
531 dma_buf_put(dma_buf);
532 list_del(&member->entry);
533 kfree(member);
534 }
535 }
536 mutex_unlock(&prime_fpriv->lock); 633 mutex_unlock(&prime_fpriv->lock);
537} 634}
538EXPORT_SYMBOL(drm_prime_remove_buf_handle); 635EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
new file mode 100644
index 000000000000..7047ca025787
--- /dev/null
+++ b/drivers/gpu/drm/drm_rect.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright (C) 2011-2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include <linux/errno.h>
25#include <linux/export.h>
26#include <linux/kernel.h>
27#include <drm/drmP.h>
28#include <drm/drm_rect.h>
29
30/**
31 * drm_rect_intersect - intersect two rectangles
32 * @r1: first rectangle
33 * @r2: second rectangle
34 *
35 * Calculate the intersection of rectangles @r1 and @r2.
36 * @r1 will be overwritten with the intersection.
37 *
38 * RETURNS:
39 * %true if rectangle @r1 is still visible after the operation,
40 * %false otherwise.
41 */
42bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
43{
44 r1->x1 = max(r1->x1, r2->x1);
45 r1->y1 = max(r1->y1, r2->y1);
46 r1->x2 = min(r1->x2, r2->x2);
47 r1->y2 = min(r1->y2, r2->y2);
48
49 return drm_rect_visible(r1);
50}
51EXPORT_SYMBOL(drm_rect_intersect);
52
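drm_rect_visible() comes from drm_rect.h and presumably just checks that the clipped rectangle still has positive width and height. A standalone model of the intersection with a worked example:

    /* Standalone model of drm_rect_intersect(); mirrors the logic above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    static int imax(int a, int b) { return a > b ? a : b; }
    static int imin(int a, int b) { return a < b ? a : b; }

    static bool visible(const struct rect *r)
    {
            return r->x2 > r->x1 && r->y2 > r->y1; /* assumed semantics */
    }

    static bool intersect(struct rect *r1, const struct rect *r2)
    {
            r1->x1 = imax(r1->x1, r2->x1);
            r1->y1 = imax(r1->y1, r2->y1);
            r1->x2 = imin(r1->x2, r2->x2);
            r1->y2 = imin(r1->y2, r2->y2);
            return visible(r1);
    }

    int main(void)
    {
            struct rect a = { 0, 0, 100, 100 }, b = { 50, 50, 200, 200 };

            /* a shrinks to 50,50..100,100 and remains visible (prints 1) */
            printf("%d -> %d,%d..%d,%d\n", intersect(&a, &b),
                   a.x1, a.y1, a.x2, a.y2);
            return 0;
    }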
53/**
54 * drm_rect_clip_scaled - perform a scaled clip operation
55 * @src: source window rectangle
56 * @dst: destination window rectangle
57 * @clip: clip rectangle
58 * @hscale: horizontal scaling factor
59 * @vscale: vertical scaling factor
60 *
61 * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
62 * same amounts multiplied by @hscale and @vscale.
63 *
64 * RETURNS:
65 * %true if rectangle @dst is still visible after being clipped,
66 * %false otherwise
67 */
68bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
69 const struct drm_rect *clip,
70 int hscale, int vscale)
71{
72 int diff;
73
74 diff = clip->x1 - dst->x1;
75 if (diff > 0) {
76 int64_t tmp = src->x1 + (int64_t) diff * hscale;
77 src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
78 }
79 diff = clip->y1 - dst->y1;
80 if (diff > 0) {
81 int64_t tmp = src->y1 + (int64_t) diff * vscale;
82 src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
83 }
84 diff = dst->x2 - clip->x2;
85 if (diff > 0) {
86 int64_t tmp = src->x2 - (int64_t) diff * hscale;
87 src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
88 }
89 diff = dst->y2 - clip->y2;
90 if (diff > 0) {
91 int64_t tmp = src->y2 - (int64_t) diff * vscale;
92 src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
93 }
94
95 return drm_rect_intersect(dst, clip);
96}
97EXPORT_SYMBOL(drm_rect_clip_scaled);
98
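A worked example of the scaled clip: if @clip trims 16 destination pixels off the left edge (clip->x1 - dst->x1 = 16) and @hscale is 4 source units per destination pixel, src->x1 must advance by 16 * 4 = 64. The 64-bit intermediate and the clamp exist because a large offset times a large scale factor can overflow a 32-bit int:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int src_x1 = 0;
            int diff = 16;     /* clip->x1 - dst->x1: 16 pixels clipped */
            int hscale = 4;    /* 4 source units per destination pixel */
            int64_t tmp = src_x1 + (int64_t)diff * hscale;

            /* the source edge moves by 64 source units */
            printf("new src->x1 = %lld\n", (long long)tmp);
            return 0;
    }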
99static int drm_calc_scale(int src, int dst)
100{
101 int scale = 0;
102
103 if (src < 0 || dst < 0)
104 return -EINVAL;
105
106 if (dst == 0)
107 return 0;
108
109 scale = src / dst;
110
111 return scale;
112}
113
114/**
115 * drm_rect_calc_hscale - calculate the horizontal scaling factor
116 * @src: source window rectangle
117 * @dst: destination window rectangle
118 * @min_hscale: minimum allowed horizontal scaling factor
119 * @max_hscale: maximum allowed horizontal scaling factor
120 *
121 * Calculate the horizontal scaling factor as
122 * (@src width) / (@dst width).
123 *
124 * RETURNS:
 125 * The horizontal scaling factor, or a negative errno if out of limits.
126 */
127int drm_rect_calc_hscale(const struct drm_rect *src,
128 const struct drm_rect *dst,
129 int min_hscale, int max_hscale)
130{
131 int src_w = drm_rect_width(src);
132 int dst_w = drm_rect_width(dst);
133 int hscale = drm_calc_scale(src_w, dst_w);
134
135 if (hscale < 0 || dst_w == 0)
136 return hscale;
137
138 if (hscale < min_hscale || hscale > max_hscale)
139 return -ERANGE;
140
141 return hscale;
142}
143EXPORT_SYMBOL(drm_rect_calc_hscale);
144
145/**
146 * drm_rect_calc_vscale - calculate the vertical scaling factor
147 * @src: source window rectangle
148 * @dst: destination window rectangle
149 * @min_vscale: minimum allowed vertical scaling factor
150 * @max_vscale: maximum allowed vertical scaling factor
151 *
152 * Calculate the vertical scaling factor as
153 * (@src height) / (@dst height).
154 *
155 * RETURNS:
 156 * The vertical scaling factor, or a negative errno if out of limits.
157 */
158int drm_rect_calc_vscale(const struct drm_rect *src,
159 const struct drm_rect *dst,
160 int min_vscale, int max_vscale)
161{
162 int src_h = drm_rect_height(src);
163 int dst_h = drm_rect_height(dst);
164 int vscale = drm_calc_scale(src_h, dst_h);
165
166 if (vscale < 0 || dst_h == 0)
167 return vscale;
168
169 if (vscale < min_vscale || vscale > max_vscale)
170 return -ERANGE;
171
172 return vscale;
173}
174EXPORT_SYMBOL(drm_rect_calc_vscale);
175
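The scale factor is plain integer division of the spans: a 1024-wide source scanned onto a 256-wide destination gives a downscaling factor of 4, and anything outside [min, max] is rejected with -ERANGE. A standalone model of the combined drm_calc_scale() plus range check used by both calc functions:

    #include <errno.h>
    #include <stdio.h>

    static int calc_scale(int src, int dst, int min, int max)
    {
            int scale;

            if (src < 0 || dst < 0)
                    return -EINVAL;
            if (dst == 0)
                    return 0;       /* degenerate dst: no range check */
            scale = src / dst;
            if (scale < min || scale > max)
                    return -ERANGE;
            return scale;
    }

    int main(void)
    {
            printf("%d\n", calc_scale(1024, 256, 1, 8)); /* 4 */
            printf("%d\n", calc_scale(1024, 64, 1, 8));  /* -ERANGE: 16 > 8 */
            return 0;
    }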
176/**
 177 * drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
178 * @src: source window rectangle
179 * @dst: destination window rectangle
180 * @min_hscale: minimum allowed horizontal scaling factor
181 * @max_hscale: maximum allowed horizontal scaling factor
182 *
183 * Calculate the horizontal scaling factor as
184 * (@src width) / (@dst width).
185 *
 186 * If the calculated scaling factor is below @min_hscale,
 187 * decrease the width of rectangle @dst to compensate.
 188 *
 189 * If the calculated scaling factor is above @max_hscale,
 190 * decrease the width of rectangle @src to compensate.
191 *
192 * RETURNS:
193 * The horizontal scaling factor.
194 */
195int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
196 struct drm_rect *dst,
197 int min_hscale, int max_hscale)
198{
199 int src_w = drm_rect_width(src);
200 int dst_w = drm_rect_width(dst);
201 int hscale = drm_calc_scale(src_w, dst_w);
202
203 if (hscale < 0 || dst_w == 0)
204 return hscale;
205
206 if (hscale < min_hscale) {
207 int max_dst_w = src_w / min_hscale;
208
209 drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
210
211 return min_hscale;
212 }
213
214 if (hscale > max_hscale) {
215 int max_src_w = dst_w * max_hscale;
216
217 drm_rect_adjust_size(src, max_src_w - src_w, 0);
218
219 return max_hscale;
220 }
221
222 return hscale;
223}
224EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
225
226/**
227 * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
228 * @src: source window rectangle
229 * @dst: destination window rectangle
230 * @min_vscale: minimum allowed vertical scaling factor
231 * @max_vscale: maximum allowed vertical scaling factor
232 *
233 * Calculate the vertical scaling factor as
234 * (@src height) / (@dst height).
235 *
236 * If the calculated scaling factor is below @min_vscale,
237 * decrease the height of rectangle @dst to compensate.
238 *
239 * If the calculated scaling factor is above @max_vscale,
240 * decrease the height of rectangle @src to compensate.
241 *
242 * RETURNS:
243 * The vertical scaling factor.
244 */
245int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
246 struct drm_rect *dst,
247 int min_vscale, int max_vscale)
248{
249 int src_h = drm_rect_height(src);
250 int dst_h = drm_rect_height(dst);
251 int vscale = drm_calc_scale(src_h, dst_h);
252
253 if (vscale < 0 || dst_h == 0)
254 return vscale;
255
256 if (vscale < min_vscale) {
257 int max_dst_h = src_h / min_vscale;
258
259 drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
260
261 return min_vscale;
262 }
263
264 if (vscale > max_vscale) {
265 int max_src_h = dst_h * max_vscale;
266
267 drm_rect_adjust_size(src, 0, max_src_h - src_h);
268
269 return max_vscale;
270 }
271
272 return vscale;
273}
274EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
275
276/**
277 * drm_rect_debug_print - print the rectangle information
278 * @r: rectangle to print
279 * @fixed_point: rectangle is in 16.16 fixed point format
280 */
281void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
282{
283 int w = drm_rect_width(r);
284 int h = drm_rect_height(r);
285
286 if (fixed_point)
287 DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
288 w >> 16, ((w & 0xffff) * 15625) >> 10,
289 h >> 16, ((h & 0xffff) * 15625) >> 10,
290 r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
291 r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
292 else
293 DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
294}
295EXPORT_SYMBOL(drm_rect_debug_print);
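The fractional part of a 16.16 value is rendered as six decimal digits via frac * 10^6 / 2^16; since 10^6 / 2^16 reduces exactly to 15625 / 1024, that is what (frac & 0xffff) * 15625 >> 10 computes, and the product stays within 32 bits (at most 65535 * 15625, roughly 1.02e9). A quick check:

    #include <stdio.h>

    int main(void)
    {
            unsigned frac = 0x8000; /* 0.5 in 16.16 fixed point */

            /* (frac * 15625) >> 10 == frac * 1000000 / 65536 */
            printf("%u\n", (frac * 15625) >> 10); /* prints 500000 */
            return 0;
    }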
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 16f3ec579b3b..327ca19cda85 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -203,7 +203,7 @@ EXPORT_SYMBOL(drm_master_put);
203int drm_setmaster_ioctl(struct drm_device *dev, void *data, 203int drm_setmaster_ioctl(struct drm_device *dev, void *data,
204 struct drm_file *file_priv) 204 struct drm_file *file_priv)
205{ 205{
206 int ret; 206 int ret = 0;
207 207
208 if (file_priv->is_master) 208 if (file_priv->is_master)
209 return 0; 209 return 0;
@@ -229,7 +229,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
229 } 229 }
230 mutex_unlock(&dev->struct_mutex); 230 mutex_unlock(&dev->struct_mutex);
231 231
232 return 0; 232 return ret;
233} 233}
234 234
235int drm_dropmaster_ioctl(struct drm_device *dev, void *data, 235int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
@@ -451,14 +451,8 @@ void drm_put_dev(struct drm_device *dev)
451 451
452 drm_lastclose(dev); 452 drm_lastclose(dev);
453 453
454 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 454 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
455 dev->agp && dev->agp->agp_mtrr >= 0) { 455 arch_phys_wc_del(dev->agp->agp_mtrr);
456 int retval;
457 retval = mtrr_del(dev->agp->agp_mtrr,
458 dev->agp->agp_info.aper_base,
459 dev->agp->agp_info.aper_size * 1024 * 1024);
460 DRM_DEBUG("mtrr_del=%d\n", retval);
461 }
462 456
463 if (dev->driver->unload) 457 if (dev->driver->unload)
464 dev->driver->unload(dev); 458 dev->driver->unload(dev);
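arch_phys_wc_add()/arch_phys_wc_del() wrap the MTRR bookkeeping that drivers used to do by hand; dropping the agp_mtrr >= 0 guard here implies the helper tolerates a failed or invalid handle itself, which is an assumption about its contract rather than something this hunk shows. The intended pairing, sketched:

    /* Sketch of the add/del pairing; error handling lives in the helpers. */
    dev->agp->agp_mtrr = arch_phys_wc_add(dev->agp->agp_info.aper_base,
                                          dev->agp->agp_info.aper_size *
                                          1024 * 1024);
    /* ... device lifetime ... */
    arch_phys_wc_del(dev->agp->agp_mtrr);  /* safe even if add failed */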
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 02296653a058..2290b3b73832 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -30,14 +30,14 @@ static struct device_type drm_sysfs_device_minor = {
30}; 30};
31 31
32/** 32/**
33 * drm_class_suspend - DRM class suspend hook 33 * __drm_class_suspend - internal DRM class suspend routine
34 * @dev: Linux device to suspend 34 * @dev: Linux device to suspend
35 * @state: power state to enter 35 * @state: power state to enter
36 * 36 *
37 * Just figures out what the actual struct drm_device associated with 37 * Just figures out what the actual struct drm_device associated with
38 * @dev is and calls its suspend hook, if present. 38 * @dev is and calls its suspend hook, if present.
39 */ 39 */
40static int drm_class_suspend(struct device *dev, pm_message_t state) 40static int __drm_class_suspend(struct device *dev, pm_message_t state)
41{ 41{
42 if (dev->type == &drm_sysfs_device_minor) { 42 if (dev->type == &drm_sysfs_device_minor) {
43 struct drm_minor *drm_minor = to_drm_minor(dev); 43 struct drm_minor *drm_minor = to_drm_minor(dev);
@@ -52,6 +52,26 @@ static int drm_class_suspend(struct device *dev, pm_message_t state)
52} 52}
53 53
54/** 54/**
55 * drm_class_suspend - internal DRM class suspend hook. Simply calls
56 * __drm_class_suspend() with the correct pm state.
57 * @dev: Linux device to suspend
58 */
59static int drm_class_suspend(struct device *dev)
60{
61 return __drm_class_suspend(dev, PMSG_SUSPEND);
62}
63
64/**
65 * drm_class_freeze - internal DRM class freeze hook. Simply calls
66 * __drm_class_suspend() with the correct pm state.
67 * @dev: Linux device to freeze
68 */
69static int drm_class_freeze(struct device *dev)
70{
71 return __drm_class_suspend(dev, PMSG_FREEZE);
72}
73
74/**
55 * drm_class_resume - DRM class resume hook 75 * drm_class_resume - DRM class resume hook
56 * @dev: Linux device to resume 76 * @dev: Linux device to resume
57 * 77 *
@@ -72,6 +92,12 @@ static int drm_class_resume(struct device *dev)
72 return 0; 92 return 0;
73} 93}
74 94
95static const struct dev_pm_ops drm_class_dev_pm_ops = {
96 .suspend = drm_class_suspend,
97 .resume = drm_class_resume,
98 .freeze = drm_class_freeze,
99};
100
75static char *drm_devnode(struct device *dev, umode_t *mode) 101static char *drm_devnode(struct device *dev, umode_t *mode)
76{ 102{
77 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); 103 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -106,8 +132,7 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
106 goto err_out; 132 goto err_out;
107 } 133 }
108 134
109 class->suspend = drm_class_suspend; 135 class->pm = &drm_class_dev_pm_ops;
110 class->resume = drm_class_resume;
111 136
112 err = class_create_file(class, &class_attr_version.attr); 137 err = class_create_file(class, &class_attr_version.attr);
113 if (err) 138 if (err)
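The legacy class->suspend hook received a pm_message_t telling it whether the system was suspending to RAM or freezing for hibernation; dev_pm_ops encodes that in which callback fires, so the wrappers above re-synthesize the message. A standalone model of the one-worker, thin-wrapper pattern (the MSG_* names are local stand-ins, not the kernel's PMSG_* constants):

    #include <stdio.h>

    enum pm_msg { MSG_SUSPEND, MSG_FREEZE };

    static int class_suspend_common(enum pm_msg state)
    {
            printf("suspending, state %d\n", state); /* shared worker */
            return 0;
    }

    static int class_suspend(void) { return class_suspend_common(MSG_SUSPEND); }
    static int class_freeze(void)  { return class_suspend_common(MSG_FREEZE); }

    int main(void)
    {
            class_suspend(); /* suspend-to-RAM path */
            class_freeze();  /* hibernation path */
            return 0;
    }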
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 03ea964aa604..27cc95f36381 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -21,7 +21,7 @@ TRACE_EVENT(drm_vblank_event,
21 __entry->crtc = crtc; 21 __entry->crtc = crtc;
22 __entry->seq = seq; 22 __entry->seq = seq;
23 ), 23 ),
24 TP_printk("crtc=%d, seq=%d", __entry->crtc, __entry->seq) 24 TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
25); 25);
26 26
27TRACE_EVENT(drm_vblank_event_queued, 27TRACE_EVENT(drm_vblank_event_queued,
@@ -37,7 +37,7 @@ TRACE_EVENT(drm_vblank_event_queued,
37 __entry->crtc = crtc; 37 __entry->crtc = crtc;
38 __entry->seq = seq; 38 __entry->seq = seq;
39 ), 39 ),
40 TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \ 40 TP_printk("pid=%d, crtc=%d, seq=%u", __entry->pid, __entry->crtc, \
41 __entry->seq) 41 __entry->seq)
42); 42);
43 43
@@ -54,7 +54,7 @@ TRACE_EVENT(drm_vblank_event_delivered,
54 __entry->crtc = crtc; 54 __entry->crtc = crtc;
55 __entry->seq = seq; 55 __entry->seq = seq;
56 ), 56 ),
57 TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \ 57 TP_printk("pid=%d, crtc=%d, seq=%u", __entry->pid, __entry->crtc, \
58 __entry->seq) 58 __entry->seq)
59); 59);
60 60
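The vblank sequence counter is unsigned; once it passes 2^31, a %d format prints it as a large negative number, hence the switch to %u in all three trace events. A tiny illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int seq = 3000000000u; /* long-uptime vblank count */

            printf("%d\n", (int)seq); /* -1294967296: misleading */
            printf("%u\n", seq);      /* 3000000000: correct */
            return 0;
    }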
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1d4f7c9fe661..feb20035b2c4 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -43,18 +43,19 @@
43static void drm_vm_open(struct vm_area_struct *vma); 43static void drm_vm_open(struct vm_area_struct *vma);
44static void drm_vm_close(struct vm_area_struct *vma); 44static void drm_vm_close(struct vm_area_struct *vma);
45 45
46static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) 46static pgprot_t drm_io_prot(struct drm_local_map *map,
47 struct vm_area_struct *vma)
47{ 48{
48 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); 49 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
49 50
50#if defined(__i386__) || defined(__x86_64__) 51#if defined(__i386__) || defined(__x86_64__)
51 if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { 52 if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
52 pgprot_val(tmp) |= _PAGE_PCD; 53 tmp = pgprot_noncached(tmp);
53 pgprot_val(tmp) &= ~_PAGE_PWT; 54 else
54 } 55 tmp = pgprot_writecombine(tmp);
55#elif defined(__powerpc__) 56#elif defined(__powerpc__)
56 pgprot_val(tmp) |= _PAGE_NO_CACHE; 57 pgprot_val(tmp) |= _PAGE_NO_CACHE;
57 if (map_type == _DRM_REGISTERS) 58 if (map->type == _DRM_REGISTERS)
58 pgprot_val(tmp) |= _PAGE_GUARDED; 59 pgprot_val(tmp) |= _PAGE_GUARDED;
59#elif defined(__ia64__) 60#elif defined(__ia64__)
60 if (efi_range_is_wc(vma->vm_start, vma->vm_end - 61 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
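Register mappings can have read side effects, so they stay fully uncached unless the map explicitly requested write combining; frame buffers and AGP apertures now default to write-combined on x86. The decision, restated with comments (pgprot_noncached() and pgprot_writecombine() are the standard kernel helpers):

    if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
            tmp = pgprot_noncached(tmp);    /* MMIO: uncached, ordered */
    else
            tmp = pgprot_writecombine(tmp); /* buffers: batched writes */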
@@ -250,13 +251,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
250 switch (map->type) { 251 switch (map->type) {
251 case _DRM_REGISTERS: 252 case _DRM_REGISTERS:
252 case _DRM_FRAME_BUFFER: 253 case _DRM_FRAME_BUFFER:
253 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { 254 if (drm_core_has_MTRR(dev))
254 int retcode; 255 arch_phys_wc_del(map->mtrr);
255 retcode = mtrr_del(map->mtrr,
256 map->offset,
257 map->size);
258 DRM_DEBUG("mtrr_del = %d\n", retcode);
259 }
260 iounmap(map->handle); 256 iounmap(map->handle);
261 break; 257 break;
262 case _DRM_SHM: 258 case _DRM_SHM:
@@ -617,8 +613,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
617 case _DRM_FRAME_BUFFER: 613 case _DRM_FRAME_BUFFER:
618 case _DRM_REGISTERS: 614 case _DRM_REGISTERS:
619 offset = drm_core_get_reg_ofs(dev); 615 offset = drm_core_get_reg_ofs(dev);
620 vma->vm_flags |= VM_IO; /* not in core dump */ 616 vma->vm_page_prot = drm_io_prot(map, vma);
621 vma->vm_page_prot = drm_io_prot(map->type, vma);
622 if (io_remap_pfn_range(vma, vma->vm_start, 617 if (io_remap_pfn_range(vma, vma->vm_start,
623 (map->offset + offset) >> PAGE_SHIFT, 618 (map->offset + offset) >> PAGE_SHIFT,
624 vma->vm_end - vma->vm_start, 619 vma->vm_end - vma->vm_start,
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 4e9b5ba8edff..95c75edef01a 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -53,6 +53,8 @@ static struct of_device_id hdmiddc_match_types[] = {
53 { 53 {
54 .compatible = "samsung,exynos5-hdmiddc", 54 .compatible = "samsung,exynos5-hdmiddc",
55 }, { 55 }, {
56 .compatible = "samsung,exynos4210-hdmiddc",
57 }, {
56 /* end node */ 58 /* end node */
57 } 59 }
58}; 60};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 57affae9568b..22865baa03b1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -24,8 +24,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
24 enum dma_attr attr; 24 enum dma_attr attr;
25 unsigned int nr_pages; 25 unsigned int nr_pages;
26 26
27 DRM_DEBUG_KMS("%s\n", __FILE__);
28
29 if (buf->dma_addr) { 27 if (buf->dma_addr) {
30 DRM_DEBUG_KMS("already allocated.\n"); 28 DRM_DEBUG_KMS("already allocated.\n");
31 return 0; 29 return 0;
@@ -119,8 +117,6 @@ err_free_attrs:
119static void lowlevel_buffer_deallocate(struct drm_device *dev, 117static void lowlevel_buffer_deallocate(struct drm_device *dev,
120 unsigned int flags, struct exynos_drm_gem_buf *buf) 118 unsigned int flags, struct exynos_drm_gem_buf *buf)
121{ 119{
122 DRM_DEBUG_KMS("%s.\n", __FILE__);
123
124 if (!buf->dma_addr) { 120 if (!buf->dma_addr) {
125 DRM_DEBUG_KMS("dma_addr is invalid.\n"); 121 DRM_DEBUG_KMS("dma_addr is invalid.\n");
126 return; 122 return;
@@ -151,7 +147,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
151{ 147{
152 struct exynos_drm_gem_buf *buffer; 148 struct exynos_drm_gem_buf *buffer;
153 149
154 DRM_DEBUG_KMS("%s.\n", __FILE__);
155 DRM_DEBUG_KMS("desired size = 0x%x\n", size); 150 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
156 151
157 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 152 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -167,8 +162,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
167void exynos_drm_fini_buf(struct drm_device *dev, 162void exynos_drm_fini_buf(struct drm_device *dev,
168 struct exynos_drm_gem_buf *buffer) 163 struct exynos_drm_gem_buf *buffer)
169{ 164{
170 DRM_DEBUG_KMS("%s.\n", __FILE__);
171
172 if (!buffer) { 165 if (!buffer) {
173 DRM_DEBUG_KMS("buffer is null.\n"); 166 DRM_DEBUG_KMS("buffer is null.\n");
174 return; 167 return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 8bcc13ac9f73..02a8bc5226ca 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -34,7 +34,6 @@ convert_to_display_mode(struct drm_display_mode *mode,
34 struct exynos_drm_panel_info *panel) 34 struct exynos_drm_panel_info *panel)
35{ 35{
36 struct fb_videomode *timing = &panel->timing; 36 struct fb_videomode *timing = &panel->timing;
37 DRM_DEBUG_KMS("%s\n", __FILE__);
38 37
39 mode->clock = timing->pixclock / 1000; 38 mode->clock = timing->pixclock / 1000;
40 mode->vrefresh = timing->refresh; 39 mode->vrefresh = timing->refresh;
@@ -58,37 +57,6 @@ convert_to_display_mode(struct drm_display_mode *mode,
58 mode->flags |= DRM_MODE_FLAG_DBLSCAN; 57 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
59} 58}
60 59
61/* convert drm_display_mode to exynos_video_timings */
62static inline void
63convert_to_video_timing(struct fb_videomode *timing,
64 struct drm_display_mode *mode)
65{
66 DRM_DEBUG_KMS("%s\n", __FILE__);
67
68 memset(timing, 0, sizeof(*timing));
69
70 timing->pixclock = mode->clock * 1000;
71 timing->refresh = drm_mode_vrefresh(mode);
72
73 timing->xres = mode->hdisplay;
74 timing->right_margin = mode->hsync_start - mode->hdisplay;
75 timing->hsync_len = mode->hsync_end - mode->hsync_start;
76 timing->left_margin = mode->htotal - mode->hsync_end;
77
78 timing->yres = mode->vdisplay;
79 timing->lower_margin = mode->vsync_start - mode->vdisplay;
80 timing->vsync_len = mode->vsync_end - mode->vsync_start;
81 timing->upper_margin = mode->vtotal - mode->vsync_end;
82
83 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
84 timing->vmode = FB_VMODE_INTERLACED;
85 else
86 timing->vmode = FB_VMODE_NONINTERLACED;
87
88 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
89 timing->vmode |= FB_VMODE_DOUBLE;
90}
91
92static int exynos_drm_connector_get_modes(struct drm_connector *connector) 60static int exynos_drm_connector_get_modes(struct drm_connector *connector)
93{ 61{
94 struct exynos_drm_connector *exynos_connector = 62 struct exynos_drm_connector *exynos_connector =
@@ -99,8 +67,6 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
99 unsigned int count = 0; 67 unsigned int count = 0;
100 int ret; 68 int ret;
101 69
102 DRM_DEBUG_KMS("%s\n", __FILE__);
103
104 if (!display_ops) { 70 if (!display_ops) {
105 DRM_DEBUG_KMS("display_ops is null.\n"); 71 DRM_DEBUG_KMS("display_ops is null.\n");
106 return 0; 72 return 0;
@@ -168,15 +134,12 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
168 to_exynos_connector(connector); 134 to_exynos_connector(connector);
169 struct exynos_drm_manager *manager = exynos_connector->manager; 135 struct exynos_drm_manager *manager = exynos_connector->manager;
170 struct exynos_drm_display_ops *display_ops = manager->display_ops; 136 struct exynos_drm_display_ops *display_ops = manager->display_ops;
171 struct fb_videomode timing;
172 int ret = MODE_BAD; 137 int ret = MODE_BAD;
173 138
174 DRM_DEBUG_KMS("%s\n", __FILE__); 139 DRM_DEBUG_KMS("%s\n", __FILE__);
175 140
176 convert_to_video_timing(&timing, mode); 141 if (display_ops && display_ops->check_mode)
177 142 if (!display_ops->check_mode(manager->dev, mode))
178 if (display_ops && display_ops->check_timing)
179 if (!display_ops->check_timing(manager->dev, (void *)&timing))
180 ret = MODE_OK; 143 ret = MODE_OK;
181 144
182 return ret; 145 return ret;
@@ -190,8 +153,6 @@ struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
190 struct drm_mode_object *obj; 153 struct drm_mode_object *obj;
191 struct drm_encoder *encoder; 154 struct drm_encoder *encoder;
192 155
193 DRM_DEBUG_KMS("%s\n", __FILE__);
194
195 obj = drm_mode_object_find(dev, exynos_connector->encoder_id, 156 obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
196 DRM_MODE_OBJECT_ENCODER); 157 DRM_MODE_OBJECT_ENCODER);
197 if (!obj) { 158 if (!obj) {
@@ -234,8 +195,6 @@ void exynos_drm_display_power(struct drm_connector *connector, int mode)
234static void exynos_drm_connector_dpms(struct drm_connector *connector, 195static void exynos_drm_connector_dpms(struct drm_connector *connector,
235 int mode) 196 int mode)
236{ 197{
237 DRM_DEBUG_KMS("%s\n", __FILE__);
238
239 /* 198 /*
240 * in case that drm_crtc_helper_set_mode() is called, 199 * in case that drm_crtc_helper_set_mode() is called,
241 * encoder/crtc->funcs->dpms() will be just returned 200 * encoder/crtc->funcs->dpms() will be just returned
@@ -282,8 +241,6 @@ exynos_drm_connector_detect(struct drm_connector *connector, bool force)
282 manager->display_ops; 241 manager->display_ops;
283 enum drm_connector_status status = connector_status_disconnected; 242 enum drm_connector_status status = connector_status_disconnected;
284 243
285 DRM_DEBUG_KMS("%s\n", __FILE__);
286
287 if (display_ops && display_ops->is_connected) { 244 if (display_ops && display_ops->is_connected) {
288 if (display_ops->is_connected(manager->dev)) 245 if (display_ops->is_connected(manager->dev))
289 status = connector_status_connected; 246 status = connector_status_connected;
@@ -299,8 +256,6 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
299 struct exynos_drm_connector *exynos_connector = 256 struct exynos_drm_connector *exynos_connector =
300 to_exynos_connector(connector); 257 to_exynos_connector(connector);
301 258
302 DRM_DEBUG_KMS("%s\n", __FILE__);
303
304 drm_sysfs_connector_remove(connector); 259 drm_sysfs_connector_remove(connector);
305 drm_connector_cleanup(connector); 260 drm_connector_cleanup(connector);
306 kfree(exynos_connector); 261 kfree(exynos_connector);
@@ -322,8 +277,6 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
322 int type; 277 int type;
323 int err; 278 int err;
324 279
325 DRM_DEBUG_KMS("%s\n", __FILE__);
326
327 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); 280 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
328 if (!exynos_connector) { 281 if (!exynos_connector) {
329 DRM_ERROR("failed to allocate connector\n"); 282 DRM_ERROR("failed to allocate connector\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4667c9f67acd..1bef6dc77478 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -27,8 +27,6 @@ static int exynos_drm_create_enc_conn(struct drm_device *dev,
27 struct drm_connector *connector; 27 struct drm_connector *connector;
28 int ret; 28 int ret;
29 29
30 DRM_DEBUG_DRIVER("%s\n", __FILE__);
31
32 subdrv->manager->dev = subdrv->dev; 30 subdrv->manager->dev = subdrv->dev;
33 31
34 /* create and initialize a encoder for this sub driver. */ 32 /* create and initialize a encoder for this sub driver. */
@@ -102,8 +100,6 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev,
102static void exynos_drm_subdrv_remove(struct drm_device *dev, 100static void exynos_drm_subdrv_remove(struct drm_device *dev,
103 struct exynos_drm_subdrv *subdrv) 101 struct exynos_drm_subdrv *subdrv)
104{ 102{
105 DRM_DEBUG_DRIVER("%s\n", __FILE__);
106
107 if (subdrv->remove) 103 if (subdrv->remove)
108 subdrv->remove(dev, subdrv->dev); 104 subdrv->remove(dev, subdrv->dev);
109} 105}
@@ -114,8 +110,6 @@ int exynos_drm_device_register(struct drm_device *dev)
114 unsigned int fine_cnt = 0; 110 unsigned int fine_cnt = 0;
115 int err; 111 int err;
116 112
117 DRM_DEBUG_DRIVER("%s\n", __FILE__);
118
119 if (!dev) 113 if (!dev)
120 return -EINVAL; 114 return -EINVAL;
121 115
@@ -158,8 +152,6 @@ int exynos_drm_device_unregister(struct drm_device *dev)
158{ 152{
159 struct exynos_drm_subdrv *subdrv; 153 struct exynos_drm_subdrv *subdrv;
160 154
161 DRM_DEBUG_DRIVER("%s\n", __FILE__);
162
163 if (!dev) { 155 if (!dev) {
164 WARN(1, "Unexpected drm device unregister!\n"); 156 WARN(1, "Unexpected drm device unregister!\n");
165 return -EINVAL; 157 return -EINVAL;
@@ -176,8 +168,6 @@ EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
176 168
177int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) 169int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
178{ 170{
179 DRM_DEBUG_DRIVER("%s\n", __FILE__);
180
181 if (!subdrv) 171 if (!subdrv)
182 return -EINVAL; 172 return -EINVAL;
183 173
@@ -189,8 +179,6 @@ EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
189 179
190int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) 180int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
191{ 181{
192 DRM_DEBUG_DRIVER("%s\n", __FILE__);
193
194 if (!subdrv) 182 if (!subdrv)
195 return -EINVAL; 183 return -EINVAL;
196 184
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index c200e4d71e3d..9a35d171a6d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -76,8 +76,6 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
76 76
77static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) 77static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
78{ 78{
79 DRM_DEBUG_KMS("%s\n", __FILE__);
80
81 /* drm framework doesn't check NULL. */ 79 /* drm framework doesn't check NULL. */
82} 80}
83 81
@@ -85,8 +83,6 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
85{ 83{
86 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 84 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
87 85
88 DRM_DEBUG_KMS("%s\n", __FILE__);
89
90 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 86 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
91 exynos_plane_commit(exynos_crtc->plane); 87 exynos_plane_commit(exynos_crtc->plane);
92 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON); 88 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
@@ -97,8 +93,6 @@ exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
97 const struct drm_display_mode *mode, 93 const struct drm_display_mode *mode,
98 struct drm_display_mode *adjusted_mode) 94 struct drm_display_mode *adjusted_mode)
99{ 95{
100 DRM_DEBUG_KMS("%s\n", __FILE__);
101
102 /* drm framework doesn't check NULL */ 96 /* drm framework doesn't check NULL */
103 return true; 97 return true;
104} 98}
@@ -115,8 +109,6 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
115 int pipe = exynos_crtc->pipe; 109 int pipe = exynos_crtc->pipe;
116 int ret; 110 int ret;
117 111
118 DRM_DEBUG_KMS("%s\n", __FILE__);
119
120 /* 112 /*
121 * copy the mode data adjusted by mode_fixup() into crtc->mode 113 * copy the mode data adjusted by mode_fixup() into crtc->mode
 122 * so that hardware can be set to the proper mode. 114 * so that hardware can be set to the proper mode.
@@ -139,7 +131,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
139 return 0; 131 return 0;
140} 132}
141 133
142static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 134static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
143 struct drm_framebuffer *old_fb) 135 struct drm_framebuffer *old_fb)
144{ 136{
145 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 137 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -148,8 +140,6 @@ static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
148 unsigned int crtc_h; 140 unsigned int crtc_h;
149 int ret; 141 int ret;
150 142
151 DRM_DEBUG_KMS("%s\n", __FILE__);
152
153 /* when framebuffer changing is requested, crtc's dpms should be on */ 143 /* when framebuffer changing is requested, crtc's dpms should be on */
154 if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) { 144 if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
155 DRM_ERROR("failed framebuffer changing request.\n"); 145 DRM_ERROR("failed framebuffer changing request.\n");
@@ -169,18 +159,16 @@ static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
169 return 0; 159 return 0;
170} 160}
171 161
172static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc) 162static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
163 struct drm_framebuffer *old_fb)
173{ 164{
174 DRM_DEBUG_KMS("%s\n", __FILE__); 165 return exynos_drm_crtc_mode_set_commit(crtc, x, y, old_fb);
175 /* drm framework doesn't check NULL */
176} 166}
177 167
178static void exynos_drm_crtc_disable(struct drm_crtc *crtc) 168static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
179{ 169{
180 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 170 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
181 171
182 DRM_DEBUG_KMS("%s\n", __FILE__);
183
184 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF); 172 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
185 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 173 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
186} 174}
@@ -192,7 +180,6 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
192 .mode_fixup = exynos_drm_crtc_mode_fixup, 180 .mode_fixup = exynos_drm_crtc_mode_fixup,
193 .mode_set = exynos_drm_crtc_mode_set, 181 .mode_set = exynos_drm_crtc_mode_set,
194 .mode_set_base = exynos_drm_crtc_mode_set_base, 182 .mode_set_base = exynos_drm_crtc_mode_set_base,
195 .load_lut = exynos_drm_crtc_load_lut,
196 .disable = exynos_drm_crtc_disable, 183 .disable = exynos_drm_crtc_disable,
197}; 184};
198 185
@@ -206,8 +193,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
206 struct drm_framebuffer *old_fb = crtc->fb; 193 struct drm_framebuffer *old_fb = crtc->fb;
207 int ret = -EINVAL; 194 int ret = -EINVAL;
208 195
209 DRM_DEBUG_KMS("%s\n", __FILE__);
210
211 /* when the page flip is requested, crtc's dpms should be on */ 196 /* when the page flip is requested, crtc's dpms should be on */
212 if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) { 197 if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
213 DRM_ERROR("failed page flip request.\n"); 198 DRM_ERROR("failed page flip request.\n");
@@ -237,7 +222,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
237 spin_unlock_irq(&dev->event_lock); 222 spin_unlock_irq(&dev->event_lock);
238 223
239 crtc->fb = fb; 224 crtc->fb = fb;
240 ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, 225 ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y,
241 NULL); 226 NULL);
242 if (ret) { 227 if (ret) {
243 crtc->fb = old_fb; 228 crtc->fb = old_fb;
@@ -260,8 +245,6 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
260 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 245 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
261 struct exynos_drm_private *private = crtc->dev->dev_private; 246 struct exynos_drm_private *private = crtc->dev->dev_private;
262 247
263 DRM_DEBUG_KMS("%s\n", __FILE__);
264
265 private->crtc[exynos_crtc->pipe] = NULL; 248 private->crtc[exynos_crtc->pipe] = NULL;
266 249
267 drm_crtc_cleanup(crtc); 250 drm_crtc_cleanup(crtc);
@@ -276,8 +259,6 @@ static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
276 struct exynos_drm_private *dev_priv = dev->dev_private; 259 struct exynos_drm_private *dev_priv = dev->dev_private;
277 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 260 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
278 261
279 DRM_DEBUG_KMS("%s\n", __func__);
280
281 if (property == dev_priv->crtc_mode_property) { 262 if (property == dev_priv->crtc_mode_property) {
282 enum exynos_crtc_mode mode = val; 263 enum exynos_crtc_mode mode = val;
283 264
@@ -322,8 +303,6 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
322 struct exynos_drm_private *dev_priv = dev->dev_private; 303 struct exynos_drm_private *dev_priv = dev->dev_private;
323 struct drm_property *prop; 304 struct drm_property *prop;
324 305
325 DRM_DEBUG_KMS("%s\n", __func__);
326
327 prop = dev_priv->crtc_mode_property; 306 prop = dev_priv->crtc_mode_property;
328 if (!prop) { 307 if (!prop) {
329 prop = drm_property_create_enum(dev, 0, "mode", mode_names, 308 prop = drm_property_create_enum(dev, 0, "mode", mode_names,
@@ -343,8 +322,6 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
343 struct exynos_drm_private *private = dev->dev_private; 322 struct exynos_drm_private *private = dev->dev_private;
344 struct drm_crtc *crtc; 323 struct drm_crtc *crtc;
345 324
346 DRM_DEBUG_KMS("%s\n", __FILE__);
347
348 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); 325 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
349 if (!exynos_crtc) { 326 if (!exynos_crtc) {
350 DRM_ERROR("failed to allocate exynos crtc\n"); 327 DRM_ERROR("failed to allocate exynos crtc\n");
@@ -379,8 +356,6 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
379 struct exynos_drm_crtc *exynos_crtc = 356 struct exynos_drm_crtc *exynos_crtc =
380 to_exynos_crtc(private->crtc[crtc]); 357 to_exynos_crtc(private->crtc[crtc]);
381 358
382 DRM_DEBUG_KMS("%s\n", __FILE__);
383
384 if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) 359 if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
385 return -EPERM; 360 return -EPERM;
386 361
@@ -396,8 +371,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
396 struct exynos_drm_crtc *exynos_crtc = 371 struct exynos_drm_crtc *exynos_crtc =
397 to_exynos_crtc(private->crtc[crtc]); 372 to_exynos_crtc(private->crtc[crtc]);
398 373
399 DRM_DEBUG_KMS("%s\n", __FILE__);
400
401 if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) 374 if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
402 return; 375 return;
403 376
@@ -413,8 +386,6 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
413 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc); 386 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
414 unsigned long flags; 387 unsigned long flags;
415 388
416 DRM_DEBUG_KMS("%s\n", __FILE__);
417
418 spin_lock_irqsave(&dev->event_lock, flags); 389 spin_lock_irqsave(&dev->event_lock, flags);
419 390
420 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list, 391 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ff7f2a886a34..a0f997e0cbdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -71,8 +71,6 @@ static struct sg_table *
71 unsigned int i; 71 unsigned int i;
72 int nents, ret; 72 int nents, ret;
73 73
74 DRM_DEBUG_PRIME("%s\n", __FILE__);
75
76 /* just return current sgt if already requested. */ 74 /* just return current sgt if already requested. */
77 if (exynos_attach->dir == dir && exynos_attach->is_mapped) 75 if (exynos_attach->dir == dir && exynos_attach->is_mapped)
78 return &exynos_attach->sgt; 76 return &exynos_attach->sgt;
@@ -133,8 +131,6 @@ static void exynos_dmabuf_release(struct dma_buf *dmabuf)
133{ 131{
134 struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv; 132 struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
135 133
136 DRM_DEBUG_PRIME("%s\n", __FILE__);
137
138 /* 134 /*
139 * exynos_dmabuf_release() call means that file object's 135 * exynos_dmabuf_release() call means that file object's
140 * f_count is 0 and it calls drm_gem_object_handle_unreference() 136 * f_count is 0 and it calls drm_gem_object_handle_unreference()
@@ -219,8 +215,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
219 struct exynos_drm_gem_buf *buffer; 215 struct exynos_drm_gem_buf *buffer;
220 int ret; 216 int ret;
221 217
222 DRM_DEBUG_PRIME("%s\n", __FILE__);
223
224 /* is this one of own objects? */ 218 /* is this one of own objects? */
225 if (dma_buf->ops == &exynos_dmabuf_ops) { 219 if (dma_buf->ops == &exynos_dmabuf_ops) {
226 struct drm_gem_object *obj; 220 struct drm_gem_object *obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba6d995e4375..276237348d1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -46,8 +46,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
46 int ret; 46 int ret;
47 int nr; 47 int nr;
48 48
49 DRM_DEBUG_DRIVER("%s\n", __FILE__);
50
51 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); 49 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
52 if (!private) { 50 if (!private) {
53 DRM_ERROR("failed to allocate private\n"); 51 DRM_ERROR("failed to allocate private\n");
@@ -140,8 +138,6 @@ err_crtc:
140 138
141static int exynos_drm_unload(struct drm_device *dev) 139static int exynos_drm_unload(struct drm_device *dev)
142{ 140{
143 DRM_DEBUG_DRIVER("%s\n", __FILE__);
144
145 exynos_drm_fbdev_fini(dev); 141 exynos_drm_fbdev_fini(dev);
146 exynos_drm_device_unregister(dev); 142 exynos_drm_device_unregister(dev);
147 drm_vblank_cleanup(dev); 143 drm_vblank_cleanup(dev);
@@ -160,8 +156,6 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
160{ 156{
161 struct drm_exynos_file_private *file_priv; 157 struct drm_exynos_file_private *file_priv;
162 158
163 DRM_DEBUG_DRIVER("%s\n", __FILE__);
164
165 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 159 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
166 if (!file_priv) 160 if (!file_priv)
167 return -ENOMEM; 161 return -ENOMEM;
@@ -178,8 +172,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
178 struct drm_pending_vblank_event *e, *t; 172 struct drm_pending_vblank_event *e, *t;
179 unsigned long flags; 173 unsigned long flags;
180 174
181 DRM_DEBUG_DRIVER("%s\n", __FILE__);
182
183 /* release events of current file */ 175 /* release events of current file */
184 spin_lock_irqsave(&dev->event_lock, flags); 176 spin_lock_irqsave(&dev->event_lock, flags);
185 list_for_each_entry_safe(e, t, &private->pageflip_event_list, 177 list_for_each_entry_safe(e, t, &private->pageflip_event_list,
@@ -196,8 +188,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
196 188
197static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 189static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
198{ 190{
199 DRM_DEBUG_DRIVER("%s\n", __FILE__);
200
201 if (!file->driver_priv) 191 if (!file->driver_priv)
202 return; 192 return;
203 193
@@ -207,8 +197,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
207 197
208static void exynos_drm_lastclose(struct drm_device *dev) 198static void exynos_drm_lastclose(struct drm_device *dev)
209{ 199{
210 DRM_DEBUG_DRIVER("%s\n", __FILE__);
211
212 exynos_drm_fbdev_restore_mode(dev); 200 exynos_drm_fbdev_restore_mode(dev);
213} 201}
214 202
@@ -292,8 +280,6 @@ static struct drm_driver exynos_drm_driver = {
292 280
293static int exynos_drm_platform_probe(struct platform_device *pdev) 281static int exynos_drm_platform_probe(struct platform_device *pdev)
294{ 282{
295 DRM_DEBUG_DRIVER("%s\n", __FILE__);
296
297 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 283 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
298 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls); 284 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
299 285
@@ -302,8 +288,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
302 288
303static int exynos_drm_platform_remove(struct platform_device *pdev) 289static int exynos_drm_platform_remove(struct platform_device *pdev)
304{ 290{
305 DRM_DEBUG_DRIVER("%s\n", __FILE__);
306
307 drm_platform_exit(&exynos_drm_driver, pdev); 291 drm_platform_exit(&exynos_drm_driver, pdev);
308 292
309 return 0; 293 return 0;
@@ -322,8 +306,6 @@ static int __init exynos_drm_init(void)
322{ 306{
323 int ret; 307 int ret;
324 308
325 DRM_DEBUG_DRIVER("%s\n", __FILE__);
326
327#ifdef CONFIG_DRM_EXYNOS_FIMD 309#ifdef CONFIG_DRM_EXYNOS_FIMD
328 ret = platform_driver_register(&fimd_driver); 310 ret = platform_driver_register(&fimd_driver);
329 if (ret < 0) 311 if (ret < 0)
@@ -455,8 +437,6 @@ out_fimd:
455 437
456static void __exit exynos_drm_exit(void) 438static void __exit exynos_drm_exit(void)
457{ 439{
458 DRM_DEBUG_DRIVER("%s\n", __FILE__);
459
460 platform_device_unregister(exynos_drm_pdev); 440 platform_device_unregister(exynos_drm_pdev);
461 441
462 platform_driver_unregister(&exynos_drm_platform_driver); 442 platform_driver_unregister(&exynos_drm_platform_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 680a7c1b9dea..eaa19668bf00 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -142,7 +142,7 @@ struct exynos_drm_overlay {
 142 * @is_connected: check whether the display is connected or not. 142 * @is_connected: check whether the display is connected or not.
143 * @get_edid: get edid modes from display driver. 143 * @get_edid: get edid modes from display driver.
144 * @get_panel: get panel object from display driver. 144 * @get_panel: get panel object from display driver.
145 * @check_timing: check if timing is valid or not. 145 * @check_mode: check if mode is valid or not.
146 * @power_on: display device on or off. 146 * @power_on: display device on or off.
147 */ 147 */
148struct exynos_drm_display_ops { 148struct exynos_drm_display_ops {
@@ -151,7 +151,7 @@ struct exynos_drm_display_ops {
151 struct edid *(*get_edid)(struct device *dev, 151 struct edid *(*get_edid)(struct device *dev,
152 struct drm_connector *connector); 152 struct drm_connector *connector);
153 void *(*get_panel)(struct device *dev); 153 void *(*get_panel)(struct device *dev);
154 int (*check_timing)(struct device *dev, void *timing); 154 int (*check_mode)(struct device *dev, struct drm_display_mode *mode);
155 int (*power_on)(struct device *dev, int mode); 155 int (*power_on)(struct device *dev, int mode);
156}; 156};
157 157
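With check_timing replaced by check_mode, a display driver validates the drm_display_mode it is actually handed instead of a lossy fb_videomode conversion. A hypothetical implementation for a panel with fixed limits (the bounds are invented; returning 0 marks the mode acceptable, matching the connector code above):

    static int example_check_mode(struct device *dev,
                                  struct drm_display_mode *mode)
    {
            if (mode->hdisplay > 1920 || mode->vdisplay > 1080)
                    return -EINVAL; /* larger than the panel */
            if (mode->clock > 150000)
                    return -EINVAL; /* pixel clock (kHz) beyond the PHY */
            return 0;
    }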
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index c63721f64aec..a99a033793bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -61,7 +61,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
61 struct exynos_drm_manager_ops *manager_ops = manager->ops; 61 struct exynos_drm_manager_ops *manager_ops = manager->ops;
62 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 62 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
63 63
64 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); 64 DRM_DEBUG_KMS("encoder dpms: %d\n", mode);
65 65
66 if (exynos_encoder->dpms == mode) { 66 if (exynos_encoder->dpms == mode) {
67 DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); 67 DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
@@ -104,8 +104,6 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
104 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 104 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
105 struct exynos_drm_manager_ops *manager_ops = manager->ops; 105 struct exynos_drm_manager_ops *manager_ops = manager->ops;
106 106
107 DRM_DEBUG_KMS("%s\n", __FILE__);
108
109 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 107 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
110 if (connector->encoder == encoder) 108 if (connector->encoder == encoder)
111 if (manager_ops && manager_ops->mode_fixup) 109 if (manager_ops && manager_ops->mode_fixup)
@@ -155,8 +153,6 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
155 struct exynos_drm_manager *manager; 153 struct exynos_drm_manager *manager;
156 struct exynos_drm_manager_ops *manager_ops; 154 struct exynos_drm_manager_ops *manager_ops;
157 155
158 DRM_DEBUG_KMS("%s\n", __FILE__);
159
160 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 156 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
161 if (connector->encoder == encoder) { 157 if (connector->encoder == encoder) {
162 struct exynos_drm_encoder *exynos_encoder; 158 struct exynos_drm_encoder *exynos_encoder;
@@ -189,8 +185,6 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
189 185
190static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) 186static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
191{ 187{
192 DRM_DEBUG_KMS("%s\n", __FILE__);
193
194 /* drm framework doesn't check NULL. */ 188 /* drm framework doesn't check NULL. */
195} 189}
196 190
@@ -200,8 +194,6 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
200 struct exynos_drm_manager *manager = exynos_encoder->manager; 194 struct exynos_drm_manager *manager = exynos_encoder->manager;
201 struct exynos_drm_manager_ops *manager_ops = manager->ops; 195 struct exynos_drm_manager_ops *manager_ops = manager->ops;
202 196
203 DRM_DEBUG_KMS("%s\n", __FILE__);
204
205 if (manager_ops && manager_ops->commit) 197 if (manager_ops && manager_ops->commit)
206 manager_ops->commit(manager->dev); 198 manager_ops->commit(manager->dev);
207 199
@@ -274,8 +266,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
274 struct exynos_drm_encoder *exynos_encoder = 266 struct exynos_drm_encoder *exynos_encoder =
275 to_exynos_encoder(encoder); 267 to_exynos_encoder(encoder);
276 268
277 DRM_DEBUG_KMS("%s\n", __FILE__);
278
279 exynos_encoder->manager->pipe = -1; 269 exynos_encoder->manager->pipe = -1;
280 270
281 drm_encoder_cleanup(encoder); 271 drm_encoder_cleanup(encoder);
@@ -315,8 +305,6 @@ void exynos_drm_encoder_setup(struct drm_device *dev)
315{ 305{
316 struct drm_encoder *encoder; 306 struct drm_encoder *encoder;
317 307
318 DRM_DEBUG_KMS("%s\n", __FILE__);
319
320 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 308 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
321 encoder->possible_clones = exynos_drm_encoder_clones(encoder); 309 encoder->possible_clones = exynos_drm_encoder_clones(encoder);
322} 310}
@@ -329,8 +317,6 @@ exynos_drm_encoder_create(struct drm_device *dev,
329 struct drm_encoder *encoder; 317 struct drm_encoder *encoder;
330 struct exynos_drm_encoder *exynos_encoder; 318 struct exynos_drm_encoder *exynos_encoder;
331 319
332 DRM_DEBUG_KMS("%s\n", __FILE__);
333
334 if (!manager || !possible_crtcs) 320 if (!manager || !possible_crtcs)
335 return NULL; 321 return NULL;
336 322
@@ -427,8 +413,6 @@ void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
427 struct exynos_drm_manager_ops *manager_ops = manager->ops; 413 struct exynos_drm_manager_ops *manager_ops = manager->ops;
428 int mode = *(int *)data; 414 int mode = *(int *)data;
429 415
430 DRM_DEBUG_KMS("%s\n", __FILE__);
431
432 if (manager_ops && manager_ops->dpms) 416 if (manager_ops && manager_ops->dpms)
433 manager_ops->dpms(manager->dev, mode); 417 manager_ops->dpms(manager->dev, mode);
434 418
@@ -449,8 +433,6 @@ void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
449 to_exynos_encoder(encoder)->manager; 433 to_exynos_encoder(encoder)->manager;
450 int pipe = *(int *)data; 434 int pipe = *(int *)data;
451 435
452 DRM_DEBUG_KMS("%s\n", __FILE__);
453
454 /* 436 /*
455 * when crtc is detached from encoder, this pipe is used 437 * when crtc is detached from encoder, this pipe is used
456 * to select manager operation 438 * to select manager operation
@@ -465,8 +447,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
465 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 447 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
466 struct exynos_drm_overlay *overlay = data; 448 struct exynos_drm_overlay *overlay = data;
467 449
468 DRM_DEBUG_KMS("%s\n", __FILE__);
469
470 if (overlay_ops && overlay_ops->mode_set) 450 if (overlay_ops && overlay_ops->mode_set)
471 overlay_ops->mode_set(manager->dev, overlay); 451 overlay_ops->mode_set(manager->dev, overlay);
472} 452}
@@ -478,8 +458,6 @@ void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
478 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 458 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
479 int zpos = DEFAULT_ZPOS; 459 int zpos = DEFAULT_ZPOS;
480 460
481 DRM_DEBUG_KMS("%s\n", __FILE__);
482
483 if (data) 461 if (data)
484 zpos = *(int *)data; 462 zpos = *(int *)data;
485 463
@@ -494,8 +472,6 @@ void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
494 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 472 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
495 int zpos = DEFAULT_ZPOS; 473 int zpos = DEFAULT_ZPOS;
496 474
497 DRM_DEBUG_KMS("%s\n", __FILE__);
498
499 if (data) 475 if (data)
500 zpos = *(int *)data; 476 zpos = *(int *)data;
501 477
@@ -510,8 +486,6 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
510 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 486 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
511 int zpos = DEFAULT_ZPOS; 487 int zpos = DEFAULT_ZPOS;
512 488
513 DRM_DEBUG_KMS("%s\n", __FILE__);
514
515 if (data) 489 if (data)
516 zpos = *(int *)data; 490 zpos = *(int *)data;
517 491
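
The encoder hunks above all drop the same boilerplate: a bare DRM_DEBUG_KMS("%s\n", __FILE__) at the top of each function. The line was redundant because the DRM debug macros already log the calling function; around this merge the macro expanded roughly like this (a sketch of the drmP.h definition, not verbatim):

    #define DRM_DEBUG_KMS(fmt, args...)                          \
            do {                                                 \
                    drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME,    \
                                        __func__, fmt, ##args);  \
            } while (0)

Since __func__ is passed along with every call, a separate "%s" tracing line added nothing — and printing __FILE__ rather than __func__ was doubly unhelpful. The same cleanup repeats through the fb, fbdev, fimc, fimd, gem and gsc files below, where remaining messages also drop their now-redundant "%s:", __func__ prefixes.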
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0e04f4ea441f..c2d149f0408a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -70,8 +70,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
70 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 70 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
71 unsigned int i; 71 unsigned int i;
72 72
73 DRM_DEBUG_KMS("%s\n", __FILE__);
74
 75 /* make sure that overlay data are updated before releasing fb. */ 73 /* make sure that overlay data are updated before releasing fb. */
76 exynos_drm_encoder_complete_scanout(fb); 74 exynos_drm_encoder_complete_scanout(fb);
77 75
@@ -97,8 +95,6 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
97{ 95{
98 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 96 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
99 97
100 DRM_DEBUG_KMS("%s\n", __FILE__);
101
102 /* This fb should have only one gem object. */ 98 /* This fb should have only one gem object. */
103 if (WARN_ON(exynos_fb->buf_cnt != 1)) 99 if (WARN_ON(exynos_fb->buf_cnt != 1))
104 return -EINVAL; 100 return -EINVAL;
@@ -112,8 +108,6 @@ static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
112 unsigned color, struct drm_clip_rect *clips, 108 unsigned color, struct drm_clip_rect *clips,
113 unsigned num_clips) 109 unsigned num_clips)
114{ 110{
115 DRM_DEBUG_KMS("%s\n", __FILE__);
116
117 /* TODO */ 111 /* TODO */
118 112
119 return 0; 113 return 0;
@@ -225,8 +219,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
225 struct exynos_drm_fb *exynos_fb; 219 struct exynos_drm_fb *exynos_fb;
226 int i, ret; 220 int i, ret;
227 221
228 DRM_DEBUG_KMS("%s\n", __FILE__);
229
230 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 222 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
231 if (!exynos_fb) { 223 if (!exynos_fb) {
232 DRM_ERROR("failed to allocate exynos drm framebuffer\n"); 224 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
@@ -293,8 +285,6 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
293 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 285 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
294 struct exynos_drm_gem_buf *buffer; 286 struct exynos_drm_gem_buf *buffer;
295 287
296 DRM_DEBUG_KMS("%s\n", __FILE__);
297
298 if (index >= MAX_FB_BUFFER) 288 if (index >= MAX_FB_BUFFER)
299 return NULL; 289 return NULL;
300 290
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8f007aaeffc3..8e60bd61137f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -43,8 +43,6 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
43 unsigned long vm_size; 43 unsigned long vm_size;
44 int ret; 44 int ret;
45 45
46 DRM_DEBUG_KMS("%s\n", __func__);
47
48 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 46 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
49 47
50 vm_size = vma->vm_end - vma->vm_start; 48 vm_size = vma->vm_end - vma->vm_start;
@@ -84,8 +82,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
84 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); 82 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
85 unsigned long offset; 83 unsigned long offset;
86 84
87 DRM_DEBUG_KMS("%s\n", __FILE__);
88
89 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 85 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
90 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); 86 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
91 87
@@ -148,8 +144,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
148 unsigned long size; 144 unsigned long size;
149 int ret; 145 int ret;
150 146
151 DRM_DEBUG_KMS("%s\n", __FILE__);
152
153 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", 147 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
154 sizes->surface_width, sizes->surface_height, 148 sizes->surface_width, sizes->surface_height,
155 sizes->surface_bpp); 149 sizes->surface_bpp);
@@ -238,8 +232,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
238 unsigned int num_crtc; 232 unsigned int num_crtc;
239 int ret; 233 int ret;
240 234
241 DRM_DEBUG_KMS("%s\n", __FILE__);
242
243 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) 235 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
244 return 0; 236 return 0;
245 237
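
The exynos_drm_fb_mmap() hunk above keeps only the functional lines; the rest of the handler (elided by the diff context) remaps the GEM buffer backing the fbdev emulation into the process. A minimal sketch of that remap step — buffer->kvaddr stands in for the kernel CPU mapping, and the exact field names are assumptions, not quotes from this file:

    vm_size = vma->vm_end - vma->vm_start;

    if (vm_size > buffer->size)
            return -EINVAL;

    /* hand the CMA/IOMMU-backed buffer to userspace in one call */
    ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr,
                         buffer->dma_addr, buffer->size,
                         &buffer->dma_attrs);
    if (ret < 0)
            DRM_ERROR("failed to mmap.\n");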
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 4a1616a18ab7..61b094f689a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -175,8 +175,6 @@ static void fimc_sw_reset(struct fimc_context *ctx)
175{ 175{
176 u32 cfg; 176 u32 cfg;
177 177
178 DRM_DEBUG_KMS("%s\n", __func__);
179
180 /* stop dma operation */ 178 /* stop dma operation */
181 cfg = fimc_read(EXYNOS_CISTATUS); 179 cfg = fimc_read(EXYNOS_CISTATUS);
182 if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) { 180 if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
@@ -210,8 +208,6 @@ static void fimc_sw_reset(struct fimc_context *ctx)
210 208
211static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) 209static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
212{ 210{
213 DRM_DEBUG_KMS("%s\n", __func__);
214
215 return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK, 211 return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
216 SYSREG_FIMD0WB_DEST_MASK, 212 SYSREG_FIMD0WB_DEST_MASK,
217 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT); 213 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
@@ -221,7 +217,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
221{ 217{
222 u32 cfg; 218 u32 cfg;
223 219
224 DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb); 220 DRM_DEBUG_KMS("wb[%d]\n", wb);
225 221
226 cfg = fimc_read(EXYNOS_CIGCTRL); 222 cfg = fimc_read(EXYNOS_CIGCTRL);
227 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | 223 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
@@ -257,10 +253,10 @@ static void fimc_set_polarity(struct fimc_context *ctx,
257{ 253{
258 u32 cfg; 254 u32 cfg;
259 255
260 DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n", 256 DRM_DEBUG_KMS("inv_pclk[%d]inv_vsync[%d]\n",
261 __func__, pol->inv_pclk, pol->inv_vsync); 257 pol->inv_pclk, pol->inv_vsync);
262 DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n", 258 DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
263 __func__, pol->inv_href, pol->inv_hsync); 259 pol->inv_href, pol->inv_hsync);
264 260
265 cfg = fimc_read(EXYNOS_CIGCTRL); 261 cfg = fimc_read(EXYNOS_CIGCTRL);
266 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC | 262 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
@@ -282,7 +278,7 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
282{ 278{
283 u32 cfg; 279 u32 cfg;
284 280
285 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 281 DRM_DEBUG_KMS("enable[%d]\n", enable);
286 282
287 cfg = fimc_read(EXYNOS_CIGCTRL); 283 cfg = fimc_read(EXYNOS_CIGCTRL);
288 if (enable) 284 if (enable)
@@ -298,7 +294,7 @@ static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
298{ 294{
299 u32 cfg; 295 u32 cfg;
300 296
301 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__, 297 DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
302 enable, overflow, level); 298 enable, overflow, level);
303 299
304 cfg = fimc_read(EXYNOS_CIGCTRL); 300 cfg = fimc_read(EXYNOS_CIGCTRL);
@@ -319,8 +315,6 @@ static void fimc_clear_irq(struct fimc_context *ctx)
319{ 315{
320 u32 cfg; 316 u32 cfg;
321 317
322 DRM_DEBUG_KMS("%s\n", __func__);
323
324 cfg = fimc_read(EXYNOS_CIGCTRL); 318 cfg = fimc_read(EXYNOS_CIGCTRL);
325 cfg |= EXYNOS_CIGCTRL_IRQ_CLR; 319 cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
326 fimc_write(cfg, EXYNOS_CIGCTRL); 320 fimc_write(cfg, EXYNOS_CIGCTRL);
@@ -335,7 +329,7 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
335 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB | 329 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
336 EXYNOS_CISTATUS_OVFICR; 330 EXYNOS_CISTATUS_OVFICR;
337 331
338 DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag); 332 DRM_DEBUG_KMS("flag[0x%x]\n", flag);
339 333
340 if (status & flag) { 334 if (status & flag) {
341 cfg = fimc_read(EXYNOS_CIWDOFST); 335 cfg = fimc_read(EXYNOS_CIWDOFST);
@@ -364,7 +358,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
364 358
365 cfg = fimc_read(EXYNOS_CISTATUS); 359 cfg = fimc_read(EXYNOS_CISTATUS);
366 360
367 DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg); 361 DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
368 362
369 if (!(cfg & EXYNOS_CISTATUS_FRAMEEND)) 363 if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
370 return false; 364 return false;
@@ -380,15 +374,13 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
380 u32 cfg; 374 u32 cfg;
381 int frame_cnt, buf_id; 375 int frame_cnt, buf_id;
382 376
383 DRM_DEBUG_KMS("%s\n", __func__);
384
385 cfg = fimc_read(EXYNOS_CISTATUS2); 377 cfg = fimc_read(EXYNOS_CISTATUS2);
386 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg); 378 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
387 379
388 if (frame_cnt == 0) 380 if (frame_cnt == 0)
389 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg); 381 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
390 382
391 DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__, 383 DRM_DEBUG_KMS("present[%d]before[%d]\n",
392 EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg), 384 EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
393 EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg)); 385 EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
394 386
@@ -398,7 +390,7 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
398 } 390 }
399 391
400 buf_id = frame_cnt - 1; 392 buf_id = frame_cnt - 1;
401 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id); 393 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
402 394
403 return buf_id; 395 return buf_id;
404} 396}
@@ -407,7 +399,7 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
407{ 399{
408 u32 cfg; 400 u32 cfg;
409 401
410 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 402 DRM_DEBUG_KMS("enable[%d]\n", enable);
411 403
412 cfg = fimc_read(EXYNOS_CIOCTRL); 404 cfg = fimc_read(EXYNOS_CIOCTRL);
413 if (enable) 405 if (enable)
@@ -424,7 +416,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
424 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 416 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
425 u32 cfg; 417 u32 cfg;
426 418
427 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); 419 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
428 420
429 /* RGB */ 421 /* RGB */
430 cfg = fimc_read(EXYNOS_CISCCTRL); 422 cfg = fimc_read(EXYNOS_CISCCTRL);
@@ -497,7 +489,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
497 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 489 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
498 u32 cfg; 490 u32 cfg;
499 491
500 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); 492 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
501 493
502 cfg = fimc_read(EXYNOS_MSCTRL); 494 cfg = fimc_read(EXYNOS_MSCTRL);
503 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB; 495 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
@@ -557,8 +549,7 @@ static int fimc_src_set_transf(struct device *dev,
557 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 549 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
558 u32 cfg1, cfg2; 550 u32 cfg1, cfg2;
559 551
560 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__, 552 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
561 degree, flip);
562 553
563 cfg1 = fimc_read(EXYNOS_MSCTRL); 554 cfg1 = fimc_read(EXYNOS_MSCTRL);
564 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | 555 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
@@ -621,10 +612,9 @@ static int fimc_set_window(struct fimc_context *ctx,
621 v1 = pos->y; 612 v1 = pos->y;
622 v2 = sz->vsize - pos->h - pos->y; 613 v2 = sz->vsize - pos->h - pos->y;
623 614
624 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", 615 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
625 __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); 616 pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
626 DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__, 617 DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
627 h1, h2, v1, v2);
628 618
629 /* 619 /*
630 * set window offset 1, 2 size 620 * set window offset 1, 2 size
@@ -653,8 +643,8 @@ static int fimc_src_set_size(struct device *dev, int swap,
653 struct drm_exynos_sz img_sz = *sz; 643 struct drm_exynos_sz img_sz = *sz;
654 u32 cfg; 644 u32 cfg;
655 645
656 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n", 646 DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
657 __func__, swap, sz->hsize, sz->vsize); 647 swap, sz->hsize, sz->vsize);
658 648
659 /* original size */ 649 /* original size */
660 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | 650 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
@@ -662,8 +652,7 @@ static int fimc_src_set_size(struct device *dev, int swap,
662 652
663 fimc_write(cfg, EXYNOS_ORGISIZE); 653 fimc_write(cfg, EXYNOS_ORGISIZE);
664 654
665 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__, 655 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
666 pos->x, pos->y, pos->w, pos->h);
667 656
668 if (swap) { 657 if (swap) {
669 img_pos.w = pos->h; 658 img_pos.w = pos->h;
@@ -720,7 +709,7 @@ static int fimc_src_set_addr(struct device *dev,
720 709
721 property = &c_node->property; 710 property = &c_node->property;
722 711
723 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 712 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
724 property->prop_id, buf_id, buf_type); 713 property->prop_id, buf_id, buf_type);
725 714
726 if (buf_id > FIMC_MAX_SRC) { 715 if (buf_id > FIMC_MAX_SRC) {
@@ -772,7 +761,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
772 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 761 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
773 u32 cfg; 762 u32 cfg;
774 763
775 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); 764 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
776 765
777 /* RGB */ 766 /* RGB */
778 cfg = fimc_read(EXYNOS_CISCCTRL); 767 cfg = fimc_read(EXYNOS_CISCCTRL);
@@ -851,7 +840,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
851 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 840 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
852 u32 cfg; 841 u32 cfg;
853 842
854 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); 843 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
855 844
856 cfg = fimc_read(EXYNOS_CIEXTEN); 845 cfg = fimc_read(EXYNOS_CIEXTEN);
857 846
@@ -919,8 +908,7 @@ static int fimc_dst_set_transf(struct device *dev,
919 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 908 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
920 u32 cfg; 909 u32 cfg;
921 910
922 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__, 911 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
923 degree, flip);
924 912
925 cfg = fimc_read(EXYNOS_CITRGFMT); 913 cfg = fimc_read(EXYNOS_CITRGFMT);
926 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; 914 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
@@ -970,7 +958,7 @@ static int fimc_dst_set_transf(struct device *dev,
970 958
971static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift) 959static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
972{ 960{
973 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst); 961 DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
974 962
975 if (src >= dst * 64) { 963 if (src >= dst * 64) {
976 DRM_ERROR("failed to make ratio and shift.\n"); 964 DRM_ERROR("failed to make ratio and shift.\n");
@@ -1039,20 +1027,20 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
1039 1027
1040 pre_dst_width = src_w / pre_hratio; 1028 pre_dst_width = src_w / pre_hratio;
1041 pre_dst_height = src_h / pre_vratio; 1029 pre_dst_height = src_h / pre_vratio;
1042 DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__, 1030 DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
1043 pre_dst_width, pre_dst_height); 1031 pre_dst_width, pre_dst_height);
1044 DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n", 1032 DRM_DEBUG_KMS("pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
1045 __func__, pre_hratio, hfactor, pre_vratio, vfactor); 1033 pre_hratio, hfactor, pre_vratio, vfactor);
1046 1034
1047 sc->hratio = (src_w << 14) / (dst_w << hfactor); 1035 sc->hratio = (src_w << 14) / (dst_w << hfactor);
1048 sc->vratio = (src_h << 14) / (dst_h << vfactor); 1036 sc->vratio = (src_h << 14) / (dst_h << vfactor);
1049 sc->up_h = (dst_w >= src_w) ? true : false; 1037 sc->up_h = (dst_w >= src_w) ? true : false;
1050 sc->up_v = (dst_h >= src_h) ? true : false; 1038 sc->up_v = (dst_h >= src_h) ? true : false;
1051 DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n", 1039 DRM_DEBUG_KMS("hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
1052 __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v); 1040 sc->hratio, sc->vratio, sc->up_h, sc->up_v);
1053 1041
1054 shfactor = FIMC_SHFACTOR - (hfactor + vfactor); 1042 shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
1055 DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor); 1043 DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
1056 1044
1057 cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) | 1045 cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
1058 EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) | 1046 EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
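
The prescaler math above is plain fixed point: hratio and vratio are Q14 values, so 1 << 14 represents a scale factor of 1.0. A worked example with assumed numbers:

    /* downscaling 1920 -> 1280 with no pre-scaler division (hfactor = 0) */
    unsigned int src_w = 1920, dst_w = 1280, hfactor = 0;
    unsigned int hratio = (src_w << 14) / (dst_w << hfactor);
    /* hratio == 24576 == 1.5 * 16384: each output pixel advances
     * 1.5 source pixels */

When the pre-scaler has already divided the source by 2^hfactor, shifting dst_w left by hfactor folds that division into the same ratio, so the main scaler only handles what the pre-scaler left over.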
@@ -1070,10 +1058,10 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
1070{ 1058{
1071 u32 cfg, cfg_ext; 1059 u32 cfg, cfg_ext;
1072 1060
1073 DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n", 1061 DRM_DEBUG_KMS("range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
1074 __func__, sc->range, sc->bypass, sc->up_h, sc->up_v); 1062 sc->range, sc->bypass, sc->up_h, sc->up_v);
1075 DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n", 1063 DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
1076 __func__, sc->hratio, sc->vratio); 1064 sc->hratio, sc->vratio);
1077 1065
1078 cfg = fimc_read(EXYNOS_CISCCTRL); 1066 cfg = fimc_read(EXYNOS_CISCCTRL);
1079 cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS | 1067 cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
@@ -1113,8 +1101,8 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1113 struct drm_exynos_sz img_sz = *sz; 1101 struct drm_exynos_sz img_sz = *sz;
1114 u32 cfg; 1102 u32 cfg;
1115 1103
1116 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n", 1104 DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
1117 __func__, swap, sz->hsize, sz->vsize); 1105 swap, sz->hsize, sz->vsize);
1118 1106
1119 /* original size */ 1107 /* original size */
1120 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) | 1108 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
@@ -1122,8 +1110,7 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1122 1110
1123 fimc_write(cfg, EXYNOS_ORGOSIZE); 1111 fimc_write(cfg, EXYNOS_ORGOSIZE);
1124 1112
1125 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", 1113 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
1126 __func__, pos->x, pos->y, pos->w, pos->h);
1127 1114
1128 /* CSC ITU */ 1115 /* CSC ITU */
1129 cfg = fimc_read(EXYNOS_CIGCTRL); 1116 cfg = fimc_read(EXYNOS_CIGCTRL);
@@ -1180,7 +1167,7 @@ static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
1180 if (cfg & (mask << i)) 1167 if (cfg & (mask << i))
1181 buf_num++; 1168 buf_num++;
1182 1169
1183 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num); 1170 DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
1184 1171
1185 return buf_num; 1172 return buf_num;
1186} 1173}
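
fimc_dst_get_buf_seq() above counts enabled buffer slots by testing one bit per loop iteration. Assuming mask is BIT(0) and the slot-enable bits occupy a contiguous field starting at bit 0 (FIMC_REG_SZ below is a stand-in for the slot count, not a name taken from this file), the loop collapses to a population count:

    #include <linux/bitops.h>

    /* same result as the if (cfg & (mask << i)) buf_num++ loop,
     * assuming FIMC_REG_SZ < 32 */
    buf_num = hweight32(cfg & ((1U << FIMC_REG_SZ) - 1));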
@@ -1194,8 +1181,7 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1194 u32 mask = 0x00000001 << buf_id; 1181 u32 mask = 0x00000001 << buf_id;
1195 int ret = 0; 1182 int ret = 0;
1196 1183
1197 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__, 1184 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1198 buf_id, buf_type);
1199 1185
1200 mutex_lock(&ctx->lock); 1186 mutex_lock(&ctx->lock);
1201 1187
@@ -1252,7 +1238,7 @@ static int fimc_dst_set_addr(struct device *dev,
1252 1238
1253 property = &c_node->property; 1239 property = &c_node->property;
1254 1240
1255 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 1241 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
1256 property->prop_id, buf_id, buf_type); 1242 property->prop_id, buf_id, buf_type);
1257 1243
1258 if (buf_id > FIMC_MAX_DST) { 1244 if (buf_id > FIMC_MAX_DST) {
@@ -1302,7 +1288,7 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
1302 1288
1303static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) 1289static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1304{ 1290{
1305 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 1291 DRM_DEBUG_KMS("enable[%d]\n", enable);
1306 1292
1307 if (enable) { 1293 if (enable) {
1308 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); 1294 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
@@ -1326,7 +1312,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1326 c_node->event_work; 1312 c_node->event_work;
1327 int buf_id; 1313 int buf_id;
1328 1314
1329 DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id); 1315 DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
1330 1316
1331 fimc_clear_irq(ctx); 1317 fimc_clear_irq(ctx);
1332 if (fimc_check_ovf(ctx)) 1318 if (fimc_check_ovf(ctx))
@@ -1339,7 +1325,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1339 if (buf_id < 0) 1325 if (buf_id < 0)
1340 return IRQ_HANDLED; 1326 return IRQ_HANDLED;
1341 1327
1342 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id); 1328 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
1343 1329
1344 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) { 1330 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
1345 DRM_ERROR("failed to dequeue.\n"); 1331 DRM_ERROR("failed to dequeue.\n");
@@ -1357,8 +1343,6 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1357{ 1343{
1358 struct drm_exynos_ipp_prop_list *prop_list; 1344 struct drm_exynos_ipp_prop_list *prop_list;
1359 1345
1360 DRM_DEBUG_KMS("%s\n", __func__);
1361
1362 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 1346 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1363 if (!prop_list) { 1347 if (!prop_list) {
1364 DRM_ERROR("failed to alloc property list.\n"); 1348 DRM_ERROR("failed to alloc property list.\n");
@@ -1402,7 +1386,7 @@ static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
1402 case EXYNOS_DRM_FLIP_BOTH: 1386 case EXYNOS_DRM_FLIP_BOTH:
1403 return true; 1387 return true;
1404 default: 1388 default:
1405 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 1389 DRM_DEBUG_KMS("invalid flip\n");
1406 return false; 1390 return false;
1407 } 1391 }
1408} 1392}
@@ -1419,8 +1403,6 @@ static int fimc_ippdrv_check_property(struct device *dev,
1419 bool swap; 1403 bool swap;
1420 int i; 1404 int i;
1421 1405
1422 DRM_DEBUG_KMS("%s\n", __func__);
1423
1424 for_each_ipp_ops(i) { 1406 for_each_ipp_ops(i) {
1425 if ((i == EXYNOS_DRM_OPS_SRC) && 1407 if ((i == EXYNOS_DRM_OPS_SRC) &&
1426 (property->cmd == IPP_CMD_WB)) 1408 (property->cmd == IPP_CMD_WB))
@@ -1526,8 +1508,6 @@ static void fimc_clear_addr(struct fimc_context *ctx)
1526{ 1508{
1527 int i; 1509 int i;
1528 1510
1529 DRM_DEBUG_KMS("%s:\n", __func__);
1530
1531 for (i = 0; i < FIMC_MAX_SRC; i++) { 1511 for (i = 0; i < FIMC_MAX_SRC; i++) {
1532 fimc_write(0, EXYNOS_CIIYSA(i)); 1512 fimc_write(0, EXYNOS_CIIYSA(i));
1533 fimc_write(0, EXYNOS_CIICBSA(i)); 1513 fimc_write(0, EXYNOS_CIICBSA(i));
@@ -1545,8 +1525,6 @@ static int fimc_ippdrv_reset(struct device *dev)
1545{ 1525{
1546 struct fimc_context *ctx = get_fimc_context(dev); 1526 struct fimc_context *ctx = get_fimc_context(dev);
1547 1527
1548 DRM_DEBUG_KMS("%s\n", __func__);
1549
1550 /* reset h/w block */ 1528 /* reset h/w block */
1551 fimc_sw_reset(ctx); 1529 fimc_sw_reset(ctx);
1552 1530
@@ -1570,7 +1548,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1570 int ret, i; 1548 int ret, i;
1571 u32 cfg0, cfg1; 1549 u32 cfg0, cfg1;
1572 1550
1573 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd); 1551 DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1574 1552
1575 if (!c_node) { 1553 if (!c_node) {
1576 DRM_ERROR("failed to get c_node.\n"); 1554 DRM_ERROR("failed to get c_node.\n");
@@ -1679,7 +1657,7 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1679 struct drm_exynos_ipp_set_wb set_wb = {0, 0}; 1657 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1680 u32 cfg; 1658 u32 cfg;
1681 1659
1682 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd); 1660 DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1683 1661
1684 switch (cmd) { 1662 switch (cmd) {
1685 case IPP_CMD_M2M: 1663 case IPP_CMD_M2M:
@@ -1869,8 +1847,7 @@ static int fimc_probe(struct platform_device *pdev)
1869 goto err_put_clk; 1847 goto err_put_clk;
1870 } 1848 }
1871 1849
1872 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, 1850 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
1873 (int)ippdrv);
1874 1851
1875 mutex_init(&ctx->lock); 1852 mutex_init(&ctx->lock);
1876 platform_set_drvdata(pdev, ctx); 1853 platform_set_drvdata(pdev, ctx);
@@ -1917,7 +1894,7 @@ static int fimc_suspend(struct device *dev)
1917{ 1894{
1918 struct fimc_context *ctx = get_fimc_context(dev); 1895 struct fimc_context *ctx = get_fimc_context(dev);
1919 1896
1920 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1897 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1921 1898
1922 if (pm_runtime_suspended(dev)) 1899 if (pm_runtime_suspended(dev))
1923 return 0; 1900 return 0;
@@ -1929,7 +1906,7 @@ static int fimc_resume(struct device *dev)
1929{ 1906{
1930 struct fimc_context *ctx = get_fimc_context(dev); 1907 struct fimc_context *ctx = get_fimc_context(dev);
1931 1908
1932 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1909 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1933 1910
1934 if (!pm_runtime_suspended(dev)) 1911 if (!pm_runtime_suspended(dev))
1935 return fimc_clk_ctrl(ctx, true); 1912 return fimc_clk_ctrl(ctx, true);
@@ -1943,7 +1920,7 @@ static int fimc_runtime_suspend(struct device *dev)
1943{ 1920{
1944 struct fimc_context *ctx = get_fimc_context(dev); 1921 struct fimc_context *ctx = get_fimc_context(dev);
1945 1922
1946 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1923 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1947 1924
1948 return fimc_clk_ctrl(ctx, false); 1925 return fimc_clk_ctrl(ctx, false);
1949} 1926}
@@ -1952,7 +1929,7 @@ static int fimc_runtime_resume(struct device *dev)
1952{ 1929{
1953 struct fimc_context *ctx = get_fimc_context(dev); 1930 struct fimc_context *ctx = get_fimc_context(dev);
1954 1931
1955 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1932 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1956 1933
1957 return fimc_clk_ctrl(ctx, true); 1934 return fimc_clk_ctrl(ctx, true);
1958} 1935}
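
The fimc_suspend()/fimc_resume() hunks above show the usual split between system sleep and runtime PM: the system-sleep hooks check pm_runtime_suspended() and bail out when runtime PM has already gated the clocks, so the hardware is never disabled twice. A sketch of how the two sets of callbacks are typically wired together for this driver (struct layout assumed from the function names in the hunks, not quoted from the file):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static const struct dev_pm_ops fimc_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
            SET_RUNTIME_PM_OPS(fimc_runtime_suspend,
                               fimc_runtime_resume, NULL)
    };

With that table, the runtime hooks do the unconditional clock gating while the sleep hooks only act when the device is still powered.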
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 97c61dbffd82..3e106beca5b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -63,14 +63,24 @@
63 63
64struct fimd_driver_data { 64struct fimd_driver_data {
65 unsigned int timing_base; 65 unsigned int timing_base;
66
67 unsigned int has_shadowcon:1;
68 unsigned int has_clksel:1;
69};
70
71static struct fimd_driver_data s3c64xx_fimd_driver_data = {
72 .timing_base = 0x0,
73 .has_clksel = 1,
66}; 74};
67 75
68static struct fimd_driver_data exynos4_fimd_driver_data = { 76static struct fimd_driver_data exynos4_fimd_driver_data = {
69 .timing_base = 0x0, 77 .timing_base = 0x0,
78 .has_shadowcon = 1,
70}; 79};
71 80
72static struct fimd_driver_data exynos5_fimd_driver_data = { 81static struct fimd_driver_data exynos5_fimd_driver_data = {
73 .timing_base = 0x20000, 82 .timing_base = 0x20000,
83 .has_shadowcon = 1,
74}; 84};
75 85
76struct fimd_win_data { 86struct fimd_win_data {
@@ -107,10 +117,13 @@ struct fimd_context {
107 atomic_t wait_vsync_event; 117 atomic_t wait_vsync_event;
108 118
109 struct exynos_drm_panel_info *panel; 119 struct exynos_drm_panel_info *panel;
120 struct fimd_driver_data *driver_data;
110}; 121};
111 122
112#ifdef CONFIG_OF 123#ifdef CONFIG_OF
113static const struct of_device_id fimd_driver_dt_match[] = { 124static const struct of_device_id fimd_driver_dt_match[] = {
125 { .compatible = "samsung,s3c6400-fimd",
126 .data = &s3c64xx_fimd_driver_data },
114 { .compatible = "samsung,exynos4210-fimd", 127 { .compatible = "samsung,exynos4210-fimd",
115 .data = &exynos4_fimd_driver_data }, 128 .data = &exynos4_fimd_driver_data },
116 { .compatible = "samsung,exynos5250-fimd", 129 { .compatible = "samsung,exynos5250-fimd",
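
The new s3c6400 compatible entry reuses the existing per-variant lookup: each of_device_id (and, at the end of the file, each platform_device_id) carries a pointer to a fimd_driver_data describing that SoC's register quirks. The resolving helper, sketched from the surrounding code — the DT branch falls back to the legacy platform-ID table:

    static inline struct fimd_driver_data *drm_fimd_get_driver_data(
            struct platform_device *pdev)
    {
    #ifdef CONFIG_OF
            const struct of_device_id *of_id =
                    of_match_device(fimd_driver_dt_match, &pdev->dev);

            if (of_id)
                    return (struct fimd_driver_data *)of_id->data;
    #endif
            return (struct fimd_driver_data *)
                    platform_get_device_id(pdev)->driver_data;
    }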
@@ -137,8 +150,6 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
137 150
138static bool fimd_display_is_connected(struct device *dev) 151static bool fimd_display_is_connected(struct device *dev)
139{ 152{
140 DRM_DEBUG_KMS("%s\n", __FILE__);
141
142 /* TODO. */ 153 /* TODO. */
143 154
144 return true; 155 return true;
@@ -148,15 +159,11 @@ static void *fimd_get_panel(struct device *dev)
148{ 159{
149 struct fimd_context *ctx = get_fimd_context(dev); 160 struct fimd_context *ctx = get_fimd_context(dev);
150 161
151 DRM_DEBUG_KMS("%s\n", __FILE__);
152
153 return ctx->panel; 162 return ctx->panel;
154} 163}
155 164
156static int fimd_check_timing(struct device *dev, void *timing) 165static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
157{ 166{
158 DRM_DEBUG_KMS("%s\n", __FILE__);
159
160 /* TODO. */ 167 /* TODO. */
161 168
162 return 0; 169 return 0;
@@ -164,8 +171,6 @@ static int fimd_check_timing(struct device *dev, void *timing)
164 171
165static int fimd_display_power_on(struct device *dev, int mode) 172static int fimd_display_power_on(struct device *dev, int mode)
166{ 173{
167 DRM_DEBUG_KMS("%s\n", __FILE__);
168
169 /* TODO */ 174 /* TODO */
170 175
171 return 0; 176 return 0;
@@ -175,7 +180,7 @@ static struct exynos_drm_display_ops fimd_display_ops = {
175 .type = EXYNOS_DISPLAY_TYPE_LCD, 180 .type = EXYNOS_DISPLAY_TYPE_LCD,
176 .is_connected = fimd_display_is_connected, 181 .is_connected = fimd_display_is_connected,
177 .get_panel = fimd_get_panel, 182 .get_panel = fimd_get_panel,
178 .check_timing = fimd_check_timing, 183 .check_mode = fimd_check_mode,
179 .power_on = fimd_display_power_on, 184 .power_on = fimd_display_power_on,
180}; 185};
181 186
@@ -183,7 +188,7 @@ static void fimd_dpms(struct device *subdrv_dev, int mode)
183{ 188{
184 struct fimd_context *ctx = get_fimd_context(subdrv_dev); 189 struct fimd_context *ctx = get_fimd_context(subdrv_dev);
185 190
186 DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode); 191 DRM_DEBUG_KMS("%d\n", mode);
187 192
188 mutex_lock(&ctx->lock); 193 mutex_lock(&ctx->lock);
189 194
@@ -221,8 +226,6 @@ static void fimd_apply(struct device *subdrv_dev)
221 struct fimd_win_data *win_data; 226 struct fimd_win_data *win_data;
222 int i; 227 int i;
223 228
224 DRM_DEBUG_KMS("%s\n", __FILE__);
225
226 for (i = 0; i < WINDOWS_NR; i++) { 229 for (i = 0; i < WINDOWS_NR; i++) {
227 win_data = &ctx->win_data[i]; 230 win_data = &ctx->win_data[i];
228 if (win_data->enabled && (ovl_ops && ovl_ops->commit)) 231 if (win_data->enabled && (ovl_ops && ovl_ops->commit))
@@ -239,15 +242,12 @@ static void fimd_commit(struct device *dev)
239 struct exynos_drm_panel_info *panel = ctx->panel; 242 struct exynos_drm_panel_info *panel = ctx->panel;
240 struct fb_videomode *timing = &panel->timing; 243 struct fb_videomode *timing = &panel->timing;
241 struct fimd_driver_data *driver_data; 244 struct fimd_driver_data *driver_data;
242 struct platform_device *pdev = to_platform_device(dev);
243 u32 val; 245 u32 val;
244 246
245 driver_data = drm_fimd_get_driver_data(pdev); 247 driver_data = ctx->driver_data;
246 if (ctx->suspended) 248 if (ctx->suspended)
247 return; 249 return;
248 250
249 DRM_DEBUG_KMS("%s\n", __FILE__);
250
251 /* setup polarity values from machine code. */ 251 /* setup polarity values from machine code. */
252 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); 252 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
253 253
@@ -274,6 +274,11 @@ static void fimd_commit(struct device *dev)
274 val = ctx->vidcon0; 274 val = ctx->vidcon0;
275 val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); 275 val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
276 276
277 if (ctx->driver_data->has_clksel) {
278 val &= ~VIDCON0_CLKSEL_MASK;
279 val |= VIDCON0_CLKSEL_LCD;
280 }
281
277 if (ctx->clkdiv > 1) 282 if (ctx->clkdiv > 1)
278 val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR; 283 val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
279 else 284 else
@@ -292,8 +297,6 @@ static int fimd_enable_vblank(struct device *dev)
292 struct fimd_context *ctx = get_fimd_context(dev); 297 struct fimd_context *ctx = get_fimd_context(dev);
293 u32 val; 298 u32 val;
294 299
295 DRM_DEBUG_KMS("%s\n", __FILE__);
296
297 if (ctx->suspended) 300 if (ctx->suspended)
298 return -EPERM; 301 return -EPERM;
299 302
@@ -319,8 +322,6 @@ static void fimd_disable_vblank(struct device *dev)
319 struct fimd_context *ctx = get_fimd_context(dev); 322 struct fimd_context *ctx = get_fimd_context(dev);
320 u32 val; 323 u32 val;
321 324
322 DRM_DEBUG_KMS("%s\n", __FILE__);
323
324 if (ctx->suspended) 325 if (ctx->suspended)
325 return; 326 return;
326 327
@@ -370,8 +371,6 @@ static void fimd_win_mode_set(struct device *dev,
370 int win; 371 int win;
371 unsigned long offset; 372 unsigned long offset;
372 373
373 DRM_DEBUG_KMS("%s\n", __FILE__);
374
375 if (!overlay) { 374 if (!overlay) {
376 dev_err(dev, "overlay is NULL\n"); 375 dev_err(dev, "overlay is NULL\n");
377 return; 376 return;
@@ -381,7 +380,7 @@ static void fimd_win_mode_set(struct device *dev,
381 if (win == DEFAULT_ZPOS) 380 if (win == DEFAULT_ZPOS)
382 win = ctx->default_win; 381 win = ctx->default_win;
383 382
384 if (win < 0 || win > WINDOWS_NR) 383 if (win < 0 || win >= WINDOWS_NR)
385 return; 384 return;
386 385
387 offset = overlay->fb_x * (overlay->bpp >> 3); 386 offset = overlay->fb_x * (overlay->bpp >> 3);
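
The comparison fix in this hunk (and the matching ones in fimd_win_commit() and fimd_win_disable() below) closes an off-by-one: win indexes the ctx->win_data[WINDOWS_NR] array, so WINDOWS_NR itself is already one past the end.

    /* valid window indices are 0 .. WINDOWS_NR - 1 */
    if (win < 0 || win >= WINDOWS_NR)
            return;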
@@ -418,8 +417,6 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
418 struct fimd_win_data *win_data = &ctx->win_data[win]; 417 struct fimd_win_data *win_data = &ctx->win_data[win];
419 unsigned long val; 418 unsigned long val;
420 419
421 DRM_DEBUG_KMS("%s\n", __FILE__);
422
423 val = WINCONx_ENWIN; 420 val = WINCONx_ENWIN;
424 421
425 switch (win_data->bpp) { 422 switch (win_data->bpp) {
@@ -478,8 +475,6 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
478 struct fimd_context *ctx = get_fimd_context(dev); 475 struct fimd_context *ctx = get_fimd_context(dev);
479 unsigned int keycon0 = 0, keycon1 = 0; 476 unsigned int keycon0 = 0, keycon1 = 0;
480 477
481 DRM_DEBUG_KMS("%s\n", __FILE__);
482
483 keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F | 478 keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
484 WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0); 479 WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
485 480
@@ -489,6 +484,33 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
489 writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); 484 writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
490} 485}
491 486
487/**
 488 * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
 489 * @ctx: FIMD driver context
 490 * @win: window to protect registers for
 491 * @protect: true to protect (disable updates)
492 */
493static void fimd_shadow_protect_win(struct fimd_context *ctx,
494 int win, bool protect)
495{
496 u32 reg, bits, val;
497
498 if (ctx->driver_data->has_shadowcon) {
499 reg = SHADOWCON;
500 bits = SHADOWCON_WINx_PROTECT(win);
501 } else {
502 reg = PRTCON;
503 bits = PRTCON_PROTECT;
504 }
505
506 val = readl(ctx->regs + reg);
507 if (protect)
508 val |= bits;
509 else
510 val &= ~bits;
511 writel(val, ctx->regs + reg);
512}
513
492static void fimd_win_commit(struct device *dev, int zpos) 514static void fimd_win_commit(struct device *dev, int zpos)
493{ 515{
494 struct fimd_context *ctx = get_fimd_context(dev); 516 struct fimd_context *ctx = get_fimd_context(dev);
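
fimd_shadow_protect_win() hides the one real difference between the variants: Exynos parts gate shadow-register updates per window through SHADOWCON, while S3C64xx has only the global PRTCON protect bit. Either way, the callers bracket their register programming identically:

    fimd_shadow_protect_win(ctx, win, true);   /* freeze the shadow copy */

    /* ... program buffer address, VIDOSD, WINCON, ... */

    fimd_shadow_protect_win(ctx, win, false);  /* latch everything at once */

so a half-programmed window can never be latched into the hardware at vsync, which is exactly the malfunction the comment in the next hunk warns about.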
@@ -498,21 +520,19 @@ static void fimd_win_commit(struct device *dev, int zpos)
498 unsigned int last_x; 520 unsigned int last_x;
499 unsigned int last_y; 521 unsigned int last_y;
500 522
501 DRM_DEBUG_KMS("%s\n", __FILE__);
502
503 if (ctx->suspended) 523 if (ctx->suspended)
504 return; 524 return;
505 525
506 if (win == DEFAULT_ZPOS) 526 if (win == DEFAULT_ZPOS)
507 win = ctx->default_win; 527 win = ctx->default_win;
508 528
509 if (win < 0 || win > WINDOWS_NR) 529 if (win < 0 || win >= WINDOWS_NR)
510 return; 530 return;
511 531
512 win_data = &ctx->win_data[win]; 532 win_data = &ctx->win_data[win];
513 533
514 /* 534 /*
515 * SHADOWCON register is used for enabling timing. 535 * SHADOWCON/PRTCON register is used for enabling timing.
516 * 536 *
517 * for example, once only width value of a register is set, 537 * for example, once only width value of a register is set,
518 * if the dma is started then fimd hardware could malfunction so 538 * if the dma is started then fimd hardware could malfunction so
@@ -522,9 +542,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
522 */ 542 */
523 543
524 /* protect windows */ 544 /* protect windows */
525 val = readl(ctx->regs + SHADOWCON); 545 fimd_shadow_protect_win(ctx, win, true);
526 val |= SHADOWCON_WINx_PROTECT(win);
527 writel(val, ctx->regs + SHADOWCON);
528 546
529 /* buffer start address */ 547 /* buffer start address */
530 val = (unsigned long)win_data->dma_addr; 548 val = (unsigned long)win_data->dma_addr;
@@ -602,10 +620,13 @@ static void fimd_win_commit(struct device *dev, int zpos)
602 writel(val, ctx->regs + WINCON(win)); 620 writel(val, ctx->regs + WINCON(win));
603 621
604 /* Enable DMA channel and unprotect windows */ 622 /* Enable DMA channel and unprotect windows */
605 val = readl(ctx->regs + SHADOWCON); 623 fimd_shadow_protect_win(ctx, win, false);
606 val |= SHADOWCON_CHx_ENABLE(win); 624
607 val &= ~SHADOWCON_WINx_PROTECT(win); 625 if (ctx->driver_data->has_shadowcon) {
608 writel(val, ctx->regs + SHADOWCON); 626 val = readl(ctx->regs + SHADOWCON);
627 val |= SHADOWCON_CHx_ENABLE(win);
628 writel(val, ctx->regs + SHADOWCON);
629 }
609 630
610 win_data->enabled = true; 631 win_data->enabled = true;
611} 632}
@@ -617,12 +638,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
617 int win = zpos; 638 int win = zpos;
618 u32 val; 639 u32 val;
619 640
620 DRM_DEBUG_KMS("%s\n", __FILE__);
621
622 if (win == DEFAULT_ZPOS) 641 if (win == DEFAULT_ZPOS)
623 win = ctx->default_win; 642 win = ctx->default_win;
624 643
625 if (win < 0 || win > WINDOWS_NR) 644 if (win < 0 || win >= WINDOWS_NR)
626 return; 645 return;
627 646
628 win_data = &ctx->win_data[win]; 647 win_data = &ctx->win_data[win];
@@ -634,9 +653,7 @@ static void fimd_win_disable(struct device *dev, int zpos)
634 } 653 }
635 654
636 /* protect windows */ 655 /* protect windows */
637 val = readl(ctx->regs + SHADOWCON); 656 fimd_shadow_protect_win(ctx, win, true);
638 val |= SHADOWCON_WINx_PROTECT(win);
639 writel(val, ctx->regs + SHADOWCON);
640 657
641 /* wincon */ 658 /* wincon */
642 val = readl(ctx->regs + WINCON(win)); 659 val = readl(ctx->regs + WINCON(win));
@@ -644,10 +661,13 @@ static void fimd_win_disable(struct device *dev, int zpos)
644 writel(val, ctx->regs + WINCON(win)); 661 writel(val, ctx->regs + WINCON(win));
645 662
646 /* unprotect windows */ 663 /* unprotect windows */
647 val = readl(ctx->regs + SHADOWCON); 664 if (ctx->driver_data->has_shadowcon) {
648 val &= ~SHADOWCON_CHx_ENABLE(win); 665 val = readl(ctx->regs + SHADOWCON);
649 val &= ~SHADOWCON_WINx_PROTECT(win); 666 val &= ~SHADOWCON_CHx_ENABLE(win);
650 writel(val, ctx->regs + SHADOWCON); 667 writel(val, ctx->regs + SHADOWCON);
668 }
669
670 fimd_shadow_protect_win(ctx, win, false);
651 671
652 win_data->enabled = false; 672 win_data->enabled = false;
653} 673}
@@ -697,8 +717,6 @@ out:
697 717
698static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 718static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
699{ 719{
700 DRM_DEBUG_KMS("%s\n", __FILE__);
701
702 /* 720 /*
703 * enable drm irq mode. 721 * enable drm irq mode.
704 * - with irq_enabled = 1, we can use the vblank feature. 722 * - with irq_enabled = 1, we can use the vblank feature.
@@ -725,8 +743,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
725 743
726static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 744static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
727{ 745{
728 DRM_DEBUG_KMS("%s\n", __FILE__);
729
730 /* detach this sub driver from iommu mapping if supported. */ 746 /* detach this sub driver from iommu mapping if supported. */
731 if (is_drm_iommu_supported(drm_dev)) 747 if (is_drm_iommu_supported(drm_dev))
732 drm_iommu_detach_device(drm_dev, dev); 748 drm_iommu_detach_device(drm_dev, dev);
@@ -741,8 +757,6 @@ static int fimd_calc_clkdiv(struct fimd_context *ctx,
741 u32 best_framerate = 0; 757 u32 best_framerate = 0;
742 u32 framerate; 758 u32 framerate;
743 759
744 DRM_DEBUG_KMS("%s\n", __FILE__);
745
746 retrace = timing->left_margin + timing->hsync_len + 760 retrace = timing->left_margin + timing->hsync_len +
747 timing->right_margin + timing->xres; 761 timing->right_margin + timing->xres;
748 retrace *= timing->upper_margin + timing->vsync_len + 762 retrace *= timing->upper_margin + timing->vsync_len +
@@ -777,10 +791,6 @@ static int fimd_calc_clkdiv(struct fimd_context *ctx,
777 791
778static void fimd_clear_win(struct fimd_context *ctx, int win) 792static void fimd_clear_win(struct fimd_context *ctx, int win)
779{ 793{
780 u32 val;
781
782 DRM_DEBUG_KMS("%s\n", __FILE__);
783
784 writel(0, ctx->regs + WINCON(win)); 794 writel(0, ctx->regs + WINCON(win));
785 writel(0, ctx->regs + VIDOSD_A(win)); 795 writel(0, ctx->regs + VIDOSD_A(win));
786 writel(0, ctx->regs + VIDOSD_B(win)); 796 writel(0, ctx->regs + VIDOSD_B(win));
@@ -789,15 +799,11 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
789 if (win == 1 || win == 2) 799 if (win == 1 || win == 2)
790 writel(0, ctx->regs + VIDOSD_D(win)); 800 writel(0, ctx->regs + VIDOSD_D(win));
791 801
792 val = readl(ctx->regs + SHADOWCON); 802 fimd_shadow_protect_win(ctx, win, false);
793 val &= ~SHADOWCON_WINx_PROTECT(win);
794 writel(val, ctx->regs + SHADOWCON);
795} 803}
796 804
797static int fimd_clock(struct fimd_context *ctx, bool enable) 805static int fimd_clock(struct fimd_context *ctx, bool enable)
798{ 806{
799 DRM_DEBUG_KMS("%s\n", __FILE__);
800
801 if (enable) { 807 if (enable) {
802 int ret; 808 int ret;
803 809
@@ -883,8 +889,6 @@ static int fimd_probe(struct platform_device *pdev)
883 int win; 889 int win;
884 int ret = -EINVAL; 890 int ret = -EINVAL;
885 891
886 DRM_DEBUG_KMS("%s\n", __FILE__);
887
888 if (dev->of_node) { 892 if (dev->of_node) {
889 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 893 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
890 if (!pdata) { 894 if (!pdata) {
@@ -949,6 +953,7 @@ static int fimd_probe(struct platform_device *pdev)
949 return ret; 953 return ret;
950 } 954 }
951 955
956 ctx->driver_data = drm_fimd_get_driver_data(pdev);
952 ctx->vidcon0 = pdata->vidcon0; 957 ctx->vidcon0 = pdata->vidcon0;
953 ctx->vidcon1 = pdata->vidcon1; 958 ctx->vidcon1 = pdata->vidcon1;
954 ctx->default_win = pdata->default_win; 959 ctx->default_win = pdata->default_win;
@@ -989,8 +994,6 @@ static int fimd_remove(struct platform_device *pdev)
989 struct device *dev = &pdev->dev; 994 struct device *dev = &pdev->dev;
990 struct fimd_context *ctx = platform_get_drvdata(pdev); 995 struct fimd_context *ctx = platform_get_drvdata(pdev);
991 996
992 DRM_DEBUG_KMS("%s\n", __FILE__);
993
994 exynos_drm_subdrv_unregister(&ctx->subdrv); 997 exynos_drm_subdrv_unregister(&ctx->subdrv);
995 998
996 if (ctx->suspended) 999 if (ctx->suspended)
@@ -1055,8 +1058,6 @@ static int fimd_runtime_suspend(struct device *dev)
1055{ 1058{
1056 struct fimd_context *ctx = get_fimd_context(dev); 1059 struct fimd_context *ctx = get_fimd_context(dev);
1057 1060
1058 DRM_DEBUG_KMS("%s\n", __FILE__);
1059
1060 return fimd_activate(ctx, false); 1061 return fimd_activate(ctx, false);
1061} 1062}
1062 1063
@@ -1064,14 +1065,15 @@ static int fimd_runtime_resume(struct device *dev)
1064{ 1065{
1065 struct fimd_context *ctx = get_fimd_context(dev); 1066 struct fimd_context *ctx = get_fimd_context(dev);
1066 1067
1067 DRM_DEBUG_KMS("%s\n", __FILE__);
1068
1069 return fimd_activate(ctx, true); 1068 return fimd_activate(ctx, true);
1070} 1069}
1071#endif 1070#endif
1072 1071
1073static struct platform_device_id fimd_driver_ids[] = { 1072static struct platform_device_id fimd_driver_ids[] = {
1074 { 1073 {
1074 .name = "s3c64xx-fb",
1075 .driver_data = (unsigned long)&s3c64xx_fimd_driver_data,
1076 }, {
1075 .name = "exynos4-fb", 1077 .name = "exynos4-fb",
1076 .driver_data = (unsigned long)&exynos4_fimd_driver_data, 1078 .driver_data = (unsigned long)&exynos4_fimd_driver_data,
1077 }, { 1079 }, {
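
With the new s3c64xx-fb entry the driver now binds both by legacy platform-device name and by the DT compatibles added earlier, all resolving to the same per-variant fimd_driver_data. A sketch of how the id table plugs into the platform driver (field layout assumed, not quoted from this file):

    static struct platform_driver fimd_driver = {
            .probe          = fimd_probe,
            .remove         = fimd_remove,
            .id_table       = fimd_driver_ids,   /* board-file binding */
            .driver         = {
                    .name   = "exynos4-fb",
                    .owner  = THIS_MODULE,
                    .of_match_table = of_match_ptr(fimd_driver_dt_match),
            },
    };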
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index cf4543ffa079..c3f15e7646d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -132,8 +132,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
132 struct drm_gem_object *obj; 132 struct drm_gem_object *obj;
133 struct exynos_drm_gem_buf *buf; 133 struct exynos_drm_gem_buf *buf;
134 134
135 DRM_DEBUG_KMS("%s\n", __FILE__);
136
137 obj = &exynos_gem_obj->base; 135 obj = &exynos_gem_obj->base;
138 buf = exynos_gem_obj->buffer; 136 buf = exynos_gem_obj->buffer;
139 137
@@ -227,7 +225,6 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
227 } 225 }
228 226
229 size = roundup_gem_size(size, flags); 227 size = roundup_gem_size(size, flags);
230 DRM_DEBUG_KMS("%s\n", __FILE__);
231 228
232 ret = check_gem_flags(flags); 229 ret = check_gem_flags(flags);
233 if (ret) 230 if (ret)
@@ -268,8 +265,6 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
268 struct exynos_drm_gem_obj *exynos_gem_obj; 265 struct exynos_drm_gem_obj *exynos_gem_obj;
269 int ret; 266 int ret;
270 267
271 DRM_DEBUG_KMS("%s\n", __FILE__);
272
273 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 268 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
274 if (IS_ERR(exynos_gem_obj)) 269 if (IS_ERR(exynos_gem_obj))
275 return PTR_ERR(exynos_gem_obj); 270 return PTR_ERR(exynos_gem_obj);
@@ -331,8 +326,6 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
331{ 326{
332 struct drm_exynos_gem_map_off *args = data; 327 struct drm_exynos_gem_map_off *args = data;
333 328
334 DRM_DEBUG_KMS("%s\n", __FILE__);
335
336 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n", 329 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
337 args->handle, (unsigned long)args->offset); 330 args->handle, (unsigned long)args->offset);
338 331
@@ -371,8 +364,6 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
371 unsigned long vm_size; 364 unsigned long vm_size;
372 int ret; 365 int ret;
373 366
374 DRM_DEBUG_KMS("%s\n", __FILE__);
375
376 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 367 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
377 vma->vm_private_data = obj; 368 vma->vm_private_data = obj;
378 vma->vm_ops = drm_dev->driver->gem_vm_ops; 369 vma->vm_ops = drm_dev->driver->gem_vm_ops;
@@ -429,9 +420,7 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
429{ 420{
430 struct drm_exynos_gem_mmap *args = data; 421 struct drm_exynos_gem_mmap *args = data;
431 struct drm_gem_object *obj; 422 struct drm_gem_object *obj;
432 unsigned int addr; 423 unsigned long addr;
433
434 DRM_DEBUG_KMS("%s\n", __FILE__);
435 424
436 if (!(dev->driver->driver_features & DRIVER_GEM)) { 425 if (!(dev->driver->driver_features & DRIVER_GEM)) {
437 DRM_ERROR("does not support GEM.\n"); 426 DRM_ERROR("does not support GEM.\n");
@@ -473,14 +462,14 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
473 462
474 drm_gem_object_unreference(obj); 463 drm_gem_object_unreference(obj);
475 464
476 if (IS_ERR((void *)addr)) { 465 if (IS_ERR_VALUE(addr)) {
477 /* check filp->f_op, filp->private_data are restored */ 466 /* check filp->f_op, filp->private_data are restored */
478 if (file_priv->filp->f_op == &exynos_drm_gem_fops) { 467 if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
479 file_priv->filp->f_op = fops_get(dev->driver->fops); 468 file_priv->filp->f_op = fops_get(dev->driver->fops);
480 file_priv->filp->private_data = file_priv; 469 file_priv->filp->private_data = file_priv;
481 } 470 }
482 mutex_unlock(&dev->struct_mutex); 471 mutex_unlock(&dev->struct_mutex);
483 return PTR_ERR((void *)addr); 472 return (int)addr;
484 } 473 }
485 474
486 mutex_unlock(&dev->struct_mutex); 475 mutex_unlock(&dev->struct_mutex);
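
The mmap-ioctl hunk above fixes two related bugs: the address was stored in an unsigned int, which truncates a 64-bit userspace address, and the error check round-tripped it through a pointer cast. Failed mappings come back as a negative errno encoded in the unsigned long itself, which is exactly what IS_ERR_VALUE() tests. A sketch of the general pattern (vm_mmap() used here as a representative caller, not a quote from this file):

    #include <linux/err.h>

    /* a userspace address on success, or a -errno packed into the
     * top page of the unsigned long range on failure */
    unsigned long addr = vm_mmap(file, 0, size, prot, flags, 0);

    if (IS_ERR_VALUE(addr))
            return (int)addr;       /* already a negative errno */

Returning (int)addr preserves the errno directly, whereas the old PTR_ERR((void *)addr) on a 32-bit addr lost the sign on 64-bit kernels.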
@@ -643,8 +632,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
643 632
644int exynos_drm_gem_init_object(struct drm_gem_object *obj) 633int exynos_drm_gem_init_object(struct drm_gem_object *obj)
645{ 634{
646 DRM_DEBUG_KMS("%s\n", __FILE__);
647
648 return 0; 635 return 0;
649} 636}
650 637
@@ -653,8 +640,6 @@ void exynos_drm_gem_free_object(struct drm_gem_object *obj)
653 struct exynos_drm_gem_obj *exynos_gem_obj; 640 struct exynos_drm_gem_obj *exynos_gem_obj;
654 struct exynos_drm_gem_buf *buf; 641 struct exynos_drm_gem_buf *buf;
655 642
656 DRM_DEBUG_KMS("%s\n", __FILE__);
657
658 exynos_gem_obj = to_exynos_gem_obj(obj); 643 exynos_gem_obj = to_exynos_gem_obj(obj);
659 buf = exynos_gem_obj->buffer; 644 buf = exynos_gem_obj->buffer;
660 645
@@ -671,8 +656,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
671 struct exynos_drm_gem_obj *exynos_gem_obj; 656 struct exynos_drm_gem_obj *exynos_gem_obj;
672 int ret; 657 int ret;
673 658
674 DRM_DEBUG_KMS("%s\n", __FILE__);
675
676 /* 659 /*
 677 * allocate memory to be used for framebuffer. 660 * allocate memory to be used for framebuffer.
678 * - this callback would be called by user application 661 * - this callback would be called by user application
@@ -704,8 +687,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
704 struct drm_gem_object *obj; 687 struct drm_gem_object *obj;
705 int ret = 0; 688 int ret = 0;
706 689
707 DRM_DEBUG_KMS("%s\n", __FILE__);
708
709 mutex_lock(&dev->struct_mutex); 690 mutex_lock(&dev->struct_mutex);
710 691
711 /* 692 /*
@@ -743,8 +724,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
743{ 724{
744 int ret; 725 int ret;
745 726
746 DRM_DEBUG_KMS("%s\n", __FILE__);
747
748 /* 727 /*
749 * obj->refcount and obj->handle_count are decreased and 728 * obj->refcount and obj->handle_count are decreased and
 750 * if both of them are 0 then exynos_drm_gem_free_object() 729 * if both of them are 0 then exynos_drm_gem_free_object()
@@ -788,8 +767,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
788 struct drm_gem_object *obj; 767 struct drm_gem_object *obj;
789 int ret; 768 int ret;
790 769
791 DRM_DEBUG_KMS("%s\n", __FILE__);
792
793 /* set vm_area_struct. */ 770 /* set vm_area_struct. */
794 ret = drm_gem_mmap(filp, vma); 771 ret = drm_gem_mmap(filp, vma);
795 if (ret < 0) { 772 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 762f40d548b7..472e3b25e7f2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -400,8 +400,6 @@ static int gsc_sw_reset(struct gsc_context *ctx)
400 u32 cfg; 400 u32 cfg;
401 int count = GSC_RESET_TIMEOUT; 401 int count = GSC_RESET_TIMEOUT;
402 402
403 DRM_DEBUG_KMS("%s\n", __func__);
404
405 /* s/w reset */ 403 /* s/w reset */
406 cfg = (GSC_SW_RESET_SRESET); 404 cfg = (GSC_SW_RESET_SRESET);
407 gsc_write(cfg, GSC_SW_RESET); 405 gsc_write(cfg, GSC_SW_RESET);
@@ -441,8 +439,6 @@ static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
441{ 439{
442 u32 gscblk_cfg; 440 u32 gscblk_cfg;
443 441
444 DRM_DEBUG_KMS("%s\n", __func__);
445
446 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1); 442 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
447 443
448 if (enable) 444 if (enable)
@@ -460,7 +456,7 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
460{ 456{
461 u32 cfg; 457 u32 cfg;
462 458
463 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__, 459 DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
464 enable, overflow, done); 460 enable, overflow, done);
465 461
466 cfg = gsc_read(GSC_IRQ); 462 cfg = gsc_read(GSC_IRQ);
@@ -491,7 +487,7 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
491 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 487 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
492 u32 cfg; 488 u32 cfg;
493 489
494 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); 490 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
495 491
496 cfg = gsc_read(GSC_IN_CON); 492 cfg = gsc_read(GSC_IN_CON);
497 cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK | 493 cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
@@ -567,8 +563,7 @@ static int gsc_src_set_transf(struct device *dev,
567 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 563 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
568 u32 cfg; 564 u32 cfg;
569 565
570 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__, 566 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
571 degree, flip);
572 567
573 cfg = gsc_read(GSC_IN_CON); 568 cfg = gsc_read(GSC_IN_CON);
574 cfg &= ~GSC_IN_ROT_MASK; 569 cfg &= ~GSC_IN_ROT_MASK;
@@ -616,8 +611,8 @@ static int gsc_src_set_size(struct device *dev, int swap,
616 struct gsc_scaler *sc = &ctx->sc; 611 struct gsc_scaler *sc = &ctx->sc;
617 u32 cfg; 612 u32 cfg;
618 613
619 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n", 614 DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
620 __func__, swap, pos->x, pos->y, pos->w, pos->h); 615 swap, pos->x, pos->y, pos->w, pos->h);
621 616
622 if (swap) { 617 if (swap) {
623 img_pos.w = pos->h; 618 img_pos.w = pos->h;
@@ -634,8 +629,7 @@ static int gsc_src_set_size(struct device *dev, int swap,
634 GSC_CROPPED_HEIGHT(img_pos.h)); 629 GSC_CROPPED_HEIGHT(img_pos.h));
635 gsc_write(cfg, GSC_CROPPED_SIZE); 630 gsc_write(cfg, GSC_CROPPED_SIZE);
636 631
637 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n", 632 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
638 __func__, sz->hsize, sz->vsize);
639 633
640 /* original size */ 634 /* original size */
641 cfg = gsc_read(GSC_SRCIMG_SIZE); 635 cfg = gsc_read(GSC_SRCIMG_SIZE);
@@ -650,8 +644,7 @@ static int gsc_src_set_size(struct device *dev, int swap,
650 cfg = gsc_read(GSC_IN_CON); 644 cfg = gsc_read(GSC_IN_CON);
651 cfg &= ~GSC_IN_RGB_TYPE_MASK; 645 cfg &= ~GSC_IN_RGB_TYPE_MASK;
652 646
653 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n", 647 DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
654 __func__, pos->w, sc->range);
655 648
656 if (pos->w >= GSC_WIDTH_ITU_709) 649 if (pos->w >= GSC_WIDTH_ITU_709)
657 if (sc->range) 650 if (sc->range)
@@ -677,8 +670,7 @@ static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
677 u32 cfg; 670 u32 cfg;
678 u32 mask = 0x00000001 << buf_id; 671 u32 mask = 0x00000001 << buf_id;
679 672
680 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__, 673 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
681 buf_id, buf_type);
682 674
683 /* mask register set */ 675 /* mask register set */
684 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); 676 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
@@ -721,7 +713,7 @@ static int gsc_src_set_addr(struct device *dev,
721 713
722 property = &c_node->property; 714 property = &c_node->property;
723 715
724 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 716 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
725 property->prop_id, buf_id, buf_type); 717 property->prop_id, buf_id, buf_type);
726 718
727 if (buf_id > GSC_MAX_SRC) { 719 if (buf_id > GSC_MAX_SRC) {
@@ -765,7 +757,7 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
765 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 757 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
766 u32 cfg; 758 u32 cfg;
767 759
768 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); 760 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
769 761
770 cfg = gsc_read(GSC_OUT_CON); 762 cfg = gsc_read(GSC_OUT_CON);
771 cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK | 763 cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
@@ -838,8 +830,7 @@ static int gsc_dst_set_transf(struct device *dev,
838 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 830 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
839 u32 cfg; 831 u32 cfg;
840 832
841 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__, 833 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
842 degree, flip);
843 834
844 cfg = gsc_read(GSC_IN_CON); 835 cfg = gsc_read(GSC_IN_CON);
845 cfg &= ~GSC_IN_ROT_MASK; 836 cfg &= ~GSC_IN_ROT_MASK;
@@ -881,7 +872,7 @@ static int gsc_dst_set_transf(struct device *dev,
881 872
882static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio) 873static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
883{ 874{
884 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst); 875 DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
885 876
886 if (src >= dst * 8) { 877 if (src >= dst * 8) {
887 DRM_ERROR("failed to make ratio and shift.\n"); 878 DRM_ERROR("failed to make ratio and shift.\n");
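The error branch above caps the GSC pre-scaler at 8:1 downscaling: any source dimension eight or more times the destination is rejected outright. A sketch of the ratio/shift selection this implies; only the >= 8x rejection is visible in the hunk, the intermediate thresholds are assumptions:

	static int example_ratio_shift(u32 src, u32 dst, u32 *ratio)
	{
		if (src >= dst * 8)		/* steeper than 8:1: not supported */
			return -EINVAL;
		else if (src >= dst * 4)	/* assumed: pre-scale by 4 */
			*ratio = 4;
		else if (src >= dst * 2)	/* assumed: pre-scale by 2 */
			*ratio = 2;
		else
			*ratio = 1;		/* main scaler covers the remainder */
		return 0;
	}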
@@ -944,20 +935,19 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
944 return ret; 935 return ret;
945 } 936 }
946 937
947 DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n", 938 DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n",
948 __func__, sc->pre_hratio, sc->pre_vratio); 939 sc->pre_hratio, sc->pre_vratio);
949 940
950 sc->main_hratio = (src_w << 16) / dst_w; 941 sc->main_hratio = (src_w << 16) / dst_w;
951 sc->main_vratio = (src_h << 16) / dst_h; 942 sc->main_vratio = (src_h << 16) / dst_h;
952 943
953 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n", 944 DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
954 __func__, sc->main_hratio, sc->main_vratio); 945 sc->main_hratio, sc->main_vratio);
955 946
956 gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio, 947 gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
957 &sc->pre_shfactor); 948 &sc->pre_shfactor);
958 949
959 DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__, 950 DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor);
960 sc->pre_shfactor);
961 951
962 cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) | 952 cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
963 GSC_PRESC_H_RATIO(sc->pre_hratio) | 953 GSC_PRESC_H_RATIO(sc->pre_hratio) |
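The main scaler ratios set a few lines up are 16.16 fixed-point values: the source dimension shifted left by 16, divided by the destination dimension. A quick worked example (values illustrative only):

	/* horizontal downscale 1920 -> 1280 */
	u32 main_hratio = (1920u << 16) / 1280u;	/* 98304 == 1.5 * 65536 */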
@@ -1023,8 +1013,8 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1023{ 1013{
1024 u32 cfg; 1014 u32 cfg;
1025 1015
1026 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n", 1016 DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
1027 __func__, sc->main_hratio, sc->main_vratio); 1017 sc->main_hratio, sc->main_vratio);
1028 1018
1029 gsc_set_h_coef(ctx, sc->main_hratio); 1019 gsc_set_h_coef(ctx, sc->main_hratio);
1030 cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio); 1020 cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
@@ -1043,8 +1033,8 @@ static int gsc_dst_set_size(struct device *dev, int swap,
1043 struct gsc_scaler *sc = &ctx->sc; 1033 struct gsc_scaler *sc = &ctx->sc;
1044 u32 cfg; 1034 u32 cfg;
1045 1035
1046 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n", 1036 DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1047 __func__, swap, pos->x, pos->y, pos->w, pos->h); 1037 swap, pos->x, pos->y, pos->w, pos->h);
1048 1038
1049 if (swap) { 1039 if (swap) {
1050 img_pos.w = pos->h; 1040 img_pos.w = pos->h;
@@ -1060,8 +1050,7 @@ static int gsc_dst_set_size(struct device *dev, int swap,
1060 cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h)); 1050 cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1061 gsc_write(cfg, GSC_SCALED_SIZE); 1051 gsc_write(cfg, GSC_SCALED_SIZE);
1062 1052
1063 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n", 1053 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
1064 __func__, sz->hsize, sz->vsize);
1065 1054
1066 /* original size */ 1055 /* original size */
1067 cfg = gsc_read(GSC_DSTIMG_SIZE); 1056 cfg = gsc_read(GSC_DSTIMG_SIZE);
@@ -1074,8 +1063,7 @@ static int gsc_dst_set_size(struct device *dev, int swap,
1074 cfg = gsc_read(GSC_OUT_CON); 1063 cfg = gsc_read(GSC_OUT_CON);
1075 cfg &= ~GSC_OUT_RGB_TYPE_MASK; 1064 cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1076 1065
1077 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n", 1066 DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
1078 __func__, pos->w, sc->range);
1079 1067
1080 if (pos->w >= GSC_WIDTH_ITU_709) 1068 if (pos->w >= GSC_WIDTH_ITU_709)
1081 if (sc->range) 1069 if (sc->range)
@@ -1104,7 +1092,7 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1104 if (cfg & (mask << i)) 1092 if (cfg & (mask << i))
1105 buf_num--; 1093 buf_num--;
1106 1094
1107 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num); 1095 DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
1108 1096
1109 return buf_num; 1097 return buf_num;
1110} 1098}
@@ -1118,8 +1106,7 @@ static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1118 u32 mask = 0x00000001 << buf_id; 1106 u32 mask = 0x00000001 << buf_id;
1119 int ret = 0; 1107 int ret = 0;
1120 1108
1121 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__, 1109 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1122 buf_id, buf_type);
1123 1110
1124 mutex_lock(&ctx->lock); 1111 mutex_lock(&ctx->lock);
1125 1112
@@ -1177,7 +1164,7 @@ static int gsc_dst_set_addr(struct device *dev,
1177 1164
1178 property = &c_node->property; 1165 property = &c_node->property;
1179 1166
1180 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, 1167 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
1181 property->prop_id, buf_id, buf_type); 1168 property->prop_id, buf_id, buf_type);
1182 1169
1183 if (buf_id > GSC_MAX_DST) { 1170 if (buf_id > GSC_MAX_DST) {
@@ -1217,7 +1204,7 @@ static struct exynos_drm_ipp_ops gsc_dst_ops = {
1217 1204
1218static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable) 1205static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1219{ 1206{
1220 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 1207 DRM_DEBUG_KMS("enable[%d]\n", enable);
1221 1208
1222 if (enable) { 1209 if (enable) {
1223 clk_enable(ctx->gsc_clk); 1210 clk_enable(ctx->gsc_clk);
@@ -1236,7 +1223,7 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
1236 u32 buf_id = GSC_MAX_SRC; 1223 u32 buf_id = GSC_MAX_SRC;
1237 int ret; 1224 int ret;
1238 1225
1239 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id); 1226 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1240 1227
1241 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); 1228 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1242 curr_index = GSC_IN_CURR_GET_INDEX(cfg); 1229 curr_index = GSC_IN_CURR_GET_INDEX(cfg);
@@ -1259,7 +1246,7 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
1259 return ret; 1246 return ret;
1260 } 1247 }
1261 1248
1262 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg, 1249 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
1263 curr_index, buf_id); 1250 curr_index, buf_id);
1264 1251
1265 return buf_id; 1252 return buf_id;
@@ -1271,7 +1258,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1271 u32 buf_id = GSC_MAX_DST; 1258 u32 buf_id = GSC_MAX_DST;
1272 int ret; 1259 int ret;
1273 1260
1274 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id); 1261 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1275 1262
1276 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); 1263 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1277 curr_index = GSC_OUT_CURR_GET_INDEX(cfg); 1264 curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
@@ -1294,7 +1281,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1294 return ret; 1281 return ret;
1295 } 1282 }
1296 1283
1297 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg, 1284 DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
1298 curr_index, buf_id); 1285 curr_index, buf_id);
1299 1286
1300 return buf_id; 1287 return buf_id;
@@ -1310,7 +1297,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1310 u32 status; 1297 u32 status;
1311 int buf_id[EXYNOS_DRM_OPS_MAX]; 1298 int buf_id[EXYNOS_DRM_OPS_MAX];
1312 1299
1313 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id); 1300 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1314 1301
1315 status = gsc_read(GSC_IRQ); 1302 status = gsc_read(GSC_IRQ);
1316 if (status & GSC_IRQ_STATUS_OR_IRQ) { 1303 if (status & GSC_IRQ_STATUS_OR_IRQ) {
@@ -1331,7 +1318,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1331 if (buf_id[EXYNOS_DRM_OPS_DST] < 0) 1318 if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1332 return IRQ_HANDLED; 1319 return IRQ_HANDLED;
1333 1320
1334 DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__, 1321 DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n",
1335 buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]); 1322 buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1336 1323
1337 event_work->ippdrv = ippdrv; 1324 event_work->ippdrv = ippdrv;
@@ -1350,8 +1337,6 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1350{ 1337{
1351 struct drm_exynos_ipp_prop_list *prop_list; 1338 struct drm_exynos_ipp_prop_list *prop_list;
1352 1339
1353 DRM_DEBUG_KMS("%s\n", __func__);
1354
1355 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 1340 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1356 if (!prop_list) { 1341 if (!prop_list) {
1357 DRM_ERROR("failed to alloc property list.\n"); 1342 DRM_ERROR("failed to alloc property list.\n");
@@ -1394,7 +1379,7 @@ static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1394 case EXYNOS_DRM_FLIP_BOTH: 1379 case EXYNOS_DRM_FLIP_BOTH:
1395 return true; 1380 return true;
1396 default: 1381 default:
1397 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 1382 DRM_DEBUG_KMS("invalid flip\n");
1398 return false; 1383 return false;
1399 } 1384 }
1400} 1385}
@@ -1411,8 +1396,6 @@ static int gsc_ippdrv_check_property(struct device *dev,
1411 bool swap; 1396 bool swap;
1412 int i; 1397 int i;
1413 1398
1414 DRM_DEBUG_KMS("%s\n", __func__);
1415
1416 for_each_ipp_ops(i) { 1399 for_each_ipp_ops(i) {
1417 if ((i == EXYNOS_DRM_OPS_SRC) && 1400 if ((i == EXYNOS_DRM_OPS_SRC) &&
1418 (property->cmd == IPP_CMD_WB)) 1401 (property->cmd == IPP_CMD_WB))
@@ -1521,8 +1504,6 @@ static int gsc_ippdrv_reset(struct device *dev)
1521 struct gsc_scaler *sc = &ctx->sc; 1504 struct gsc_scaler *sc = &ctx->sc;
1522 int ret; 1505 int ret;
1523 1506
1524 DRM_DEBUG_KMS("%s\n", __func__);
1525
1526 /* reset h/w block */ 1507 /* reset h/w block */
1527 ret = gsc_sw_reset(ctx); 1508 ret = gsc_sw_reset(ctx);
1528 if (ret < 0) { 1509 if (ret < 0) {
@@ -1549,7 +1530,7 @@ static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1549 u32 cfg; 1530 u32 cfg;
1550 int ret, i; 1531 int ret, i;
1551 1532
1552 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd); 1533 DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1553 1534
1554 if (!c_node) { 1535 if (!c_node) {
1555 DRM_ERROR("failed to get c_node.\n"); 1536 DRM_ERROR("failed to get c_node.\n");
@@ -1643,7 +1624,7 @@ static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1643 struct drm_exynos_ipp_set_wb set_wb = {0, 0}; 1624 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1644 u32 cfg; 1625 u32 cfg;
1645 1626
1646 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd); 1627 DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1647 1628
1648 switch (cmd) { 1629 switch (cmd) {
1649 case IPP_CMD_M2M: 1630 case IPP_CMD_M2M:
@@ -1728,8 +1709,7 @@ static int gsc_probe(struct platform_device *pdev)
1728 return ret; 1709 return ret;
1729 } 1710 }
1730 1711
1731 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, 1712 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
1732 (int)ippdrv);
1733 1713
1734 mutex_init(&ctx->lock); 1714 mutex_init(&ctx->lock);
1735 platform_set_drvdata(pdev, ctx); 1715 platform_set_drvdata(pdev, ctx);
@@ -1772,7 +1752,7 @@ static int gsc_suspend(struct device *dev)
1772{ 1752{
1773 struct gsc_context *ctx = get_gsc_context(dev); 1753 struct gsc_context *ctx = get_gsc_context(dev);
1774 1754
1775 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1755 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1776 1756
1777 if (pm_runtime_suspended(dev)) 1757 if (pm_runtime_suspended(dev))
1778 return 0; 1758 return 0;
@@ -1784,7 +1764,7 @@ static int gsc_resume(struct device *dev)
1784{ 1764{
1785 struct gsc_context *ctx = get_gsc_context(dev); 1765 struct gsc_context *ctx = get_gsc_context(dev);
1786 1766
1787 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1767 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1788 1768
1789 if (!pm_runtime_suspended(dev)) 1769 if (!pm_runtime_suspended(dev))
1790 return gsc_clk_ctrl(ctx, true); 1770 return gsc_clk_ctrl(ctx, true);
@@ -1798,7 +1778,7 @@ static int gsc_runtime_suspend(struct device *dev)
1798{ 1778{
1799 struct gsc_context *ctx = get_gsc_context(dev); 1779 struct gsc_context *ctx = get_gsc_context(dev);
1800 1780
1801 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); 1781 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1802 1782
1803 return gsc_clk_ctrl(ctx, false); 1783 return gsc_clk_ctrl(ctx, false);
1804} 1784}
@@ -1807,7 +1787,7 @@ static int gsc_runtime_resume(struct device *dev)
1807{ 1787{
1808 struct gsc_context *ctx = get_gsc_context(dev); 1788 struct gsc_context *ctx = get_gsc_context(dev);
1809 1789
1810 DRM_DEBUG_KMS("%s:id[%d]\n", __FILE__, ctx->id); 1790 DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1811 1791
1812 return gsc_clk_ctrl(ctx, true); 1792 return gsc_clk_ctrl(ctx, true);
1813} 1793}
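All of the "%s" / __func__ arguments stripped from gsc.c above (and the stray __FILE__ ones, which printed a file name where a function name was meant) were redundant: the DRM debug macros of this era already hand the caller's function name to drm_ut_debug_printk(), which emits it as a prefix. Roughly as drmP.h defined it at the time; a paraphrased sketch, not part of this diff:

#define DRM_DEBUG_KMS(fmt, args...)					\
	do {								\
		drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME,		\
					__func__, fmt, ##args);		\
	} while (0)

So DRM_DEBUG_KMS("%s:...", __func__, ...) logged the function name twice, and the same cleanup repeats in the files below.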
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 437fb947e46d..aaa550d622f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -88,16 +88,12 @@ void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx)
88 88
89void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops) 89void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
90{ 90{
91 DRM_DEBUG_KMS("%s\n", __FILE__);
92
93 if (ops) 91 if (ops)
94 hdmi_ops = ops; 92 hdmi_ops = ops;
95} 93}
96 94
97void exynos_mixer_ops_register(struct exynos_mixer_ops *ops) 95void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
98{ 96{
99 DRM_DEBUG_KMS("%s\n", __FILE__);
100
101 if (ops) 97 if (ops)
102 mixer_ops = ops; 98 mixer_ops = ops;
103} 99}
@@ -106,8 +102,6 @@ static bool drm_hdmi_is_connected(struct device *dev)
106{ 102{
107 struct drm_hdmi_context *ctx = to_context(dev); 103 struct drm_hdmi_context *ctx = to_context(dev);
108 104
109 DRM_DEBUG_KMS("%s\n", __FILE__);
110
111 if (hdmi_ops && hdmi_ops->is_connected) 105 if (hdmi_ops && hdmi_ops->is_connected)
112 return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx); 106 return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
113 107
@@ -119,34 +113,31 @@ static struct edid *drm_hdmi_get_edid(struct device *dev,
119{ 113{
120 struct drm_hdmi_context *ctx = to_context(dev); 114 struct drm_hdmi_context *ctx = to_context(dev);
121 115
122 DRM_DEBUG_KMS("%s\n", __FILE__);
123
124 if (hdmi_ops && hdmi_ops->get_edid) 116 if (hdmi_ops && hdmi_ops->get_edid)
125 return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector); 117 return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
126 118
127 return NULL; 119 return NULL;
128} 120}
129 121
130static int drm_hdmi_check_timing(struct device *dev, void *timing) 122static int drm_hdmi_check_mode(struct device *dev,
123 struct drm_display_mode *mode)
131{ 124{
132 struct drm_hdmi_context *ctx = to_context(dev); 125 struct drm_hdmi_context *ctx = to_context(dev);
133 int ret = 0; 126 int ret = 0;
134 127
135 DRM_DEBUG_KMS("%s\n", __FILE__);
136
137 /* 128 /*
138 * Both mixer and hdmi should be able to handle the requested mode. 129 * Both mixer and hdmi should be able to handle the requested mode.
139 * If any of the two fails, return mode as BAD. 130 * If any of the two fails, return mode as BAD.
140 */ 131 */
141 132
142 if (mixer_ops && mixer_ops->check_timing) 133 if (mixer_ops && mixer_ops->check_mode)
143 ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing); 134 ret = mixer_ops->check_mode(ctx->mixer_ctx->ctx, mode);
144 135
145 if (ret) 136 if (ret)
146 return ret; 137 return ret;
147 138
148 if (hdmi_ops && hdmi_ops->check_timing) 139 if (hdmi_ops && hdmi_ops->check_mode)
149 return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing); 140 return hdmi_ops->check_mode(ctx->hdmi_ctx->ctx, mode);
150 141
151 return 0; 142 return 0;
152} 143}
@@ -155,8 +146,6 @@ static int drm_hdmi_power_on(struct device *dev, int mode)
155{ 146{
156 struct drm_hdmi_context *ctx = to_context(dev); 147 struct drm_hdmi_context *ctx = to_context(dev);
157 148
158 DRM_DEBUG_KMS("%s\n", __FILE__);
159
160 if (hdmi_ops && hdmi_ops->power_on) 149 if (hdmi_ops && hdmi_ops->power_on)
161 return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode); 150 return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
162 151
@@ -167,7 +156,7 @@ static struct exynos_drm_display_ops drm_hdmi_display_ops = {
167 .type = EXYNOS_DISPLAY_TYPE_HDMI, 156 .type = EXYNOS_DISPLAY_TYPE_HDMI,
168 .is_connected = drm_hdmi_is_connected, 157 .is_connected = drm_hdmi_is_connected,
169 .get_edid = drm_hdmi_get_edid, 158 .get_edid = drm_hdmi_get_edid,
170 .check_timing = drm_hdmi_check_timing, 159 .check_mode = drm_hdmi_check_mode,
171 .power_on = drm_hdmi_power_on, 160 .power_on = drm_hdmi_power_on,
172}; 161};
173 162
@@ -177,8 +166,6 @@ static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
177 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 166 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
178 struct exynos_drm_manager *manager = subdrv->manager; 167 struct exynos_drm_manager *manager = subdrv->manager;
179 168
180 DRM_DEBUG_KMS("%s\n", __FILE__);
181
182 if (mixer_ops && mixer_ops->enable_vblank) 169 if (mixer_ops && mixer_ops->enable_vblank)
183 return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx, 170 return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
184 manager->pipe); 171 manager->pipe);
@@ -190,8 +177,6 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
190{ 177{
191 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 178 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
192 179
193 DRM_DEBUG_KMS("%s\n", __FILE__);
194
195 if (mixer_ops && mixer_ops->disable_vblank) 180 if (mixer_ops && mixer_ops->disable_vblank)
196 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx); 181 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
197} 182}
@@ -200,8 +185,6 @@ static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
200{ 185{
201 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 186 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
202 187
203 DRM_DEBUG_KMS("%s\n", __FILE__);
204
205 if (mixer_ops && mixer_ops->wait_for_vblank) 188 if (mixer_ops && mixer_ops->wait_for_vblank)
206 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx); 189 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
207} 190}
@@ -214,11 +197,9 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
214 struct drm_display_mode *m; 197 struct drm_display_mode *m;
215 int mode_ok; 198 int mode_ok;
216 199
217 DRM_DEBUG_KMS("%s\n", __FILE__);
218
219 drm_mode_set_crtcinfo(adjusted_mode, 0); 200 drm_mode_set_crtcinfo(adjusted_mode, 0);
220 201
221 mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode); 202 mode_ok = drm_hdmi_check_mode(subdrv_dev, adjusted_mode);
222 203
223 /* just return if user desired mode exists. */ 204 /* just return if user desired mode exists. */
224 if (mode_ok == 0) 205 if (mode_ok == 0)
@@ -229,7 +210,7 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
229 * to adjusted_mode. 210 * to adjusted_mode.
230 */ 211 */
231 list_for_each_entry(m, &connector->modes, head) { 212 list_for_each_entry(m, &connector->modes, head) {
232 mode_ok = drm_hdmi_check_timing(subdrv_dev, m); 213 mode_ok = drm_hdmi_check_mode(subdrv_dev, m);
233 214
234 if (mode_ok == 0) { 215 if (mode_ok == 0) {
235 struct drm_mode_object base; 216 struct drm_mode_object base;
@@ -256,8 +237,6 @@ static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
256{ 237{
257 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 238 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
258 239
259 DRM_DEBUG_KMS("%s\n", __FILE__);
260
261 if (hdmi_ops && hdmi_ops->mode_set) 240 if (hdmi_ops && hdmi_ops->mode_set)
262 hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode); 241 hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
263} 242}
@@ -267,8 +246,6 @@ static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
267{ 246{
268 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 247 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
269 248
270 DRM_DEBUG_KMS("%s\n", __FILE__);
271
272 if (hdmi_ops && hdmi_ops->get_max_resol) 249 if (hdmi_ops && hdmi_ops->get_max_resol)
273 hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height); 250 hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
274} 251}
@@ -277,8 +254,6 @@ static void drm_hdmi_commit(struct device *subdrv_dev)
277{ 254{
278 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 255 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
279 256
280 DRM_DEBUG_KMS("%s\n", __FILE__);
281
282 if (hdmi_ops && hdmi_ops->commit) 257 if (hdmi_ops && hdmi_ops->commit)
283 hdmi_ops->commit(ctx->hdmi_ctx->ctx); 258 hdmi_ops->commit(ctx->hdmi_ctx->ctx);
284} 259}
@@ -287,8 +262,6 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
287{ 262{
288 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 263 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
289 264
290 DRM_DEBUG_KMS("%s\n", __FILE__);
291
292 if (mixer_ops && mixer_ops->dpms) 265 if (mixer_ops && mixer_ops->dpms)
293 mixer_ops->dpms(ctx->mixer_ctx->ctx, mode); 266 mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
294 267
@@ -301,8 +274,6 @@ static void drm_hdmi_apply(struct device *subdrv_dev)
301 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 274 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
302 int i; 275 int i;
303 276
304 DRM_DEBUG_KMS("%s\n", __FILE__);
305
306 for (i = 0; i < MIXER_WIN_NR; i++) { 277 for (i = 0; i < MIXER_WIN_NR; i++) {
307 if (!ctx->enabled[i]) 278 if (!ctx->enabled[i])
308 continue; 279 continue;
@@ -331,8 +302,6 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
331{ 302{
332 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 303 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
333 304
334 DRM_DEBUG_KMS("%s\n", __FILE__);
335
336 if (mixer_ops && mixer_ops->win_mode_set) 305 if (mixer_ops && mixer_ops->win_mode_set)
337 mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay); 306 mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
338} 307}
@@ -342,9 +311,7 @@ static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
342 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 311 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
343 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos; 312 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
344 313
345 DRM_DEBUG_KMS("%s\n", __FILE__); 314 if (win < 0 || win >= MIXER_WIN_NR) {
346
347 if (win < 0 || win > MIXER_WIN_NR) {
348 DRM_ERROR("mixer window[%d] is wrong\n", win); 315 DRM_ERROR("mixer window[%d] is wrong\n", win);
349 return; 316 return;
350 } 317 }
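Alongside the debug-call removal, the hunk above (and the matching drm_mixer_disable hunk that follows) tightens the window bounds check. Valid window indices run 0 .. MIXER_WIN_NR - 1, so the old 'win > MIXER_WIN_NR' test let win == MIXER_WIN_NR through and indexed one slot past the end of the per-window state (see the ctx->enabled[] loop earlier in this file). A hypothetical reduction of the corrected check:

	static bool example_win_valid(int win)
	{
		/* '>' alone would wrongly accept win == MIXER_WIN_NR */
		return win >= 0 && win < MIXER_WIN_NR;
	}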
@@ -360,9 +327,7 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
360 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 327 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
361 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos; 328 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
362 329
363 DRM_DEBUG_KMS("%s\n", __FILE__); 330 if (win < 0 || win >= MIXER_WIN_NR) {
364
365 if (win < 0 || win > MIXER_WIN_NR) {
366 DRM_ERROR("mixer window[%d] is wrong\n", win); 331 DRM_ERROR("mixer window[%d] is wrong\n", win);
367 return; 332 return;
368 } 333 }
@@ -392,8 +357,6 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
392 struct exynos_drm_subdrv *subdrv = to_subdrv(dev); 357 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
393 struct drm_hdmi_context *ctx; 358 struct drm_hdmi_context *ctx;
394 359
395 DRM_DEBUG_KMS("%s\n", __FILE__);
396
397 if (!hdmi_ctx) { 360 if (!hdmi_ctx) {
398 DRM_ERROR("hdmi context not initialized.\n"); 361 DRM_ERROR("hdmi context not initialized.\n");
399 return -EFAULT; 362 return -EFAULT;
@@ -440,8 +403,6 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
440 struct exynos_drm_subdrv *subdrv; 403 struct exynos_drm_subdrv *subdrv;
441 struct drm_hdmi_context *ctx; 404 struct drm_hdmi_context *ctx;
442 405
443 DRM_DEBUG_KMS("%s\n", __FILE__);
444
445 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 406 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
446 if (!ctx) { 407 if (!ctx) {
447 DRM_LOG_KMS("failed to alloc common hdmi context.\n"); 408 DRM_LOG_KMS("failed to alloc common hdmi context.\n");
@@ -466,8 +427,6 @@ static int exynos_drm_hdmi_remove(struct platform_device *pdev)
466{ 427{
467 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev); 428 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
468 429
469 DRM_DEBUG_KMS("%s\n", __FILE__);
470
471 exynos_drm_subdrv_unregister(&ctx->subdrv); 430 exynos_drm_subdrv_unregister(&ctx->subdrv);
472 431
473 return 0; 432 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 6b709440df4c..724cab181976 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -32,11 +32,11 @@ struct exynos_hdmi_ops {
32 bool (*is_connected)(void *ctx); 32 bool (*is_connected)(void *ctx);
33 struct edid *(*get_edid)(void *ctx, 33 struct edid *(*get_edid)(void *ctx,
34 struct drm_connector *connector); 34 struct drm_connector *connector);
35 int (*check_timing)(void *ctx, struct fb_videomode *timing); 35 int (*check_mode)(void *ctx, struct drm_display_mode *mode);
36 int (*power_on)(void *ctx, int mode); 36 int (*power_on)(void *ctx, int mode);
37 37
38 /* manager */ 38 /* manager */
39 void (*mode_set)(void *ctx, void *mode); 39 void (*mode_set)(void *ctx, struct drm_display_mode *mode);
40 void (*get_max_resol)(void *ctx, unsigned int *width, 40 void (*get_max_resol)(void *ctx, unsigned int *width,
41 unsigned int *height); 41 unsigned int *height);
42 void (*commit)(void *ctx); 42 void (*commit)(void *ctx);
@@ -57,7 +57,7 @@ struct exynos_mixer_ops {
57 void (*win_disable)(void *ctx, int zpos); 57 void (*win_disable)(void *ctx, int zpos);
58 58
59 /* display */ 59 /* display */
60 int (*check_timing)(void *ctx, struct fb_videomode *timing); 60 int (*check_mode)(void *ctx, struct drm_display_mode *mode);
61}; 61};
62 62
63void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx); 63void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
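With both ops tables converted from fb_videomode timings to struct drm_display_mode, backends can validate modes on DRM's native type directly. A hypothetical backend callback, invented for illustration with example limits (mode->clock is in kHz, so 148500 is the 148.5 MHz 1080p60 pixel clock):

	static int example_check_mode(void *ctx, struct drm_display_mode *mode)
	{
		if (mode->hdisplay > 1920 || mode->vdisplay > 1080 ||
		    mode->clock > 148500)
			return -EINVAL;

		return 0;
	}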
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index be1e88463466..b1ef8e7ff9c9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -131,8 +131,6 @@ void exynos_platform_device_ipp_unregister(void)
131 131
132int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) 132int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
133{ 133{
134 DRM_DEBUG_KMS("%s\n", __func__);
135
136 if (!ippdrv) 134 if (!ippdrv)
137 return -EINVAL; 135 return -EINVAL;
138 136
@@ -145,8 +143,6 @@ int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
145 143
146int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) 144int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
147{ 145{
148 DRM_DEBUG_KMS("%s\n", __func__);
149
150 if (!ippdrv) 146 if (!ippdrv)
151 return -EINVAL; 147 return -EINVAL;
152 148
@@ -162,8 +158,6 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
162{ 158{
163 int ret; 159 int ret;
164 160
165 DRM_DEBUG_KMS("%s\n", __func__);
166
167 /* do the allocation under our mutexlock */ 161 /* do the allocation under our mutexlock */
168 mutex_lock(lock); 162 mutex_lock(lock);
169 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); 163 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
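idr_alloc() above uses the post-3.9 idr API: start == 1 with end == 0 asks for the smallest free ID of at least 1 with no upper bound, and the call returns either the new ID or a negative errno. The surrounding pattern, sketched under the assumption that the function's out-parameter is named idp:

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);	/* smallest free id >= 1 */
	mutex_unlock(lock);
	if (ret < 0)
		return ret;	/* -ENOMEM or -ENOSPC */

	*idp = ret;		/* assumed out-parameter for the new id */
	return 0;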
@@ -179,7 +173,7 @@ static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
179{ 173{
180 void *obj; 174 void *obj;
181 175
182 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id); 176 DRM_DEBUG_KMS("id[%d]\n", id);
183 177
184 mutex_lock(lock); 178 mutex_lock(lock);
185 179
@@ -216,7 +210,7 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
216 struct exynos_drm_ippdrv *ippdrv; 210 struct exynos_drm_ippdrv *ippdrv;
217 u32 ipp_id = property->ipp_id; 211 u32 ipp_id = property->ipp_id;
218 212
219 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id); 213 DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
220 214
221 if (ipp_id) { 215 if (ipp_id) {
222 /* find ipp driver using idr */ 216 /* find ipp driver using idr */
@@ -257,14 +251,13 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
257 */ 251 */
258 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 252 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
259 if (ipp_check_dedicated(ippdrv, property->cmd)) { 253 if (ipp_check_dedicated(ippdrv, property->cmd)) {
260 DRM_DEBUG_KMS("%s:used device.\n", __func__); 254 DRM_DEBUG_KMS("used device.\n");
261 continue; 255 continue;
262 } 256 }
263 257
264 if (ippdrv->check_property && 258 if (ippdrv->check_property &&
265 ippdrv->check_property(ippdrv->dev, property)) { 259 ippdrv->check_property(ippdrv->dev, property)) {
266 DRM_DEBUG_KMS("%s:not support property.\n", 260 DRM_DEBUG_KMS("not support property.\n");
267 __func__);
268 continue; 261 continue;
269 } 262 }
270 263
@@ -283,10 +276,10 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
283 struct drm_exynos_ipp_cmd_node *c_node; 276 struct drm_exynos_ipp_cmd_node *c_node;
284 int count = 0; 277 int count = 0;
285 278
286 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id); 279 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
287 280
288 if (list_empty(&exynos_drm_ippdrv_list)) { 281 if (list_empty(&exynos_drm_ippdrv_list)) {
289 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__); 282 DRM_DEBUG_KMS("ippdrv_list is empty.\n");
290 return ERR_PTR(-ENODEV); 283 return ERR_PTR(-ENODEV);
291 } 284 }
292 285
@@ -296,8 +289,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
296 * e.g. PAUSE state, queue buf, command control. 289 * e.g. PAUSE state, queue buf, command control.
297 */ 290 */
298 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 291 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
299 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__, 292 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
300 count++, (int)ippdrv);
301 293
302 if (!list_empty(&ippdrv->cmd_list)) { 294 if (!list_empty(&ippdrv->cmd_list)) {
303 list_for_each_entry(c_node, &ippdrv->cmd_list, list) 295 list_for_each_entry(c_node, &ippdrv->cmd_list, list)
@@ -320,8 +312,6 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
320 struct exynos_drm_ippdrv *ippdrv; 312 struct exynos_drm_ippdrv *ippdrv;
321 int count = 0; 313 int count = 0;
322 314
323 DRM_DEBUG_KMS("%s\n", __func__);
324
325 if (!ctx) { 315 if (!ctx) {
326 DRM_ERROR("invalid context.\n"); 316 DRM_ERROR("invalid context.\n");
327 return -EINVAL; 317 return -EINVAL;
@@ -332,7 +322,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
332 return -EINVAL; 322 return -EINVAL;
333 } 323 }
334 324
335 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id); 325 DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
336 326
337 if (!prop_list->ipp_id) { 327 if (!prop_list->ipp_id) {
338 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) 328 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
@@ -371,11 +361,11 @@ static void ipp_print_property(struct drm_exynos_ipp_property *property,
371 struct drm_exynos_pos *pos = &config->pos; 361 struct drm_exynos_pos *pos = &config->pos;
372 struct drm_exynos_sz *sz = &config->sz; 362 struct drm_exynos_sz *sz = &config->sz;
373 363
374 DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n", 364 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
375 __func__, property->prop_id, idx ? "dst" : "src", config->fmt); 365 property->prop_id, idx ? "dst" : "src", config->fmt);
376 366
377 DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", 367 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
378 __func__, pos->x, pos->y, pos->w, pos->h, 368 pos->x, pos->y, pos->w, pos->h,
379 sz->hsize, sz->vsize, config->flip, config->degree); 369 sz->hsize, sz->vsize, config->flip, config->degree);
380} 370}
381 371
@@ -385,7 +375,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
385 struct drm_exynos_ipp_cmd_node *c_node; 375 struct drm_exynos_ipp_cmd_node *c_node;
386 u32 prop_id = property->prop_id; 376 u32 prop_id = property->prop_id;
387 377
388 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id); 378 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
389 379
390 ippdrv = ipp_find_drv_by_handle(prop_id); 380 ippdrv = ipp_find_drv_by_handle(prop_id);
391 if (IS_ERR(ippdrv)) { 381 if (IS_ERR(ippdrv)) {
@@ -401,8 +391,8 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
401 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 391 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
402 if ((c_node->property.prop_id == prop_id) && 392 if ((c_node->property.prop_id == prop_id) &&
403 (c_node->state == IPP_STATE_STOP)) { 393 (c_node->state == IPP_STATE_STOP)) {
404 DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n", 394 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
405 __func__, property->cmd, (int)ippdrv); 395 property->cmd, (int)ippdrv);
406 396
407 c_node->property = *property; 397 c_node->property = *property;
408 return 0; 398 return 0;
@@ -418,8 +408,6 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
418{ 408{
419 struct drm_exynos_ipp_cmd_work *cmd_work; 409 struct drm_exynos_ipp_cmd_work *cmd_work;
420 410
421 DRM_DEBUG_KMS("%s\n", __func__);
422
423 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); 411 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
424 if (!cmd_work) { 412 if (!cmd_work) {
425 DRM_ERROR("failed to alloc cmd_work.\n"); 413 DRM_ERROR("failed to alloc cmd_work.\n");
@@ -435,8 +423,6 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
435{ 423{
436 struct drm_exynos_ipp_event_work *event_work; 424 struct drm_exynos_ipp_event_work *event_work;
437 425
438 DRM_DEBUG_KMS("%s\n", __func__);
439
440 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); 426 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
441 if (!event_work) { 427 if (!event_work) {
442 DRM_ERROR("failed to alloc event_work.\n"); 428 DRM_ERROR("failed to alloc event_work.\n");
@@ -460,8 +446,6 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
460 struct drm_exynos_ipp_cmd_node *c_node; 446 struct drm_exynos_ipp_cmd_node *c_node;
461 int ret, i; 447 int ret, i;
462 448
463 DRM_DEBUG_KMS("%s\n", __func__);
464
465 if (!ctx) { 449 if (!ctx) {
466 DRM_ERROR("invalid context.\n"); 450 DRM_ERROR("invalid context.\n");
467 return -EINVAL; 451 return -EINVAL;
@@ -486,7 +470,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
486 * instead of allocation. 470 * instead of allocation.
487 */ 471 */
488 if (property->prop_id) { 472 if (property->prop_id) {
489 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); 473 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
490 return ipp_find_and_set_property(property); 474 return ipp_find_and_set_property(property);
491 } 475 }
492 476
@@ -512,8 +496,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
512 goto err_clear; 496 goto err_clear;
513 } 497 }
514 498
515 DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 499 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
516 __func__, property->prop_id, property->cmd, (int)ippdrv); 500 property->prop_id, property->cmd, (int)ippdrv);
517 501
518 /* stored property information and ippdrv in private data */ 502 /* stored property information and ippdrv in private data */
519 c_node->priv = priv; 503 c_node->priv = priv;
@@ -569,8 +553,6 @@ err_clear:
569 553
570static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node) 554static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
571{ 555{
572 DRM_DEBUG_KMS("%s\n", __func__);
573
574 /* delete list */ 556 /* delete list */
575 list_del(&c_node->list); 557 list_del(&c_node->list);
576 558
@@ -593,8 +575,6 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
593 struct list_head *head; 575 struct list_head *head;
594 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, }; 576 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
595 577
596 DRM_DEBUG_KMS("%s\n", __func__);
597
598 mutex_lock(&c_node->mem_lock); 578 mutex_lock(&c_node->mem_lock);
599 579
600 for_each_ipp_ops(i) { 580 for_each_ipp_ops(i) {
@@ -602,20 +582,19 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
602 head = &c_node->mem_list[i]; 582 head = &c_node->mem_list[i];
603 583
604 if (list_empty(head)) { 584 if (list_empty(head)) {
605 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__, 585 DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
606 i ? "dst" : "src");
607 continue; 586 continue;
608 } 587 }
609 588
610 /* find memory node entry */ 589 /* find memory node entry */
611 list_for_each_entry(m_node, head, list) { 590 list_for_each_entry(m_node, head, list) {
612 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__, 591 DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
613 i ? "dst" : "src", count[i], (int)m_node); 592 i ? "dst" : "src", count[i], (int)m_node);
614 count[i]++; 593 count[i]++;
615 } 594 }
616 } 595 }
617 596
618 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__, 597 DRM_DEBUG_KMS("min[%d]max[%d]\n",
619 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]), 598 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
620 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST])); 599 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
621 600
@@ -644,15 +623,14 @@ static struct drm_exynos_ipp_mem_node
644 struct list_head *head; 623 struct list_head *head;
645 int count = 0; 624 int count = 0;
646 625
647 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id); 626 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
648 627
649 /* source/destination memory list */ 628 /* source/destination memory list */
650 head = &c_node->mem_list[qbuf->ops_id]; 629 head = &c_node->mem_list[qbuf->ops_id];
651 630
652 /* find memory node from memory list */ 631 /* find memory node from memory list */
653 list_for_each_entry(m_node, head, list) { 632 list_for_each_entry(m_node, head, list) {
654 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n", 633 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
655 __func__, count++, (int)m_node);
656 634
657 /* compare buffer id */ 635 /* compare buffer id */
658 if (m_node->buf_id == qbuf->buf_id) 636 if (m_node->buf_id == qbuf->buf_id)
@@ -669,7 +647,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
669 struct exynos_drm_ipp_ops *ops = NULL; 647 struct exynos_drm_ipp_ops *ops = NULL;
670 int ret = 0; 648 int ret = 0;
671 649
672 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node); 650 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
673 651
674 if (!m_node) { 652 if (!m_node) {
675 DRM_ERROR("invalid queue node.\n"); 653 DRM_ERROR("invalid queue node.\n");
@@ -678,7 +656,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
678 656
679 mutex_lock(&c_node->mem_lock); 657 mutex_lock(&c_node->mem_lock);
680 658
681 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id); 659 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
682 660
683 /* get operations callback */ 661 /* get operations callback */
684 ops = ippdrv->ops[m_node->ops_id]; 662 ops = ippdrv->ops[m_node->ops_id];
@@ -714,8 +692,6 @@ static struct drm_exynos_ipp_mem_node
714 void *addr; 692 void *addr;
715 int i; 693 int i;
716 694
717 DRM_DEBUG_KMS("%s\n", __func__);
718
719 mutex_lock(&c_node->mem_lock); 695 mutex_lock(&c_node->mem_lock);
720 696
721 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 697 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
@@ -732,14 +708,11 @@ static struct drm_exynos_ipp_mem_node
732 m_node->prop_id = qbuf->prop_id; 708 m_node->prop_id = qbuf->prop_id;
733 m_node->buf_id = qbuf->buf_id; 709 m_node->buf_id = qbuf->buf_id;
734 710
735 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__, 711 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
736 (int)m_node, qbuf->ops_id); 712 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
737 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
738 qbuf->prop_id, m_node->buf_id);
739 713
740 for_each_ipp_planar(i) { 714 for_each_ipp_planar(i) {
741 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__, 715 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
742 i, qbuf->handle[i]);
743 716
744 /* get dma address by handle */ 717 /* get dma address by handle */
745 if (qbuf->handle[i]) { 718 if (qbuf->handle[i]) {
@@ -752,9 +725,8 @@ static struct drm_exynos_ipp_mem_node
752 725
753 buf_info.handles[i] = qbuf->handle[i]; 726 buf_info.handles[i] = qbuf->handle[i];
754 buf_info.base[i] = *(dma_addr_t *) addr; 727 buf_info.base[i] = *(dma_addr_t *) addr;
755 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n", 728 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
756 __func__, i, buf_info.base[i], 729 i, buf_info.base[i], (int)buf_info.handles[i]);
757 (int)buf_info.handles[i]);
758 } 730 }
759 } 731 }
760 732
@@ -778,7 +750,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
778{ 750{
779 int i; 751 int i;
780 752
781 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node); 753 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
782 754
783 if (!m_node) { 755 if (!m_node) {
784 DRM_ERROR("invalid dequeue node.\n"); 756 DRM_ERROR("invalid dequeue node.\n");
@@ -792,7 +764,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
792 764
793 mutex_lock(&c_node->mem_lock); 765 mutex_lock(&c_node->mem_lock);
794 766
795 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id); 767 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
796 768
797 /* put gem buffer */ 769 /* put gem buffer */
798 for_each_ipp_planar(i) { 770 for_each_ipp_planar(i) {
@@ -824,8 +796,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
824 struct drm_exynos_ipp_send_event *e; 796 struct drm_exynos_ipp_send_event *e;
825 unsigned long flags; 797 unsigned long flags;
826 798
827 DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__, 799 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
828 qbuf->ops_id, qbuf->buf_id);
829 800
830 e = kzalloc(sizeof(*e), GFP_KERNEL); 801 e = kzalloc(sizeof(*e), GFP_KERNEL);
831 802
@@ -857,16 +828,13 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
857 struct drm_exynos_ipp_send_event *e, *te; 828 struct drm_exynos_ipp_send_event *e, *te;
858 int count = 0; 829 int count = 0;
859 830
860 DRM_DEBUG_KMS("%s\n", __func__);
861
862 if (list_empty(&c_node->event_list)) { 831 if (list_empty(&c_node->event_list)) {
863 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__); 832 DRM_DEBUG_KMS("event_list is empty.\n");
864 return; 833 return;
865 } 834 }
866 835
867 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 836 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
868 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n", 837 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
869 __func__, count++, (int)e);
870 838
871 /* 839 /*
872 * qbuf == NULL condition means all event deletion. 840 * qbuf == NULL condition means all event deletion.
@@ -912,8 +880,6 @@ static int ipp_queue_buf_with_run(struct device *dev,
912 struct exynos_drm_ipp_ops *ops; 880 struct exynos_drm_ipp_ops *ops;
913 int ret; 881 int ret;
914 882
915 DRM_DEBUG_KMS("%s\n", __func__);
916
917 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); 883 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
918 if (IS_ERR(ippdrv)) { 884 if (IS_ERR(ippdrv)) {
919 DRM_ERROR("failed to get ipp driver.\n"); 885 DRM_ERROR("failed to get ipp driver.\n");
@@ -929,12 +895,12 @@ static int ipp_queue_buf_with_run(struct device *dev,
929 property = &c_node->property; 895 property = &c_node->property;
930 896
931 if (c_node->state != IPP_STATE_START) { 897 if (c_node->state != IPP_STATE_START) {
932 DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__); 898 DRM_DEBUG_KMS("bypass for invalid state.\n");
933 return 0; 899 return 0;
934 } 900 }
935 901
936 if (!ipp_check_mem_list(c_node)) { 902 if (!ipp_check_mem_list(c_node)) {
937 DRM_DEBUG_KMS("%s:empty memory.\n", __func__); 903 DRM_DEBUG_KMS("empty memory.\n");
938 return 0; 904 return 0;
939 } 905 }
940 906
@@ -964,8 +930,6 @@ static void ipp_clean_queue_buf(struct drm_device *drm_dev,
964{ 930{
965 struct drm_exynos_ipp_mem_node *m_node, *tm_node; 931 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
966 932
967 DRM_DEBUG_KMS("%s\n", __func__);
968
969 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) { 933 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
970 /* delete list */ 934 /* delete list */
971 list_for_each_entry_safe(m_node, tm_node, 935 list_for_each_entry_safe(m_node, tm_node,
@@ -989,8 +953,6 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
989 struct drm_exynos_ipp_mem_node *m_node; 953 struct drm_exynos_ipp_mem_node *m_node;
990 int ret; 954 int ret;
991 955
992 DRM_DEBUG_KMS("%s\n", __func__);
993
994 if (!qbuf) { 956 if (!qbuf) {
995 DRM_ERROR("invalid buf parameter.\n"); 957 DRM_ERROR("invalid buf parameter.\n");
996 return -EINVAL; 958 return -EINVAL;
@@ -1001,8 +963,8 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
1001 return -EINVAL; 963 return -EINVAL;
1002 } 964 }
1003 965
1004 DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n", 966 DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
1005 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src", 967 qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
1006 qbuf->buf_id, qbuf->buf_type); 968 qbuf->buf_id, qbuf->buf_type);
1007 969
1008 /* find command node */ 970 /* find command node */
@@ -1075,8 +1037,6 @@ err_clean_node:
1075static bool exynos_drm_ipp_check_valid(struct device *dev, 1037static bool exynos_drm_ipp_check_valid(struct device *dev,
1076 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state) 1038 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1077{ 1039{
1078 DRM_DEBUG_KMS("%s\n", __func__);
1079
1080 if (ctrl != IPP_CTRL_PLAY) { 1040 if (ctrl != IPP_CTRL_PLAY) {
1081 if (pm_runtime_suspended(dev)) { 1041 if (pm_runtime_suspended(dev)) {
1082 DRM_ERROR("pm:runtime_suspended.\n"); 1042 DRM_ERROR("pm:runtime_suspended.\n");
@@ -1104,7 +1064,6 @@ static bool exynos_drm_ipp_check_valid(struct device *dev,
1104 default: 1064 default:
1105 DRM_ERROR("invalid state.\n"); 1065 DRM_ERROR("invalid state.\n");
1106 goto err_status; 1066 goto err_status;
1107 break;
1108 } 1067 }
1109 1068
1110 return true; 1069 return true;
@@ -1126,8 +1085,6 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1126 struct drm_exynos_ipp_cmd_work *cmd_work; 1085 struct drm_exynos_ipp_cmd_work *cmd_work;
1127 struct drm_exynos_ipp_cmd_node *c_node; 1086 struct drm_exynos_ipp_cmd_node *c_node;
1128 1087
1129 DRM_DEBUG_KMS("%s\n", __func__);
1130
1131 if (!ctx) { 1088 if (!ctx) {
1132 DRM_ERROR("invalid context.\n"); 1089 DRM_ERROR("invalid context.\n");
1133 return -EINVAL; 1090 return -EINVAL;
@@ -1138,7 +1095,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1138 return -EINVAL; 1095 return -EINVAL;
1139 } 1096 }
1140 1097
1141 DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__, 1098 DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
1142 cmd_ctrl->ctrl, cmd_ctrl->prop_id); 1099 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1143 1100
1144 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id); 1101 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
@@ -1213,7 +1170,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1213 return -EINVAL; 1170 return -EINVAL;
1214 } 1171 }
1215 1172
1216 DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__, 1173 DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
1217 cmd_ctrl->ctrl, cmd_ctrl->prop_id); 1174 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1218 1175
1219 return 0; 1176 return 0;
@@ -1249,7 +1206,7 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1249 return -EINVAL; 1206 return -EINVAL;
1250 } 1207 }
1251 1208
1252 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); 1209 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1253 1210
1254 /* reset h/w block */ 1211 /* reset h/w block */
1255 if (ippdrv->reset && 1212 if (ippdrv->reset &&
@@ -1310,13 +1267,13 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1310 struct list_head *head; 1267 struct list_head *head;
1311 int ret, i; 1268 int ret, i;
1312 1269
1313 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); 1270 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1314 1271
1315 /* store command info in ippdrv */ 1272 /* store command info in ippdrv */
1316 ippdrv->c_node = c_node; 1273 ippdrv->c_node = c_node;
1317 1274
1318 if (!ipp_check_mem_list(c_node)) { 1275 if (!ipp_check_mem_list(c_node)) {
1319 DRM_DEBUG_KMS("%s:empty memory.\n", __func__); 1276 DRM_DEBUG_KMS("empty memory.\n");
1320 return -ENOMEM; 1277 return -ENOMEM;
1321 } 1278 }
1322 1279
@@ -1343,8 +1300,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1343 return ret; 1300 return ret;
1344 } 1301 }
1345 1302
1346 DRM_DEBUG_KMS("%s:m_node[0x%x]\n", 1303 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
1347 __func__, (int)m_node);
1348 1304
1349 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1305 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1350 if (ret) { 1306 if (ret) {
@@ -1382,7 +1338,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1382 return -EINVAL; 1338 return -EINVAL;
1383 } 1339 }
1384 1340
1385 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd); 1341 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
1386 1342
1387 /* start operations */ 1343 /* start operations */
1388 if (ippdrv->start) { 1344 if (ippdrv->start) {
@@ -1405,7 +1361,7 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1405 struct list_head *head; 1361 struct list_head *head;
1406 int ret = 0, i; 1362 int ret = 0, i;
1407 1363
1408 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); 1364 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1409 1365
1410 /* put event */ 1366 /* put event */
1411 ipp_put_event(c_node, NULL); 1367 ipp_put_event(c_node, NULL);
@@ -1418,8 +1374,7 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1418 head = &c_node->mem_list[i]; 1374 head = &c_node->mem_list[i];
1419 1375
1420 if (list_empty(head)) { 1376 if (list_empty(head)) {
1421 DRM_DEBUG_KMS("%s:mem_list is empty.\n", 1377 DRM_DEBUG_KMS("mem_list is empty.\n");
1422 __func__);
1423 break; 1378 break;
1424 } 1379 }
1425 1380
@@ -1439,7 +1394,7 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1439 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; 1394 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1440 1395
1441 if (list_empty(head)) { 1396 if (list_empty(head)) {
1442 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__); 1397 DRM_DEBUG_KMS("mem_list is empty.\n");
1443 break; 1398 break;
1444 } 1399 }
1445 1400
@@ -1456,7 +1411,7 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1456 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1411 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1457 1412
1458 if (list_empty(head)) { 1413 if (list_empty(head)) {
1459 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__); 1414 DRM_DEBUG_KMS("mem_list is empty.\n");
1460 break; 1415 break;
1461 } 1416 }
1462 1417
@@ -1491,8 +1446,6 @@ void ipp_sched_cmd(struct work_struct *work)
1491 struct drm_exynos_ipp_property *property; 1446 struct drm_exynos_ipp_property *property;
1492 int ret; 1447 int ret;
1493 1448
1494 DRM_DEBUG_KMS("%s\n", __func__);
1495
1496 ippdrv = cmd_work->ippdrv; 1449 ippdrv = cmd_work->ippdrv;
1497 if (!ippdrv) { 1450 if (!ippdrv) {
1498 DRM_ERROR("invalid ippdrv list.\n"); 1451 DRM_ERROR("invalid ippdrv list.\n");
@@ -1550,7 +1503,7 @@ void ipp_sched_cmd(struct work_struct *work)
1550 break; 1503 break;
1551 } 1504 }
1552 1505
1553 DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl); 1506 DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
1554 1507
1555err_unlock: 1508err_unlock:
1556 mutex_unlock(&c_node->cmd_lock); 1509 mutex_unlock(&c_node->cmd_lock);
@@ -1571,8 +1524,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1571 int ret, i; 1524 int ret, i;
1572 1525
1573 for_each_ipp_ops(i) 1526 for_each_ipp_ops(i)
1574 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__, 1527 DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
1575 i ? "dst" : "src", buf_id[i]);
1576 1528
1577 if (!drm_dev) { 1529 if (!drm_dev) {
1578 DRM_ERROR("failed to get drm_dev.\n"); 1530 DRM_ERROR("failed to get drm_dev.\n");
@@ -1585,12 +1537,12 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1585 } 1537 }
1586 1538
1587 if (list_empty(&c_node->event_list)) { 1539 if (list_empty(&c_node->event_list)) {
1588 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__); 1540 DRM_DEBUG_KMS("event list is empty.\n");
1589 return 0; 1541 return 0;
1590 } 1542 }
1591 1543
1592 if (!ipp_check_mem_list(c_node)) { 1544 if (!ipp_check_mem_list(c_node)) {
1593 DRM_DEBUG_KMS("%s:empty memory.\n", __func__); 1545 DRM_DEBUG_KMS("empty memory.\n");
1594 return 0; 1546 return 0;
1595 } 1547 }
1596 1548
@@ -1609,7 +1561,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1609 } 1561 }
1610 1562
1611 tbuf_id[i] = m_node->buf_id; 1563 tbuf_id[i] = m_node->buf_id;
1612 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__, 1564 DRM_DEBUG_KMS("%s buf_id[%d]\n",
1613 i ? "dst" : "src", tbuf_id[i]); 1565 i ? "dst" : "src", tbuf_id[i]);
1614 1566
1615 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1567 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
@@ -1677,8 +1629,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1677 } 1629 }
1678 1630
1679 do_gettimeofday(&now); 1631 do_gettimeofday(&now);
1680 DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n" 1632 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
1681 , __func__, now.tv_sec, now.tv_usec);
1682 e->event.tv_sec = now.tv_sec; 1633 e->event.tv_sec = now.tv_sec;
1683 e->event.tv_usec = now.tv_usec; 1634 e->event.tv_usec = now.tv_usec;
1684 e->event.prop_id = property->prop_id; 1635 e->event.prop_id = property->prop_id;
@@ -1692,7 +1643,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1692 wake_up_interruptible(&e->base.file_priv->event_wait); 1643 wake_up_interruptible(&e->base.file_priv->event_wait);
1693 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1644 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1694 1645
1695 DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__, 1646 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
1696 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1647 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1697 1648
1698 return 0; 1649 return 0;
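This tail of ipp_send_event() is the standard DRM event completion sequence: timestamp the event, move it onto the waiter's event list under the device's event_lock, then wake the listener. Sketched from the fields visible in the hunks above; the list move itself is not shown in the diff and is filled in as an assumption:

	do_gettimeofday(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);	/* assumed */
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);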
@@ -1711,8 +1662,7 @@ void ipp_sched_event(struct work_struct *work)
1711 return; 1662 return;
1712 } 1663 }
1713 1664
1714 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, 1665 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1715 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1716 1666
1717 ippdrv = event_work->ippdrv; 1667 ippdrv = event_work->ippdrv;
1718 if (!ippdrv) { 1668 if (!ippdrv) {
@@ -1733,8 +1683,8 @@ void ipp_sched_event(struct work_struct *work)
1733 * or going out operations. 1683 * or going out operations.
1734 */ 1684 */
1735 if (c_node->state != IPP_STATE_START) { 1685 if (c_node->state != IPP_STATE_START) {
1736 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n", 1686 DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
1737 __func__, c_node->state, c_node->property.prop_id); 1687 c_node->state, c_node->property.prop_id);
1738 goto err_completion; 1688 goto err_completion;
1739 } 1689 }
1740 1690
@@ -1759,8 +1709,6 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1759 struct exynos_drm_ippdrv *ippdrv; 1709 struct exynos_drm_ippdrv *ippdrv;
1760 int ret, count = 0; 1710 int ret, count = 0;
1761 1711
1762 DRM_DEBUG_KMS("%s\n", __func__);
1763
1764 /* get ipp driver entry */ 1712 /* get ipp driver entry */
1765 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1713 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1766 ippdrv->drm_dev = drm_dev; 1714 ippdrv->drm_dev = drm_dev;
@@ -1772,7 +1720,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1772 goto err_idr; 1720 goto err_idr;
1773 } 1721 }
1774 1722
1775 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__, 1723 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
1776 count++, (int)ippdrv, ippdrv->ipp_id); 1724 count++, (int)ippdrv, ippdrv->ipp_id);
1777 1725
1778 if (ippdrv->ipp_id == 0) { 1726 if (ippdrv->ipp_id == 0) {
@@ -1816,8 +1764,6 @@ static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1816{ 1764{
1817 struct exynos_drm_ippdrv *ippdrv; 1765 struct exynos_drm_ippdrv *ippdrv;
1818 1766
1819 DRM_DEBUG_KMS("%s\n", __func__);
1820
1821 /* get ipp driver entry */ 1767 /* get ipp driver entry */
1822 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1768 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1823 if (is_drm_iommu_supported(drm_dev)) 1769 if (is_drm_iommu_supported(drm_dev))
@@ -1834,8 +1780,6 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1834 struct drm_exynos_file_private *file_priv = file->driver_priv; 1780 struct drm_exynos_file_private *file_priv = file->driver_priv;
1835 struct exynos_drm_ipp_private *priv; 1781 struct exynos_drm_ipp_private *priv;
1836 1782
1837 DRM_DEBUG_KMS("%s\n", __func__);
1838
1839 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1783 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1840 if (!priv) { 1784 if (!priv) {
1841 DRM_ERROR("failed to allocate priv.\n"); 1785 DRM_ERROR("failed to allocate priv.\n");
@@ -1846,7 +1790,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1846 1790
1847 INIT_LIST_HEAD(&priv->event_list); 1791 INIT_LIST_HEAD(&priv->event_list);
1848 1792
1849 DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv); 1793 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
1850 1794
1851 return 0; 1795 return 0;
1852} 1796}
@@ -1860,10 +1804,10 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1860 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1804 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1861 int count = 0; 1805 int count = 0;
1862 1806
1863 DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv); 1807 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
1864 1808
1865 if (list_empty(&exynos_drm_ippdrv_list)) { 1809 if (list_empty(&exynos_drm_ippdrv_list)) {
1866 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__); 1810 DRM_DEBUG_KMS("ippdrv_list is empty.\n");
1867 goto err_clear; 1811 goto err_clear;
1868 } 1812 }
1869 1813
@@ -1873,8 +1817,8 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1873 1817
1874 list_for_each_entry_safe(c_node, tc_node, 1818 list_for_each_entry_safe(c_node, tc_node,
1875 &ippdrv->cmd_list, list) { 1819 &ippdrv->cmd_list, list) {
1876 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", 1820 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1877 __func__, count++, (int)ippdrv); 1821 count++, (int)ippdrv);
1878 1822
1879 if (c_node->priv == priv) { 1823 if (c_node->priv == priv) {
1880 /* 1824 /*
@@ -1913,8 +1857,6 @@ static int ipp_probe(struct platform_device *pdev)
1913 if (!ctx) 1857 if (!ctx)
1914 return -ENOMEM; 1858 return -ENOMEM;
1915 1859
1916 DRM_DEBUG_KMS("%s\n", __func__);
1917
1918 mutex_init(&ctx->ipp_lock); 1860 mutex_init(&ctx->ipp_lock);
1919 mutex_init(&ctx->prop_lock); 1861 mutex_init(&ctx->prop_lock);
1920 1862
@@ -1978,8 +1920,6 @@ static int ipp_remove(struct platform_device *pdev)
1978{ 1920{
1979 struct ipp_context *ctx = platform_get_drvdata(pdev); 1921 struct ipp_context *ctx = platform_get_drvdata(pdev);
1980 1922
1981 DRM_DEBUG_KMS("%s\n", __func__);
1982
1983 /* unregister sub driver */ 1923 /* unregister sub driver */
1984 exynos_drm_subdrv_unregister(&ctx->subdrv); 1924 exynos_drm_subdrv_unregister(&ctx->subdrv);
1985 1925
@@ -1999,7 +1939,7 @@ static int ipp_remove(struct platform_device *pdev)
1999 1939
2000static int ipp_power_ctrl(struct ipp_context *ctx, bool enable) 1940static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
2001{ 1941{
2002 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 1942 DRM_DEBUG_KMS("enable[%d]\n", enable);
2003 1943
2004 return 0; 1944 return 0;
2005} 1945}
@@ -2009,8 +1949,6 @@ static int ipp_suspend(struct device *dev)
2009{ 1949{
2010 struct ipp_context *ctx = get_ipp_context(dev); 1950 struct ipp_context *ctx = get_ipp_context(dev);
2011 1951
2012 DRM_DEBUG_KMS("%s\n", __func__);
2013
2014 if (pm_runtime_suspended(dev)) 1952 if (pm_runtime_suspended(dev))
2015 return 0; 1953 return 0;
2016 1954
@@ -2021,8 +1959,6 @@ static int ipp_resume(struct device *dev)
2021{ 1959{
2022 struct ipp_context *ctx = get_ipp_context(dev); 1960 struct ipp_context *ctx = get_ipp_context(dev);
2023 1961
2024 DRM_DEBUG_KMS("%s\n", __func__);
2025
2026 if (!pm_runtime_suspended(dev)) 1962 if (!pm_runtime_suspended(dev))
2027 return ipp_power_ctrl(ctx, true); 1963 return ipp_power_ctrl(ctx, true);
2028 1964
@@ -2035,8 +1971,6 @@ static int ipp_runtime_suspend(struct device *dev)
2035{ 1971{
2036 struct ipp_context *ctx = get_ipp_context(dev); 1972 struct ipp_context *ctx = get_ipp_context(dev);
2037 1973
2038 DRM_DEBUG_KMS("%s\n", __func__);
2039
2040 return ipp_power_ctrl(ctx, false); 1974 return ipp_power_ctrl(ctx, false);
2041} 1975}
2042 1976
@@ -2044,8 +1978,6 @@ static int ipp_runtime_resume(struct device *dev)
2044{ 1978{
2045 struct ipp_context *ctx = get_ipp_context(dev); 1979 struct ipp_context *ctx = get_ipp_context(dev);
2046 1980
2047 DRM_DEBUG_KMS("%s\n", __func__);
2048
2049 return ipp_power_ctrl(ctx, true); 1981 return ipp_power_ctrl(ctx, true);
2050} 1982}
2051#endif 1983#endif
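
ipp_suspend()/ipp_resume() keep their pm_runtime_suspended() guards so the system-sleep path never double-toggles power on a device that runtime PM has already parked. The shape of that guard, as a sketch with hypothetical foo_* names standing in for the driver's helpers:

	static int foo_suspend(struct device *dev)
	{
		struct foo_ctx *ctx = dev_get_drvdata(dev);

		/* Already powered down by runtime PM: nothing to do. */
		if (pm_runtime_suspended(dev))
			return 0;

		return foo_power_ctrl(ctx, false);
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_ctx *ctx = dev_get_drvdata(dev);

		/* Runtime PM will power up on first use; skip if parked. */
		if (!pm_runtime_suspended(dev))
			return foo_power_ctrl(ctx, true);

		return 0;
	}
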
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 83efc662d65a..6ee55e68e0a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -81,8 +81,6 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
81 int nr; 81 int nr;
82 int i; 82 int i;
83 83
84 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
85
86 nr = exynos_drm_fb_get_buf_cnt(fb); 84 nr = exynos_drm_fb_get_buf_cnt(fb);
87 for (i = 0; i < nr; i++) { 85 for (i = 0; i < nr; i++) {
88 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); 86 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
@@ -159,8 +157,6 @@ void exynos_plane_dpms(struct drm_plane *plane, int mode)
159 struct exynos_plane *exynos_plane = to_exynos_plane(plane); 157 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
160 struct exynos_drm_overlay *overlay = &exynos_plane->overlay; 158 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
161 159
162 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
163
164 if (mode == DRM_MODE_DPMS_ON) { 160 if (mode == DRM_MODE_DPMS_ON) {
165 if (exynos_plane->enabled) 161 if (exynos_plane->enabled)
166 return; 162 return;
@@ -189,8 +185,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
189{ 185{
190 int ret; 186 int ret;
191 187
192 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
193
194 ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, 188 ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
195 crtc_w, crtc_h, src_x >> 16, src_y >> 16, 189 crtc_w, crtc_h, src_x >> 16, src_y >> 16,
196 src_w >> 16, src_h >> 16); 190 src_w >> 16, src_h >> 16);
@@ -207,8 +201,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
207 201
208static int exynos_disable_plane(struct drm_plane *plane) 202static int exynos_disable_plane(struct drm_plane *plane)
209{ 203{
210 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
211
212 exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF); 204 exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
213 205
214 return 0; 206 return 0;
@@ -218,8 +210,6 @@ static void exynos_plane_destroy(struct drm_plane *plane)
218{ 210{
219 struct exynos_plane *exynos_plane = to_exynos_plane(plane); 211 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
220 212
221 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
222
223 exynos_disable_plane(plane); 213 exynos_disable_plane(plane);
224 drm_plane_cleanup(plane); 214 drm_plane_cleanup(plane);
225 kfree(exynos_plane); 215 kfree(exynos_plane);
@@ -233,8 +223,6 @@ static int exynos_plane_set_property(struct drm_plane *plane,
233 struct exynos_plane *exynos_plane = to_exynos_plane(plane); 223 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
234 struct exynos_drm_private *dev_priv = dev->dev_private; 224 struct exynos_drm_private *dev_priv = dev->dev_private;
235 225
236 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
237
238 if (property == dev_priv->plane_zpos_property) { 226 if (property == dev_priv->plane_zpos_property) {
239 exynos_plane->overlay.zpos = val; 227 exynos_plane->overlay.zpos = val;
240 return 0; 228 return 0;
@@ -256,8 +244,6 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
256 struct exynos_drm_private *dev_priv = dev->dev_private; 244 struct exynos_drm_private *dev_priv = dev->dev_private;
257 struct drm_property *prop; 245 struct drm_property *prop;
258 246
259 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
260
261 prop = dev_priv->plane_zpos_property; 247 prop = dev_priv->plane_zpos_property;
262 if (!prop) { 248 if (!prop) {
263 prop = drm_property_create_range(dev, 0, "zpos", 0, 249 prop = drm_property_create_range(dev, 0, "zpos", 0,
@@ -277,8 +263,6 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
277 struct exynos_plane *exynos_plane; 263 struct exynos_plane *exynos_plane;
278 int err; 264 int err;
279 265
280 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
281
282 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); 266 exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
283 if (!exynos_plane) { 267 if (!exynos_plane) {
284 DRM_ERROR("failed to allocate plane\n"); 268 DRM_ERROR("failed to allocate plane\n");
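
The plane hunks are log cleanups only; the zpos handling visible in exynos_plane_set_property() and exynos_plane_attach_zpos_property() is untouched. For orientation, attaching such a range property in a driver of this vintage looks roughly like this (range and initial value are illustrative):

	struct drm_property *prop;

	/* Create once, cache in dev_private, then attach per plane. */
	prop = drm_property_create_range(dev, 0, "zpos", 0, MAX_PLANE - 1);
	if (!prop)
		return;

	drm_object_attach_property(&plane->base, prop, 0);
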
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 9b6c70964d71..427640aa5148 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -244,7 +244,7 @@ static int rotator_src_set_size(struct device *dev, int swap,
244 /* Get format */ 244 /* Get format */
245 fmt = rotator_reg_get_fmt(rot); 245 fmt = rotator_reg_get_fmt(rot);
246 if (!rotator_check_reg_fmt(fmt)) { 246 if (!rotator_check_reg_fmt(fmt)) {
247 DRM_ERROR("%s:invalid format.\n", __func__); 247 DRM_ERROR("invalid format.\n");
248 return -EINVAL; 248 return -EINVAL;
249 } 249 }
250 250
@@ -287,7 +287,7 @@ static int rotator_src_set_addr(struct device *dev,
287 /* Get format */ 287 /* Get format */
288 fmt = rotator_reg_get_fmt(rot); 288 fmt = rotator_reg_get_fmt(rot);
289 if (!rotator_check_reg_fmt(fmt)) { 289 if (!rotator_check_reg_fmt(fmt)) {
290 DRM_ERROR("%s:invalid format.\n", __func__); 290 DRM_ERROR("invalid format.\n");
291 return -EINVAL; 291 return -EINVAL;
292 } 292 }
293 293
@@ -381,7 +381,7 @@ static int rotator_dst_set_size(struct device *dev, int swap,
381 /* Get format */ 381 /* Get format */
382 fmt = rotator_reg_get_fmt(rot); 382 fmt = rotator_reg_get_fmt(rot);
383 if (!rotator_check_reg_fmt(fmt)) { 383 if (!rotator_check_reg_fmt(fmt)) {
384 DRM_ERROR("%s:invalid format.\n", __func__); 384 DRM_ERROR("invalid format.\n");
385 return -EINVAL; 385 return -EINVAL;
386 } 386 }
387 387
@@ -422,7 +422,7 @@ static int rotator_dst_set_addr(struct device *dev,
422 /* Get format */ 422 /* Get format */
423 fmt = rotator_reg_get_fmt(rot); 423 fmt = rotator_reg_get_fmt(rot);
424 if (!rotator_check_reg_fmt(fmt)) { 424 if (!rotator_check_reg_fmt(fmt)) {
425 DRM_ERROR("%s:invalid format.\n", __func__); 425 DRM_ERROR("invalid format.\n");
426 return -EINVAL; 426 return -EINVAL;
427 } 427 }
428 428
@@ -471,8 +471,6 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471{ 471{
472 struct drm_exynos_ipp_prop_list *prop_list; 472 struct drm_exynos_ipp_prop_list *prop_list;
473 473
474 DRM_DEBUG_KMS("%s\n", __func__);
475
476 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); 474 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
477 if (!prop_list) { 475 if (!prop_list) {
478 DRM_ERROR("failed to alloc property list.\n"); 476 DRM_ERROR("failed to alloc property list.\n");
@@ -502,7 +500,7 @@ static inline bool rotator_check_drm_fmt(u32 fmt)
502 case DRM_FORMAT_NV12: 500 case DRM_FORMAT_NV12:
503 return true; 501 return true;
504 default: 502 default:
505 DRM_DEBUG_KMS("%s:not support format\n", __func__); 503 DRM_DEBUG_KMS("not support format\n");
506 return false; 504 return false;
507 } 505 }
508} 506}
@@ -516,7 +514,7 @@ static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
516 case EXYNOS_DRM_FLIP_BOTH: 514 case EXYNOS_DRM_FLIP_BOTH:
517 return true; 515 return true;
518 default: 516 default:
519 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 517 DRM_DEBUG_KMS("invalid flip\n");
520 return false; 518 return false;
521 } 519 }
522} 520}
@@ -536,19 +534,18 @@ static int rotator_ippdrv_check_property(struct device *dev,
536 534
537 /* Check format configuration */ 535 /* Check format configuration */
538 if (src_config->fmt != dst_config->fmt) { 536 if (src_config->fmt != dst_config->fmt) {
539 DRM_DEBUG_KMS("%s:not support csc feature\n", __func__); 537 DRM_DEBUG_KMS("not support csc feature\n");
540 return -EINVAL; 538 return -EINVAL;
541 } 539 }
542 540
543 if (!rotator_check_drm_fmt(dst_config->fmt)) { 541 if (!rotator_check_drm_fmt(dst_config->fmt)) {
544 DRM_DEBUG_KMS("%s:invalid format\n", __func__); 542 DRM_DEBUG_KMS("invalid format\n");
545 return -EINVAL; 543 return -EINVAL;
546 } 544 }
547 545
548 /* Check transform configuration */ 546 /* Check transform configuration */
549 if (src_config->degree != EXYNOS_DRM_DEGREE_0) { 547 if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
550 DRM_DEBUG_KMS("%s:not support source-side rotation\n", 548 DRM_DEBUG_KMS("not support source-side rotation\n");
551 __func__);
552 return -EINVAL; 549 return -EINVAL;
553 } 550 }
554 551
@@ -561,51 +558,47 @@ static int rotator_ippdrv_check_property(struct device *dev,
561 /* No problem */ 558 /* No problem */
562 break; 559 break;
563 default: 560 default:
564 DRM_DEBUG_KMS("%s:invalid degree\n", __func__); 561 DRM_DEBUG_KMS("invalid degree\n");
565 return -EINVAL; 562 return -EINVAL;
566 } 563 }
567 564
568 if (src_config->flip != EXYNOS_DRM_FLIP_NONE) { 565 if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
569 DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__); 566 DRM_DEBUG_KMS("not support source-side flip\n");
570 return -EINVAL; 567 return -EINVAL;
571 } 568 }
572 569
573 if (!rotator_check_drm_flip(dst_config->flip)) { 570 if (!rotator_check_drm_flip(dst_config->flip)) {
574 DRM_DEBUG_KMS("%s:invalid flip\n", __func__); 571 DRM_DEBUG_KMS("invalid flip\n");
575 return -EINVAL; 572 return -EINVAL;
576 } 573 }
577 574
578 /* Check size configuration */ 575 /* Check size configuration */
579 if ((src_pos->x + src_pos->w > src_sz->hsize) || 576 if ((src_pos->x + src_pos->w > src_sz->hsize) ||
580 (src_pos->y + src_pos->h > src_sz->vsize)) { 577 (src_pos->y + src_pos->h > src_sz->vsize)) {
581 DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__); 578 DRM_DEBUG_KMS("out of source buffer bound\n");
582 return -EINVAL; 579 return -EINVAL;
583 } 580 }
584 581
585 if (swap) { 582 if (swap) {
586 if ((dst_pos->x + dst_pos->h > dst_sz->vsize) || 583 if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
587 (dst_pos->y + dst_pos->w > dst_sz->hsize)) { 584 (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
588 DRM_DEBUG_KMS("%s:out of destination buffer bound\n", 585 DRM_DEBUG_KMS("out of destination buffer bound\n");
589 __func__);
590 return -EINVAL; 586 return -EINVAL;
591 } 587 }
592 588
593 if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) { 589 if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
594 DRM_DEBUG_KMS("%s:not support scale feature\n", 590 DRM_DEBUG_KMS("not support scale feature\n");
595 __func__);
596 return -EINVAL; 591 return -EINVAL;
597 } 592 }
598 } else { 593 } else {
599 if ((dst_pos->x + dst_pos->w > dst_sz->hsize) || 594 if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
600 (dst_pos->y + dst_pos->h > dst_sz->vsize)) { 595 (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
601 DRM_DEBUG_KMS("%s:out of destination buffer bound\n", 596 DRM_DEBUG_KMS("out of destination buffer bound\n");
602 __func__);
603 return -EINVAL; 597 return -EINVAL;
604 } 598 }
605 599
606 if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) { 600 if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
607 DRM_DEBUG_KMS("%s:not support scale feature\n", 601 DRM_DEBUG_KMS("not support scale feature\n");
608 __func__);
609 return -EINVAL; 602 return -EINVAL;
610 } 603 }
611 } 604 }
@@ -693,7 +686,7 @@ static int rotator_probe(struct platform_device *pdev)
693 goto err_ippdrv_register; 686 goto err_ippdrv_register;
694 } 687 }
695 688
696 DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv); 689 DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv);
697 690
698 platform_set_drvdata(pdev, rot); 691 platform_set_drvdata(pdev, rot);
699 692
@@ -752,8 +745,6 @@ static struct platform_device_id rotator_driver_ids[] = {
752 745
753static int rotator_clk_crtl(struct rot_context *rot, bool enable) 746static int rotator_clk_crtl(struct rot_context *rot, bool enable)
754{ 747{
755 DRM_DEBUG_KMS("%s\n", __func__);
756
757 if (enable) { 748 if (enable) {
758 clk_enable(rot->clock); 749 clk_enable(rot->clock);
759 rot->suspended = false; 750 rot->suspended = false;
@@ -771,8 +762,6 @@ static int rotator_suspend(struct device *dev)
771{ 762{
772 struct rot_context *rot = dev_get_drvdata(dev); 763 struct rot_context *rot = dev_get_drvdata(dev);
773 764
774 DRM_DEBUG_KMS("%s\n", __func__);
775
776 if (pm_runtime_suspended(dev)) 765 if (pm_runtime_suspended(dev))
777 return 0; 766 return 0;
778 767
@@ -783,8 +772,6 @@ static int rotator_resume(struct device *dev)
783{ 772{
784 struct rot_context *rot = dev_get_drvdata(dev); 773 struct rot_context *rot = dev_get_drvdata(dev);
785 774
786 DRM_DEBUG_KMS("%s\n", __func__);
787
788 if (!pm_runtime_suspended(dev)) 775 if (!pm_runtime_suspended(dev))
789 return rotator_clk_crtl(rot, true); 776 return rotator_clk_crtl(rot, true);
790 777
@@ -797,8 +784,6 @@ static int rotator_runtime_suspend(struct device *dev)
797{ 784{
798 struct rot_context *rot = dev_get_drvdata(dev); 785 struct rot_context *rot = dev_get_drvdata(dev);
799 786
800 DRM_DEBUG_KMS("%s\n", __func__);
801
802 return rotator_clk_crtl(rot, false); 787 return rotator_clk_crtl(rot, false);
803} 788}
804 789
@@ -806,8 +791,6 @@ static int rotator_runtime_resume(struct device *dev)
806{ 791{
807 struct rot_context *rot = dev_get_drvdata(dev); 792 struct rot_context *rot = dev_get_drvdata(dev);
808 793
809 DRM_DEBUG_KMS("%s\n", __func__);
810
811 return rotator_clk_crtl(rot, true); 794 return rotator_clk_crtl(rot, true);
812} 795}
813#endif 796#endif
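
rotator_ippdrv_check_property() rejects anything the block cannot do in hardware: CSC, scaling, source-side rotation or flip, and rectangles that overrun their buffers; with a 90/270 degree rotation (swap) the destination width and height trade places before the bound check. The geometry test condensed into a hypothetical helper, using the drm_exynos_pos/drm_exynos_sz layouts from the uapi header:

	/* Hypothetical condensation of the swap-aware bound check. */
	static bool rot_dst_fits(struct drm_exynos_pos *p,
				 struct drm_exynos_sz *sz, bool swap)
	{
		if (swap)	/* 90/270 deg: w and h swap vs. the buffer */
			return p->x + p->h <= sz->vsize &&
			       p->y + p->w <= sz->hsize;

		return p->x + p->w <= sz->hsize &&
		       p->y + p->h <= sz->vsize;
	}
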
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 24376c194a5e..784bbce0741a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -89,8 +89,6 @@ static bool vidi_display_is_connected(struct device *dev)
89{ 89{
90 struct vidi_context *ctx = get_vidi_context(dev); 90 struct vidi_context *ctx = get_vidi_context(dev);
91 91
92 DRM_DEBUG_KMS("%s\n", __FILE__);
93
94 /* 92 /*
95 * connection request would come from user side 93 * connection request would come from user side
96 * to do hotplug through specific ioctl. 94 * to do hotplug through specific ioctl.
@@ -105,8 +103,6 @@ static struct edid *vidi_get_edid(struct device *dev,
105 struct edid *edid; 103 struct edid *edid;
106 int edid_len; 104 int edid_len;
107 105
108 DRM_DEBUG_KMS("%s\n", __FILE__);
109
110 /* 106 /*
111 * the edid data comes from user side and it would be set 107 * the edid data comes from user side and it would be set
112 * to ctx->raw_edid through specific ioctl. 108 * to ctx->raw_edid through specific ioctl.
@@ -128,17 +124,13 @@ static struct edid *vidi_get_edid(struct device *dev,
128 124
129static void *vidi_get_panel(struct device *dev) 125static void *vidi_get_panel(struct device *dev)
130{ 126{
131 DRM_DEBUG_KMS("%s\n", __FILE__);
132
133 /* TODO. */ 127 /* TODO. */
134 128
135 return NULL; 129 return NULL;
136} 130}
137 131
138static int vidi_check_timing(struct device *dev, void *timing) 132static int vidi_check_mode(struct device *dev, struct drm_display_mode *mode)
139{ 133{
140 DRM_DEBUG_KMS("%s\n", __FILE__);
141
142 /* TODO. */ 134 /* TODO. */
143 135
144 return 0; 136 return 0;
@@ -146,8 +138,6 @@ static int vidi_check_timing(struct device *dev, void *timing)
146 138
147static int vidi_display_power_on(struct device *dev, int mode) 139static int vidi_display_power_on(struct device *dev, int mode)
148{ 140{
149 DRM_DEBUG_KMS("%s\n", __FILE__);
150
151 /* TODO */ 141 /* TODO */
152 142
153 return 0; 143 return 0;
@@ -158,7 +148,7 @@ static struct exynos_drm_display_ops vidi_display_ops = {
158 .is_connected = vidi_display_is_connected, 148 .is_connected = vidi_display_is_connected,
159 .get_edid = vidi_get_edid, 149 .get_edid = vidi_get_edid,
160 .get_panel = vidi_get_panel, 150 .get_panel = vidi_get_panel,
161 .check_timing = vidi_check_timing, 151 .check_mode = vidi_check_mode,
162 .power_on = vidi_display_power_on, 152 .power_on = vidi_display_power_on,
163}; 153};
164 154
@@ -166,7 +156,7 @@ static void vidi_dpms(struct device *subdrv_dev, int mode)
166{ 156{
167 struct vidi_context *ctx = get_vidi_context(subdrv_dev); 157 struct vidi_context *ctx = get_vidi_context(subdrv_dev);
168 158
169 DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode); 159 DRM_DEBUG_KMS("%d\n", mode);
170 160
171 mutex_lock(&ctx->lock); 161 mutex_lock(&ctx->lock);
172 162
@@ -196,8 +186,6 @@ static void vidi_apply(struct device *subdrv_dev)
196 struct vidi_win_data *win_data; 186 struct vidi_win_data *win_data;
197 int i; 187 int i;
198 188
199 DRM_DEBUG_KMS("%s\n", __FILE__);
200
201 for (i = 0; i < WINDOWS_NR; i++) { 189 for (i = 0; i < WINDOWS_NR; i++) {
202 win_data = &ctx->win_data[i]; 190 win_data = &ctx->win_data[i];
203 if (win_data->enabled && (ovl_ops && ovl_ops->commit)) 191 if (win_data->enabled && (ovl_ops && ovl_ops->commit))
@@ -212,8 +200,6 @@ static void vidi_commit(struct device *dev)
212{ 200{
213 struct vidi_context *ctx = get_vidi_context(dev); 201 struct vidi_context *ctx = get_vidi_context(dev);
214 202
215 DRM_DEBUG_KMS("%s\n", __FILE__);
216
217 if (ctx->suspended) 203 if (ctx->suspended)
218 return; 204 return;
219} 205}
@@ -222,8 +208,6 @@ static int vidi_enable_vblank(struct device *dev)
222{ 208{
223 struct vidi_context *ctx = get_vidi_context(dev); 209 struct vidi_context *ctx = get_vidi_context(dev);
224 210
225 DRM_DEBUG_KMS("%s\n", __FILE__);
226
227 if (ctx->suspended) 211 if (ctx->suspended)
228 return -EPERM; 212 return -EPERM;
229 213
@@ -246,8 +230,6 @@ static void vidi_disable_vblank(struct device *dev)
246{ 230{
247 struct vidi_context *ctx = get_vidi_context(dev); 231 struct vidi_context *ctx = get_vidi_context(dev);
248 232
249 DRM_DEBUG_KMS("%s\n", __FILE__);
250
251 if (ctx->suspended) 233 if (ctx->suspended)
252 return; 234 return;
253 235
@@ -271,8 +253,6 @@ static void vidi_win_mode_set(struct device *dev,
271 int win; 253 int win;
272 unsigned long offset; 254 unsigned long offset;
273 255
274 DRM_DEBUG_KMS("%s\n", __FILE__);
275
276 if (!overlay) { 256 if (!overlay) {
277 dev_err(dev, "overlay is NULL\n"); 257 dev_err(dev, "overlay is NULL\n");
278 return; 258 return;
@@ -282,7 +262,7 @@ static void vidi_win_mode_set(struct device *dev,
282 if (win == DEFAULT_ZPOS) 262 if (win == DEFAULT_ZPOS)
283 win = ctx->default_win; 263 win = ctx->default_win;
284 264
285 if (win < 0 || win > WINDOWS_NR) 265 if (win < 0 || win >= WINDOWS_NR)
286 return; 266 return;
287 267
288 offset = overlay->fb_x * (overlay->bpp >> 3); 268 offset = overlay->fb_x * (overlay->bpp >> 3);
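
The `win < 0 || win > WINDOWS_NR` checks here and in vidi_win_commit()/vidi_win_disable() (and the mixer's MIXER_WIN_NR variant further down) were off by one: win_data[] holds WINDOWS_NR entries, so an index equal to WINDOWS_NR is already past the end. The corrected guard:

	/* ctx->win_data has WINDOWS_NR slots; WINDOWS_NR itself is out of range,
	 * which the old '>' comparison let through. */
	if (win < 0 || win >= WINDOWS_NR)
		return;

	win_data = &ctx->win_data[win];	/* now provably in bounds */
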
@@ -324,15 +304,13 @@ static void vidi_win_commit(struct device *dev, int zpos)
324 struct vidi_win_data *win_data; 304 struct vidi_win_data *win_data;
325 int win = zpos; 305 int win = zpos;
326 306
327 DRM_DEBUG_KMS("%s\n", __FILE__);
328
329 if (ctx->suspended) 307 if (ctx->suspended)
330 return; 308 return;
331 309
332 if (win == DEFAULT_ZPOS) 310 if (win == DEFAULT_ZPOS)
333 win = ctx->default_win; 311 win = ctx->default_win;
334 312
335 if (win < 0 || win > WINDOWS_NR) 313 if (win < 0 || win >= WINDOWS_NR)
336 return; 314 return;
337 315
338 win_data = &ctx->win_data[win]; 316 win_data = &ctx->win_data[win];
@@ -351,12 +329,10 @@ static void vidi_win_disable(struct device *dev, int zpos)
351 struct vidi_win_data *win_data; 329 struct vidi_win_data *win_data;
352 int win = zpos; 330 int win = zpos;
353 331
354 DRM_DEBUG_KMS("%s\n", __FILE__);
355
356 if (win == DEFAULT_ZPOS) 332 if (win == DEFAULT_ZPOS)
357 win = ctx->default_win; 333 win = ctx->default_win;
358 334
359 if (win < 0 || win > WINDOWS_NR) 335 if (win < 0 || win >= WINDOWS_NR)
360 return; 336 return;
361 337
362 win_data = &ctx->win_data[win]; 338 win_data = &ctx->win_data[win];
@@ -407,8 +383,6 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
407 383
408static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 384static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
409{ 385{
410 DRM_DEBUG_KMS("%s\n", __FILE__);
411
412 /* 386 /*
413 * enable drm irq mode. 387 * enable drm irq mode.
414 * - with irq_enabled = 1, we can use the vblank feature. 388 * - with irq_enabled = 1, we can use the vblank feature.
@@ -431,8 +405,6 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
431 405
432static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 406static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
433{ 407{
434 DRM_DEBUG_KMS("%s\n", __FILE__);
435
436 /* TODO. */ 408 /* TODO. */
437} 409}
438 410
@@ -441,8 +413,6 @@ static int vidi_power_on(struct vidi_context *ctx, bool enable)
441 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 413 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
442 struct device *dev = subdrv->dev; 414 struct device *dev = subdrv->dev;
443 415
444 DRM_DEBUG_KMS("%s\n", __FILE__);
445
446 if (enable != false && enable != true) 416 if (enable != false && enable != true)
447 return -EINVAL; 417 return -EINVAL;
448 418
@@ -483,8 +453,6 @@ static int vidi_store_connection(struct device *dev,
483 struct vidi_context *ctx = get_vidi_context(dev); 453 struct vidi_context *ctx = get_vidi_context(dev);
484 int ret; 454 int ret;
485 455
486 DRM_DEBUG_KMS("%s\n", __FILE__);
487
488 ret = kstrtoint(buf, 0, &ctx->connected); 456 ret = kstrtoint(buf, 0, &ctx->connected);
489 if (ret) 457 if (ret)
490 return ret; 458 return ret;
@@ -522,8 +490,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
522 struct drm_exynos_vidi_connection *vidi = data; 490 struct drm_exynos_vidi_connection *vidi = data;
523 int edid_len; 491 int edid_len;
524 492
525 DRM_DEBUG_KMS("%s\n", __FILE__);
526
527 if (!vidi) { 493 if (!vidi) {
528 DRM_DEBUG_KMS("user data for vidi is null.\n"); 494 DRM_DEBUG_KMS("user data for vidi is null.\n");
529 return -EINVAL; 495 return -EINVAL;
@@ -592,8 +558,6 @@ static int vidi_probe(struct platform_device *pdev)
592 struct exynos_drm_subdrv *subdrv; 558 struct exynos_drm_subdrv *subdrv;
593 int ret; 559 int ret;
594 560
595 DRM_DEBUG_KMS("%s\n", __FILE__);
596
597 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 561 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
598 if (!ctx) 562 if (!ctx)
599 return -ENOMEM; 563 return -ENOMEM;
@@ -625,8 +589,6 @@ static int vidi_remove(struct platform_device *pdev)
625{ 589{
626 struct vidi_context *ctx = platform_get_drvdata(pdev); 590 struct vidi_context *ctx = platform_get_drvdata(pdev);
627 591
628 DRM_DEBUG_KMS("%s\n", __FILE__);
629
630 exynos_drm_subdrv_unregister(&ctx->subdrv); 592 exynos_drm_subdrv_unregister(&ctx->subdrv);
631 593
632 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 594 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index fd1426dca882..62ef5971ac3c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -83,6 +83,7 @@ struct hdmi_resources {
83 struct clk *sclk_pixel; 83 struct clk *sclk_pixel;
84 struct clk *sclk_hdmiphy; 84 struct clk *sclk_hdmiphy;
85 struct clk *hdmiphy; 85 struct clk *hdmiphy;
86 struct clk *mout_hdmi;
86 struct regulator_bulk_data *regul_bulk; 87 struct regulator_bulk_data *regul_bulk;
87 int regul_count; 88 int regul_count;
88}; 89};
@@ -689,8 +690,6 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
689 u32 mod; 690 u32 mod;
690 u32 vic; 691 u32 vic;
691 692
692 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
693
694 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); 693 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
695 if (hdata->dvi_mode) { 694 if (hdata->dvi_mode) {
696 hdmi_reg_writeb(hdata, HDMI_VSI_CON, 695 hdmi_reg_writeb(hdata, HDMI_VSI_CON,
@@ -755,8 +754,6 @@ static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
755 struct edid *raw_edid; 754 struct edid *raw_edid;
756 struct hdmi_context *hdata = ctx; 755 struct hdmi_context *hdata = ctx;
757 756
758 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
759
760 if (!hdata->ddc_port) 757 if (!hdata->ddc_port)
761 return ERR_PTR(-ENODEV); 758 return ERR_PTR(-ENODEV);
762 759
@@ -777,8 +774,6 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
777 const struct hdmiphy_config *confs; 774 const struct hdmiphy_config *confs;
778 int count, i; 775 int count, i;
779 776
780 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
781
782 if (hdata->type == HDMI_TYPE13) { 777 if (hdata->type == HDMI_TYPE13) {
783 confs = hdmiphy_v13_configs; 778 confs = hdmiphy_v13_configs;
784 count = ARRAY_SIZE(hdmiphy_v13_configs); 779 count = ARRAY_SIZE(hdmiphy_v13_configs);
@@ -796,18 +791,17 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
796 return -EINVAL; 791 return -EINVAL;
797} 792}
798 793
799static int hdmi_check_timing(void *ctx, struct fb_videomode *timing) 794static int hdmi_check_mode(void *ctx, struct drm_display_mode *mode)
800{ 795{
801 struct hdmi_context *hdata = ctx; 796 struct hdmi_context *hdata = ctx;
802 int ret; 797 int ret;
803 798
804 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 799 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
800 mode->hdisplay, mode->vdisplay, mode->vrefresh,
801 (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
802 false, mode->clock * 1000);
805 803
806 DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres, 804 ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
807 timing->yres, timing->refresh,
808 timing->vmode);
809
810 ret = hdmi_find_phy_conf(hdata, timing->pixclock);
811 if (ret < 0) 805 if (ret < 0)
812 return ret; 806 return ret;
813 return 0; 807 return 0;
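
Converting check_timing() to check_mode() swaps struct fb_videomode for struct drm_display_mode, which also changes the clock unit: fb_videomode carries pixclock in picoseconds per pixel (PICOS2KHZ()/KHZ2PICOS() convert), while drm_display_mode stores clock in kHz, hence the `mode->clock * 1000` when asking hdmi_find_phy_conf() for a PHY setting in Hz. The resulting callback, reduced to a sketch:

	static int hdmi_check_mode(void *ctx, struct drm_display_mode *mode)
	{
		struct hdmi_context *hdata = ctx;
		int ret;

		/* drm_display_mode::clock is kHz; the PHY table wants Hz. */
		ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);

		return ret < 0 ? ret : 0;
	}
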
@@ -1042,7 +1036,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1042 } 1036 }
1043} 1037}
1044 1038
1045static void hdmi_v13_timing_apply(struct hdmi_context *hdata) 1039static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
1046{ 1040{
1047 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; 1041 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
1048 const struct hdmi_v13_core_regs *core = 1042 const struct hdmi_v13_core_regs *core =
@@ -1118,9 +1112,9 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1118 hdmi_regs_dump(hdata, "timing apply"); 1112 hdmi_regs_dump(hdata, "timing apply");
1119 } 1113 }
1120 1114
1121 clk_disable(hdata->res.sclk_hdmi); 1115 clk_disable_unprepare(hdata->res.sclk_hdmi);
1122 clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); 1116 clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy);
1123 clk_enable(hdata->res.sclk_hdmi); 1117 clk_prepare_enable(hdata->res.sclk_hdmi);
1124 1118
1125 /* enable HDMI and timing generator */ 1119 /* enable HDMI and timing generator */
1126 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); 1120 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
@@ -1131,7 +1125,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1131 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); 1125 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
1132} 1126}
1133 1127
1134static void hdmi_v14_timing_apply(struct hdmi_context *hdata) 1128static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
1135{ 1129{
1136 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; 1130 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
1137 const struct hdmi_v14_core_regs *core = 1131 const struct hdmi_v14_core_regs *core =
@@ -1285,9 +1279,9 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1285 hdmi_regs_dump(hdata, "timing apply"); 1279 hdmi_regs_dump(hdata, "timing apply");
1286 } 1280 }
1287 1281
1288 clk_disable(hdata->res.sclk_hdmi); 1282 clk_disable_unprepare(hdata->res.sclk_hdmi);
1289 clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); 1283 clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy);
1290 clk_enable(hdata->res.sclk_hdmi); 1284 clk_prepare_enable(hdata->res.sclk_hdmi);
1291 1285
1292 /* enable HDMI and timing generator */ 1286 /* enable HDMI and timing generator */
1293 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); 1287 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
@@ -1298,12 +1292,12 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1298 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); 1292 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
1299} 1293}
1300 1294
1301static void hdmi_timing_apply(struct hdmi_context *hdata) 1295static void hdmi_mode_apply(struct hdmi_context *hdata)
1302{ 1296{
1303 if (hdata->type == HDMI_TYPE13) 1297 if (hdata->type == HDMI_TYPE13)
1304 hdmi_v13_timing_apply(hdata); 1298 hdmi_v13_mode_apply(hdata);
1305 else 1299 else
1306 hdmi_v14_timing_apply(hdata); 1300 hdmi_v14_mode_apply(hdata);
1307} 1301}
1308 1302
1309static void hdmiphy_conf_reset(struct hdmi_context *hdata) 1303static void hdmiphy_conf_reset(struct hdmi_context *hdata)
@@ -1311,9 +1305,9 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1311 u8 buffer[2]; 1305 u8 buffer[2];
1312 u32 reg; 1306 u32 reg;
1313 1307
1314 clk_disable(hdata->res.sclk_hdmi); 1308 clk_disable_unprepare(hdata->res.sclk_hdmi);
1315 clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel); 1309 clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_pixel);
1316 clk_enable(hdata->res.sclk_hdmi); 1310 clk_prepare_enable(hdata->res.sclk_hdmi);
1317 1311
1318 /* operation mode */ 1312 /* operation mode */
1319 buffer[0] = 0x1f; 1313 buffer[0] = 0x1f;
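
Two things change in these clock hunks: clk_enable()/clk_disable() become clk_prepare_enable()/clk_disable_unprepare(), since the common clock framework requires a prepare step before enable, and the reparenting between the pixel and PHY clocks now targets the dedicated mux clock mout_hdmi instead of the gate sclk_hdmi, because clk_set_parent() is only meaningful on a mux. The sequence, sketched:

	/* Stop the gate, switch the mux, restart: mout_hdmi is the mux,
	 * sclk_hdmi merely gates the output downstream of it. */
	clk_disable_unprepare(res->sclk_hdmi);
	clk_set_parent(res->mout_hdmi, res->sclk_pixel); /* or sclk_hdmiphy */
	clk_prepare_enable(res->sclk_hdmi);
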
@@ -1336,8 +1330,6 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1336 1330
1337static void hdmiphy_poweron(struct hdmi_context *hdata) 1331static void hdmiphy_poweron(struct hdmi_context *hdata)
1338{ 1332{
1339 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1340
1341 if (hdata->type == HDMI_TYPE14) 1333 if (hdata->type == HDMI_TYPE14)
1342 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, 1334 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
1343 HDMI_PHY_POWER_OFF_EN); 1335 HDMI_PHY_POWER_OFF_EN);
@@ -1345,8 +1337,6 @@ static void hdmiphy_poweron(struct hdmi_context *hdata)
1345 1337
1346static void hdmiphy_poweroff(struct hdmi_context *hdata) 1338static void hdmiphy_poweroff(struct hdmi_context *hdata)
1347{ 1339{
1348 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1349
1350 if (hdata->type == HDMI_TYPE14) 1340 if (hdata->type == HDMI_TYPE14)
1351 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, 1341 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
1352 HDMI_PHY_POWER_OFF_EN); 1342 HDMI_PHY_POWER_OFF_EN);
@@ -1410,8 +1400,6 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1410 1400
1411static void hdmi_conf_apply(struct hdmi_context *hdata) 1401static void hdmi_conf_apply(struct hdmi_context *hdata)
1412{ 1402{
1413 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1414
1415 hdmiphy_conf_reset(hdata); 1403 hdmiphy_conf_reset(hdata);
1416 hdmiphy_conf_apply(hdata); 1404 hdmiphy_conf_apply(hdata);
1417 1405
@@ -1423,7 +1411,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1423 hdmi_audio_init(hdata); 1411 hdmi_audio_init(hdata);
1424 1412
1425 /* setting core registers */ 1413 /* setting core registers */
1426 hdmi_timing_apply(hdata); 1414 hdmi_mode_apply(hdata);
1427 hdmi_audio_control(hdata, true); 1415 hdmi_audio_control(hdata, true);
1428 1416
1429 hdmi_regs_dump(hdata, "start"); 1417 hdmi_regs_dump(hdata, "start");
@@ -1569,8 +1557,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1569 (m->vsync_start - m->vdisplay) / 2); 1557 (m->vsync_start - m->vdisplay) / 2);
1570 hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2); 1558 hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
1571 hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2); 1559 hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
1572 hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal + 1560 hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
1573 ((m->vsync_end - m->vsync_start) * 4) + 5) / 2);
1574 hdmi_set_reg(core->v_blank_f1, 2, m->vtotal); 1561 hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
1575 hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7); 1562 hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
1576 hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2); 1563 hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
@@ -1580,7 +1567,10 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1580 (m->htotal / 2) + (m->hsync_start - m->hdisplay)); 1567 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1581 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); 1568 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
1582 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); 1569 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
1583 hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/ 1570 hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
1571 hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
1572 hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
1573 hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
1584 hdmi_set_reg(tg->vact_st3, 2, 0x0); 1574 hdmi_set_reg(tg->vact_st3, 2, 0x0);
1585 hdmi_set_reg(tg->vact_st4, 2, 0x0); 1575 hdmi_set_reg(tg->vact_st4, 2, 0x0);
1586 } else { 1576 } else {
@@ -1602,6 +1592,9 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1602 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ 1592 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
1603 hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */ 1593 hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
1604 hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */ 1594 hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
1595 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1596 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1597 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1605 } 1598 }
1606 1599
1607 /* Following values & calculations are same irrespective of mode type */ 1600 /* Following values & calculations are same irrespective of mode type */
@@ -1633,22 +1626,19 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1633 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); 1626 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
1634 hdmi_set_reg(tg->v_fsz, 2, m->vtotal); 1627 hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
1635 hdmi_set_reg(tg->vsync, 2, 0x1); 1628 hdmi_set_reg(tg->vsync, 2, 0x1);
1636 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1637 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ 1629 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
1638 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ 1630 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
1639 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1640 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ 1631 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1641 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1642 hdmi_set_reg(tg->tg_3d, 1, 0x0); 1632 hdmi_set_reg(tg->tg_3d, 1, 0x0);
1643} 1633}
1644 1634
1645static void hdmi_mode_set(void *ctx, void *mode) 1635static void hdmi_mode_set(void *ctx, struct drm_display_mode *mode)
1646{ 1636{
1647 struct hdmi_context *hdata = ctx; 1637 struct hdmi_context *hdata = ctx;
1648 struct drm_display_mode *m = mode; 1638 struct drm_display_mode *m = mode;
1649 1639
1650 DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n", 1640 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
1651 __func__, m->hdisplay, m->vdisplay, 1641 m->hdisplay, m->vdisplay,
1652 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1642 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
1653 "INTERLACED" : "PROGERESSIVE"); 1643 "INTERLACED" : "PROGERESSIVE");
1654 1644
@@ -1661,8 +1651,6 @@ static void hdmi_mode_set(void *ctx, void *mode)
1661static void hdmi_get_max_resol(void *ctx, unsigned int *width, 1651static void hdmi_get_max_resol(void *ctx, unsigned int *width,
1662 unsigned int *height) 1652 unsigned int *height)
1663{ 1653{
1664 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1665
1666 *width = MAX_WIDTH; 1654 *width = MAX_WIDTH;
1667 *height = MAX_HEIGHT; 1655 *height = MAX_HEIGHT;
1668} 1656}
@@ -1671,8 +1659,6 @@ static void hdmi_commit(void *ctx)
1671{ 1659{
1672 struct hdmi_context *hdata = ctx; 1660 struct hdmi_context *hdata = ctx;
1673 1661
1674 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1675
1676 mutex_lock(&hdata->hdmi_mutex); 1662 mutex_lock(&hdata->hdmi_mutex);
1677 if (!hdata->powered) { 1663 if (!hdata->powered) {
1678 mutex_unlock(&hdata->hdmi_mutex); 1664 mutex_unlock(&hdata->hdmi_mutex);
@@ -1687,8 +1673,6 @@ static void hdmi_poweron(struct hdmi_context *hdata)
1687{ 1673{
1688 struct hdmi_resources *res = &hdata->res; 1674 struct hdmi_resources *res = &hdata->res;
1689 1675
1690 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1691
1692 mutex_lock(&hdata->hdmi_mutex); 1676 mutex_lock(&hdata->hdmi_mutex);
1693 if (hdata->powered) { 1677 if (hdata->powered) {
1694 mutex_unlock(&hdata->hdmi_mutex); 1678 mutex_unlock(&hdata->hdmi_mutex);
@@ -1699,10 +1683,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
1699 1683
1700 mutex_unlock(&hdata->hdmi_mutex); 1684 mutex_unlock(&hdata->hdmi_mutex);
1701 1685
1702 regulator_bulk_enable(res->regul_count, res->regul_bulk); 1686 if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
1703 clk_enable(res->hdmiphy); 1687 DRM_DEBUG_KMS("failed to enable regulator bulk\n");
1704 clk_enable(res->hdmi); 1688
1705 clk_enable(res->sclk_hdmi); 1689 clk_prepare_enable(res->hdmiphy);
1690 clk_prepare_enable(res->hdmi);
1691 clk_prepare_enable(res->sclk_hdmi);
1706 1692
1707 hdmiphy_poweron(hdata); 1693 hdmiphy_poweron(hdata);
1708} 1694}
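
regulator_bulk_enable() is annotated __must_check, so the power-on path now at least logs a failure instead of silently discarding the return value, and the clock calls gain the prepare step at the same time. A stricter variant would bail out on error, roughly as below (the early return is hypothetical, since hdmi_poweron() returns void in this code):

	ret = regulator_bulk_enable(res->regul_count, res->regul_bulk);
	if (ret) {
		DRM_ERROR("failed to enable regulators (%d)\n", ret);
		return;	/* hypothetical unwind; real path only logs */
	}

	clk_prepare_enable(res->hdmiphy);
	clk_prepare_enable(res->hdmi);
	clk_prepare_enable(res->sclk_hdmi);
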
@@ -1711,8 +1697,6 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
1711{ 1697{
1712 struct hdmi_resources *res = &hdata->res; 1698 struct hdmi_resources *res = &hdata->res;
1713 1699
1714 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1715
1716 mutex_lock(&hdata->hdmi_mutex); 1700 mutex_lock(&hdata->hdmi_mutex);
1717 if (!hdata->powered) 1701 if (!hdata->powered)
1718 goto out; 1702 goto out;
@@ -1725,9 +1709,9 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
1725 hdmiphy_conf_reset(hdata); 1709 hdmiphy_conf_reset(hdata);
1726 hdmiphy_poweroff(hdata); 1710 hdmiphy_poweroff(hdata);
1727 1711
1728 clk_disable(res->sclk_hdmi); 1712 clk_disable_unprepare(res->sclk_hdmi);
1729 clk_disable(res->hdmi); 1713 clk_disable_unprepare(res->hdmi);
1730 clk_disable(res->hdmiphy); 1714 clk_disable_unprepare(res->hdmiphy);
1731 regulator_bulk_disable(res->regul_count, res->regul_bulk); 1715 regulator_bulk_disable(res->regul_count, res->regul_bulk);
1732 1716
1733 mutex_lock(&hdata->hdmi_mutex); 1717 mutex_lock(&hdata->hdmi_mutex);
@@ -1742,7 +1726,7 @@ static void hdmi_dpms(void *ctx, int mode)
1742{ 1726{
1743 struct hdmi_context *hdata = ctx; 1727 struct hdmi_context *hdata = ctx;
1744 1728
1745 DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode); 1729 DRM_DEBUG_KMS("mode %d\n", mode);
1746 1730
1747 switch (mode) { 1731 switch (mode) {
1748 case DRM_MODE_DPMS_ON: 1732 case DRM_MODE_DPMS_ON:
@@ -1765,7 +1749,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
1765 /* display */ 1749 /* display */
1766 .is_connected = hdmi_is_connected, 1750 .is_connected = hdmi_is_connected,
1767 .get_edid = hdmi_get_edid, 1751 .get_edid = hdmi_get_edid,
1768 .check_timing = hdmi_check_timing, 1752 .check_mode = hdmi_check_mode,
1769 1753
1770 /* manager */ 1754 /* manager */
1771 .mode_set = hdmi_mode_set, 1755 .mode_set = hdmi_mode_set,
@@ -1831,8 +1815,13 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1831 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 1815 DRM_ERROR("failed to get clock 'hdmiphy'\n");
1832 goto fail; 1816 goto fail;
1833 } 1817 }
1818 res->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
1819 if (IS_ERR(res->mout_hdmi)) {
1820 DRM_ERROR("failed to get clock 'mout_hdmi'\n");
1821 goto fail;
1822 }
1834 1823
1835 clk_set_parent(res->sclk_hdmi, res->sclk_pixel); 1824 clk_set_parent(res->mout_hdmi, res->sclk_pixel);
1836 1825
1837 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * 1826 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
1838 sizeof(res->regul_bulk[0]), GFP_KERNEL); 1827 sizeof(res->regul_bulk[0]), GFP_KERNEL);
@@ -1877,7 +1866,6 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1877{ 1866{
1878 struct device_node *np = dev->of_node; 1867 struct device_node *np = dev->of_node;
1879 struct s5p_hdmi_platform_data *pd; 1868 struct s5p_hdmi_platform_data *pd;
1880 enum of_gpio_flags flags;
1881 u32 value; 1869 u32 value;
1882 1870
1883 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); 1871 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
@@ -1891,7 +1879,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
1891 goto err_data; 1879 goto err_data;
1892 } 1880 }
1893 1881
1894 pd->hpd_gpio = of_get_named_gpio_flags(np, "hpd-gpio", 0, &flags); 1882 pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
1895 1883
1896 return pd; 1884 return pd;
1897 1885
@@ -1930,6 +1918,9 @@ static struct of_device_id hdmi_match_types[] = {
1930 .compatible = "samsung,exynos5-hdmi", 1918 .compatible = "samsung,exynos5-hdmi",
1931 .data = (void *)HDMI_TYPE14, 1919 .data = (void *)HDMI_TYPE14,
1932 }, { 1920 }, {
1921 .compatible = "samsung,exynos4212-hdmi",
1922 .data = (void *)HDMI_TYPE14,
1923 }, {
1933 /* end node */ 1924 /* end node */
1934 } 1925 }
1935}; 1926};
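
The new samsung,exynos4212-hdmi entry reuses HDMI_TYPE14 through the .data cookie of the match table; at probe time a driver of this era typically recovers it via of_match_node(), along these lines (illustrative, the probe code sits outside this hunk):

	const struct of_device_id *match;

	match = of_match_node(hdmi_match_types, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	/* .data smuggles the hardware revision through the match table. */
	hdata->type = (enum hdmi_type)(unsigned long)match->data;
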
@@ -1944,8 +1935,6 @@ static int hdmi_probe(struct platform_device *pdev)
1944 struct resource *res; 1935 struct resource *res;
1945 int ret; 1936 int ret;
1946 1937
1947 DRM_DEBUG_KMS("[%d]\n", __LINE__);
1948
1949 if (dev->of_node) { 1938 if (dev->of_node) {
1950 pdata = drm_hdmi_dt_parse_pdata(dev); 1939 pdata = drm_hdmi_dt_parse_pdata(dev);
1951 if (IS_ERR(pdata)) { 1940 if (IS_ERR(pdata)) {
@@ -2071,8 +2060,6 @@ static int hdmi_remove(struct platform_device *pdev)
2071{ 2060{
2072 struct device *dev = &pdev->dev; 2061 struct device *dev = &pdev->dev;
2073 2062
2074 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2075
2076 pm_runtime_disable(dev); 2063 pm_runtime_disable(dev);
2077 2064
2078 /* hdmiphy i2c driver */ 2065 /* hdmiphy i2c driver */
@@ -2089,8 +2076,6 @@ static int hdmi_suspend(struct device *dev)
2089 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2076 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2090 struct hdmi_context *hdata = ctx->ctx; 2077 struct hdmi_context *hdata = ctx->ctx;
2091 2078
2092 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2093
2094 disable_irq(hdata->irq); 2079 disable_irq(hdata->irq);
2095 2080
2096 hdata->hpd = false; 2081 hdata->hpd = false;
@@ -2098,7 +2083,7 @@ static int hdmi_suspend(struct device *dev)
2098 drm_helper_hpd_irq_event(ctx->drm_dev); 2083 drm_helper_hpd_irq_event(ctx->drm_dev);
2099 2084
2100 if (pm_runtime_suspended(dev)) { 2085 if (pm_runtime_suspended(dev)) {
2101 DRM_DEBUG_KMS("%s : Already suspended\n", __func__); 2086 DRM_DEBUG_KMS("Already suspended\n");
2102 return 0; 2087 return 0;
2103 } 2088 }
2104 2089
@@ -2112,14 +2097,12 @@ static int hdmi_resume(struct device *dev)
2112 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2097 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2113 struct hdmi_context *hdata = ctx->ctx; 2098 struct hdmi_context *hdata = ctx->ctx;
2114 2099
2115 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2116
2117 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2100 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2118 2101
2119 enable_irq(hdata->irq); 2102 enable_irq(hdata->irq);
2120 2103
2121 if (!pm_runtime_suspended(dev)) { 2104 if (!pm_runtime_suspended(dev)) {
2122 DRM_DEBUG_KMS("%s : Already resumed\n", __func__); 2105 DRM_DEBUG_KMS("Already resumed\n");
2123 return 0; 2106 return 0;
2124 } 2107 }
2125 2108
@@ -2134,7 +2117,6 @@ static int hdmi_runtime_suspend(struct device *dev)
2134{ 2117{
2135 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2118 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2136 struct hdmi_context *hdata = ctx->ctx; 2119 struct hdmi_context *hdata = ctx->ctx;
2137 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2138 2120
2139 hdmi_poweroff(hdata); 2121 hdmi_poweroff(hdata);
2140 2122
@@ -2145,7 +2127,6 @@ static int hdmi_runtime_resume(struct device *dev)
2145{ 2127{
2146 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2128 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2147 struct hdmi_context *hdata = ctx->ctx; 2129 struct hdmi_context *hdata = ctx->ctx;
2148 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2149 2130
2150 hdmi_poweron(hdata); 2131 hdmi_poweron(hdata);
2151 2132
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ea49d132ecf6..ef04255076c7 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -51,6 +51,10 @@ static struct of_device_id hdmiphy_match_types[] = {
51 { 51 {
52 .compatible = "samsung,exynos5-hdmiphy", 52 .compatible = "samsung,exynos5-hdmiphy",
53 }, { 53 }, {
54 .compatible = "samsung,exynos4210-hdmiphy",
55 }, {
56 .compatible = "samsung,exynos4212-hdmiphy",
57 }, {
54 /* end node */ 58 /* end node */
55 } 59 }
56}; 60};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7c197d3820c5..b1280b43931c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -78,6 +78,7 @@ struct mixer_resources {
78enum mixer_version_id { 78enum mixer_version_id {
79 MXR_VER_0_0_0_16, 79 MXR_VER_0_0_0_16,
80 MXR_VER_16_0_33_0, 80 MXR_VER_16_0_33_0,
81 MXR_VER_128_0_0_184,
81}; 82};
82 83
83struct mixer_context { 84struct mixer_context {
@@ -283,17 +284,19 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
283 val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE : 284 val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
284 MXR_CFG_SCAN_PROGRASSIVE); 285 MXR_CFG_SCAN_PROGRASSIVE);
285 286
286 /* choosing between porper HD and SD mode */ 287 if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
287 if (height <= 480) 288 /* choosing between proper HD and SD mode */
288 val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD; 289 if (height <= 480)
289 else if (height <= 576) 290 val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
290 val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD; 291 else if (height <= 576)
291 else if (height <= 720) 292 val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
292 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; 293 else if (height <= 720)
293 else if (height <= 1080) 294 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
294 val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD; 295 else if (height <= 1080)
295 else 296 val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
296 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; 297 else
298 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
299 }
297 300
298 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK); 301 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
299} 302}
@@ -557,6 +560,14 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
557 /* setup geometry */ 560 /* setup geometry */
558 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); 561 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
559 562
563 /* setup display size */
564 if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
565 win == MIXER_DEFAULT_WIN) {
566 val = MXR_MXR_RES_HEIGHT(win_data->fb_height);
567 val |= MXR_MXR_RES_WIDTH(win_data->fb_width);
568 mixer_reg_write(res, MXR_RESOLUTION, val);
569 }
570
560 val = MXR_GRP_WH_WIDTH(win_data->crtc_width); 571 val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
561 val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height); 572 val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
562 val |= MXR_GRP_WH_H_SCALE(x_ratio); 573 val |= MXR_GRP_WH_H_SCALE(x_ratio);
@@ -581,7 +592,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
581 mixer_cfg_layer(ctx, win, true); 592 mixer_cfg_layer(ctx, win, true);
582 593
583 /* layer update mandatory for mixer 16.0.33.0 */ 594 /* layer update mandatory for mixer 16.0.33.0 */
584 if (ctx->mxr_ver == MXR_VER_16_0_33_0) 595 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
596 ctx->mxr_ver == MXR_VER_128_0_0_184)
585 mixer_layer_update(ctx); 597 mixer_layer_update(ctx);
586 598
587 mixer_run(ctx); 599 mixer_run(ctx);
@@ -696,8 +708,6 @@ static int mixer_enable_vblank(void *ctx, int pipe)
696 struct mixer_context *mixer_ctx = ctx; 708 struct mixer_context *mixer_ctx = ctx;
697 struct mixer_resources *res = &mixer_ctx->mixer_res; 709 struct mixer_resources *res = &mixer_ctx->mixer_res;
698 710
699 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
700
701 mixer_ctx->pipe = pipe; 711 mixer_ctx->pipe = pipe;
702 712
703 /* enable vsync interrupt */ 713 /* enable vsync interrupt */
@@ -712,8 +722,6 @@ static void mixer_disable_vblank(void *ctx)
712 struct mixer_context *mixer_ctx = ctx; 722 struct mixer_context *mixer_ctx = ctx;
713 struct mixer_resources *res = &mixer_ctx->mixer_res; 723 struct mixer_resources *res = &mixer_ctx->mixer_res;
714 724
715 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
716
717 /* disable vsync interrupt */ 725 /* disable vsync interrupt */
718 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 726 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
719} 727}
@@ -725,8 +733,6 @@ static void mixer_win_mode_set(void *ctx,
725 struct hdmi_win_data *win_data; 733 struct hdmi_win_data *win_data;
726 int win; 734 int win;
727 735
728 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
729
730 if (!overlay) { 736 if (!overlay) {
731 DRM_ERROR("overlay is NULL\n"); 737 DRM_ERROR("overlay is NULL\n");
732 return; 738 return;
@@ -742,7 +748,7 @@ static void mixer_win_mode_set(void *ctx,
742 if (win == DEFAULT_ZPOS) 748 if (win == DEFAULT_ZPOS)
743 win = MIXER_DEFAULT_WIN; 749 win = MIXER_DEFAULT_WIN;
744 750
745 if (win < 0 || win > MIXER_WIN_NR) { 751 if (win < 0 || win >= MIXER_WIN_NR) {
746 DRM_ERROR("mixer window[%d] is wrong\n", win); 752 DRM_ERROR("mixer window[%d] is wrong\n", win);
747 return; 753 return;
748 } 754 }
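
The bounds fix above is a textbook off-by-one: win_data[] has MIXER_WIN_NR entries, so valid indices run 0 to MIXER_WIN_NR-1, and the old "win > MIXER_WIN_NR" test let the one-past-the-end index through. A runnable illustration, with 3 standing in for MIXER_WIN_NR:

#include <stdio.h>

#define MIXER_WIN_NR 3

static int valid_old(int win) { return !(win < 0 || win >  MIXER_WIN_NR); }
static int valid_new(int win) { return !(win < 0 || win >= MIXER_WIN_NR); }

int main(void)
{
	int win;

	for (win = -1; win <= MIXER_WIN_NR; win++)
		printf("win=%2d old=%d new=%d\n", win,
		       valid_old(win), valid_new(win));
	/* the old check accepts win == 3, one past the last window slot */
	return 0;
}
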
@@ -776,7 +782,7 @@ static void mixer_win_commit(void *ctx, int win)
776{ 782{
777 struct mixer_context *mixer_ctx = ctx; 783 struct mixer_context *mixer_ctx = ctx;
778 784
779 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 785 DRM_DEBUG_KMS("win: %d\n", win);
780 786
781 mutex_lock(&mixer_ctx->mixer_mutex); 787 mutex_lock(&mixer_ctx->mixer_mutex);
782 if (!mixer_ctx->powered) { 788 if (!mixer_ctx->powered) {
@@ -799,7 +805,7 @@ static void mixer_win_disable(void *ctx, int win)
799 struct mixer_resources *res = &mixer_ctx->mixer_res; 805 struct mixer_resources *res = &mixer_ctx->mixer_res;
800 unsigned long flags; 806 unsigned long flags;
801 807
802 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 808 DRM_DEBUG_KMS("win: %d\n", win);
803 809
804 mutex_lock(&mixer_ctx->mixer_mutex); 810 mutex_lock(&mixer_ctx->mixer_mutex);
805 if (!mixer_ctx->powered) { 811 if (!mixer_ctx->powered) {
@@ -820,17 +826,21 @@ static void mixer_win_disable(void *ctx, int win)
820 mixer_ctx->win_data[win].enabled = false; 826 mixer_ctx->win_data[win].enabled = false;
821} 827}
822 828
823static int mixer_check_timing(void *ctx, struct fb_videomode *timing) 829static int mixer_check_mode(void *ctx, struct drm_display_mode *mode)
824{ 830{
831 struct mixer_context *mixer_ctx = ctx;
825 u32 w, h; 832 u32 w, h;
826 833
827 w = timing->xres; 834 w = mode->hdisplay;
828 h = timing->yres; 835 h = mode->vdisplay;
829 836
830 DRM_DEBUG_KMS("%s : xres=%d, yres=%d, refresh=%d, intl=%d\n", 837 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
831 __func__, timing->xres, timing->yres, 838 mode->hdisplay, mode->vdisplay, mode->vrefresh,
832 timing->refresh, (timing->vmode & 839 (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
833 FB_VMODE_INTERLACED) ? true : false); 840
841 if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16 ||
842 mixer_ctx->mxr_ver == MXR_VER_128_0_0_184)
843 return 0;
834 844
835 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) || 845 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
836 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) || 846 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
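
mixer_check_mode() now takes a drm_display_mode instead of an fb_videomode (hdisplay/vdisplay replace xres/yres, and the interlace flag moves into mode->flags), and returns success early for mixer revisions that carry no resolution restrictions. A standalone sketch of the remaining whitelist test, using plain integers in place of the drm structure; the condition's third range clause is cut off in the excerpt above, so it stays elided here:

#include <stdio.h>

static int mode_ok(unsigned int w, unsigned int h)
{
	if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
	    (w >= 1024 && w <= 1280 && h >= 576 && h <= 720))
		return 0;	/* supported */
	/* a third range clause follows in the original and is elided here */
	return -1;		/* rejected */
}

int main(void)
{
	printf("720x576: %s\n", mode_ok(720, 576) ? "rejected" : "ok");
	printf("320x240: %s\n", mode_ok(320, 240) ? "rejected" : "ok");
	return 0;
}
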
@@ -891,8 +901,6 @@ static void mixer_poweron(struct mixer_context *ctx)
891{ 901{
892 struct mixer_resources *res = &ctx->mixer_res; 902 struct mixer_resources *res = &ctx->mixer_res;
893 903
894 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
895
896 mutex_lock(&ctx->mixer_mutex); 904 mutex_lock(&ctx->mixer_mutex);
897 if (ctx->powered) { 905 if (ctx->powered) {
898 mutex_unlock(&ctx->mixer_mutex); 906 mutex_unlock(&ctx->mixer_mutex);
@@ -901,10 +909,10 @@ static void mixer_poweron(struct mixer_context *ctx)
901 ctx->powered = true; 909 ctx->powered = true;
902 mutex_unlock(&ctx->mixer_mutex); 910 mutex_unlock(&ctx->mixer_mutex);
903 911
904 clk_enable(res->mixer); 912 clk_prepare_enable(res->mixer);
905 if (ctx->vp_enabled) { 913 if (ctx->vp_enabled) {
906 clk_enable(res->vp); 914 clk_prepare_enable(res->vp);
907 clk_enable(res->sclk_mixer); 915 clk_prepare_enable(res->sclk_mixer);
908 } 916 }
909 917
910 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 918 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
@@ -917,8 +925,6 @@ static void mixer_poweroff(struct mixer_context *ctx)
917{ 925{
918 struct mixer_resources *res = &ctx->mixer_res; 926 struct mixer_resources *res = &ctx->mixer_res;
919 927
920 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
921
922 mutex_lock(&ctx->mixer_mutex); 928 mutex_lock(&ctx->mixer_mutex);
923 if (!ctx->powered) 929 if (!ctx->powered)
924 goto out; 930 goto out;
@@ -928,10 +934,10 @@ static void mixer_poweroff(struct mixer_context *ctx)
928 934
929 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 935 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
930 936
931 clk_disable(res->mixer); 937 clk_disable_unprepare(res->mixer);
932 if (ctx->vp_enabled) { 938 if (ctx->vp_enabled) {
933 clk_disable(res->vp); 939 clk_disable_unprepare(res->vp);
934 clk_disable(res->sclk_mixer); 940 clk_disable_unprepare(res->sclk_mixer);
935 } 941 }
936 942
937 mutex_lock(&ctx->mixer_mutex); 943 mutex_lock(&ctx->mixer_mutex);
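
The poweron/poweroff paths switch from clk_enable()/clk_disable() to the combined clk_prepare_enable()/clk_disable_unprepare() helpers expected by the common clock framework, where prepare (which may sleep) must precede enable. A minimal kernel-style sketch of the pairing, assuming a hypothetical driver context with a single clock; unlike the mixer code, the sketch propagates the return value:

#include <linux/clk.h>

struct demo_ctx {
	struct clk *clk;
};

static int demo_power_on(struct demo_ctx *ctx)
{
	/* prepare (may sleep) + enable in one call; must be balanced
	 * by clk_disable_unprepare() on the power-off path */
	return clk_prepare_enable(ctx->clk);
}

static void demo_power_off(struct demo_ctx *ctx)
{
	clk_disable_unprepare(ctx->clk);
}
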
@@ -945,8 +951,6 @@ static void mixer_dpms(void *ctx, int mode)
945{ 951{
946 struct mixer_context *mixer_ctx = ctx; 952 struct mixer_context *mixer_ctx = ctx;
947 953
948 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
949
950 switch (mode) { 954 switch (mode) {
951 case DRM_MODE_DPMS_ON: 955 case DRM_MODE_DPMS_ON:
952 if (pm_runtime_suspended(mixer_ctx->dev)) 956 if (pm_runtime_suspended(mixer_ctx->dev))
@@ -978,7 +982,7 @@ static struct exynos_mixer_ops mixer_ops = {
978 .win_disable = mixer_win_disable, 982 .win_disable = mixer_win_disable,
979 983
980 /* display */ 984 /* display */
981 .check_timing = mixer_check_timing, 985 .check_mode = mixer_check_mode,
982}; 986};
983 987
984static irqreturn_t mixer_irq_handler(int irq, void *arg) 988static irqreturn_t mixer_irq_handler(int irq, void *arg)
@@ -1128,12 +1132,17 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1128 return 0; 1132 return 0;
1129} 1133}
1130 1134
1131static struct mixer_drv_data exynos5_mxr_drv_data = { 1135static struct mixer_drv_data exynos5420_mxr_drv_data = {
1136 .version = MXR_VER_128_0_0_184,
1137 .is_vp_enabled = 0,
1138};
1139
1140static struct mixer_drv_data exynos5250_mxr_drv_data = {
1132 .version = MXR_VER_16_0_33_0, 1141 .version = MXR_VER_16_0_33_0,
1133 .is_vp_enabled = 0, 1142 .is_vp_enabled = 0,
1134}; 1143};
1135 1144
1136static struct mixer_drv_data exynos4_mxr_drv_data = { 1145static struct mixer_drv_data exynos4210_mxr_drv_data = {
1137 .version = MXR_VER_0_0_0_16, 1146 .version = MXR_VER_0_0_0_16,
1138 .is_vp_enabled = 1, 1147 .is_vp_enabled = 1,
1139}; 1148};
@@ -1141,10 +1150,10 @@ static struct mixer_drv_data exynos4_mxr_drv_data = {
1141static struct platform_device_id mixer_driver_types[] = { 1150static struct platform_device_id mixer_driver_types[] = {
1142 { 1151 {
1143 .name = "s5p-mixer", 1152 .name = "s5p-mixer",
1144 .driver_data = (unsigned long)&exynos4_mxr_drv_data, 1153 .driver_data = (unsigned long)&exynos4210_mxr_drv_data,
1145 }, { 1154 }, {
1146 .name = "exynos5-mixer", 1155 .name = "exynos5-mixer",
1147 .driver_data = (unsigned long)&exynos5_mxr_drv_data, 1156 .driver_data = (unsigned long)&exynos5250_mxr_drv_data,
1148 }, { 1157 }, {
1149 /* end node */ 1158 /* end node */
1150 } 1159 }
@@ -1153,7 +1162,13 @@ static struct platform_device_id mixer_driver_types[] = {
1153static struct of_device_id mixer_match_types[] = { 1162static struct of_device_id mixer_match_types[] = {
1154 { 1163 {
1155 .compatible = "samsung,exynos5-mixer", 1164 .compatible = "samsung,exynos5-mixer",
1156 .data = &exynos5_mxr_drv_data, 1165 .data = &exynos5250_mxr_drv_data,
1166 }, {
1167 .compatible = "samsung,exynos5250-mixer",
1168 .data = &exynos5250_mxr_drv_data,
1169 }, {
1170 .compatible = "samsung,exynos5420-mixer",
1171 .data = &exynos5420_mxr_drv_data,
1157 }, { 1172 }, {
1158 /* end node */ 1173 /* end node */
1159 } 1174 }
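
Alongside the exynos5420 entry, the table now carries exact per-SoC compatibles while keeping the older generic "samsung,exynos5-mixer" string for existing device trees; at probe time the matching entry's .data pointer supplies the per-variant driver data. A minimal kernel-style sketch of that pattern, with invented "vendor,demo" names standing in for the mixer variants:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct demo_drv_data {
	int version;
};

static const struct demo_drv_data demo_v1_data = { .version = 1 };
static const struct demo_drv_data demo_v2_data = { .version = 2 };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-v1", .data = &demo_v1_data },
	{ .compatible = "vendor,demo-v2", .data = &demo_v2_data },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct demo_drv_data *drv;

	match = of_match_node(demo_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;
	drv = match->data;
	dev_info(&pdev->dev, "variant version %d\n", drv->version);
	return 0;
}
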
@@ -1186,8 +1201,7 @@ static int mixer_probe(struct platform_device *pdev)
1186 1201
1187 if (dev->of_node) { 1202 if (dev->of_node) {
1188 const struct of_device_id *match; 1203 const struct of_device_id *match;
1189 match = of_match_node(of_match_ptr(mixer_match_types), 1204 match = of_match_node(mixer_match_types, dev->of_node);
1190 dev->of_node);
1191 drv = (struct mixer_drv_data *)match->data; 1205 drv = (struct mixer_drv_data *)match->data;
1192 } else { 1206 } else {
1193 drv = (struct mixer_drv_data *) 1207 drv = (struct mixer_drv_data *)
@@ -1251,10 +1265,8 @@ static int mixer_suspend(struct device *dev)
1251 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1265 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1252 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1266 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1253 1267
1254 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1255
1256 if (pm_runtime_suspended(dev)) { 1268 if (pm_runtime_suspended(dev)) {
1257 DRM_DEBUG_KMS("%s : Already suspended\n", __func__); 1269 DRM_DEBUG_KMS("Already suspended\n");
1258 return 0; 1270 return 0;
1259 } 1271 }
1260 1272
@@ -1268,10 +1280,8 @@ static int mixer_resume(struct device *dev)
1268 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1280 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1269 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1281 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1270 1282
1271 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1272
1273 if (!pm_runtime_suspended(dev)) { 1283 if (!pm_runtime_suspended(dev)) {
1274 DRM_DEBUG_KMS("%s : Already resumed\n", __func__); 1284 DRM_DEBUG_KMS("Already resumed\n");
1275 return 0; 1285 return 0;
1276 } 1286 }
1277 1287
@@ -1287,8 +1297,6 @@ static int mixer_runtime_suspend(struct device *dev)
1287 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1297 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1288 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1298 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1289 1299
1290 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1291
1292 mixer_poweroff(ctx); 1300 mixer_poweroff(ctx);
1293 1301
1294 return 0; 1302 return 0;
@@ -1299,8 +1307,6 @@ static int mixer_runtime_resume(struct device *dev)
1299 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1307 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1300 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1308 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1301 1309
1302 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1303
1304 mixer_poweron(ctx); 1310 mixer_poweron(ctx);
1305 1311
1306 return 0; 1312 return 0;
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 5d8dbc0301e6..4537026bc385 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -44,6 +44,9 @@
44#define MXR_CM_COEFF_Y 0x0080 44#define MXR_CM_COEFF_Y 0x0080
45#define MXR_CM_COEFF_CB 0x0084 45#define MXR_CM_COEFF_CB 0x0084
46#define MXR_CM_COEFF_CR 0x0088 46#define MXR_CM_COEFF_CR 0x0088
47#define MXR_MO 0x0304
48#define MXR_RESOLUTION 0x0310
49
47#define MXR_GRAPHIC0_BASE_S 0x2024 50#define MXR_GRAPHIC0_BASE_S 0x2024
48#define MXR_GRAPHIC1_BASE_S 0x2044 51#define MXR_GRAPHIC1_BASE_S 0x2044
49 52
@@ -119,6 +122,10 @@
119#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16) 122#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
120#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0) 123#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
121 124
125/* bits for MXR_RESOLUTION */
126#define MXR_MXR_RES_HEIGHT(x) MXR_MASK_VAL(x, 26, 16)
127#define MXR_MXR_RES_WIDTH(x) MXR_MASK_VAL(x, 10, 0)
128
122/* bits for MXR_GRAPHICn_SXY */ 129/* bits for MXR_GRAPHICn_SXY */
123#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16) 130#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
124#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0) 131#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
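
The new MXR_RESOLUTION register packs the display height into bits 26:16 and the width into bits 10:0 via MXR_MASK_VAL, mirroring the other geometry registers. A runnable sketch of the packing; MXR_MASK_VAL's definition is outside this excerpt, so the shift-and-mask version below is an assumption based on how it is used:

#include <stdint.h>
#include <stdio.h>

/* assumed definition: place 'val' into bit range high..low */
#define MXR_MASK(high, low)	\
	(((1u << ((high) - (low) + 1)) - 1) << (low))
#define MXR_MASK_VAL(val, high, low)	\
	(((val) << (low)) & MXR_MASK(high, low))

#define MXR_MXR_RES_HEIGHT(x)	MXR_MASK_VAL(x, 26, 16)
#define MXR_MXR_RES_WIDTH(x)	MXR_MASK_VAL(x, 10, 0)

int main(void)
{
	uint32_t val = MXR_MXR_RES_HEIGHT(1080) | MXR_MXR_RES_WIDTH(1920);

	printf("MXR_RESOLUTION for 1920x1080: 0x%08x\n", val);
	return 0;
}
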
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91f3ac6cef35..40034ecefd3b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -36,6 +36,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
36 intel_overlay.o \ 36 intel_overlay.o \
37 intel_sprite.o \ 37 intel_sprite.o \
38 intel_opregion.o \ 38 intel_opregion.o \
39 intel_sideband.o \
39 dvo_ch7xxx.o \ 40 dvo_ch7xxx.o \
40 dvo_ch7017.o \ 41 dvo_ch7017.o \
41 dvo_ivch.o \ 42 dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 3edd981e0770..757e0fa11043 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -32,12 +32,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32#define CH7xxx_REG_DID 0x4b 32#define CH7xxx_REG_DID 0x4b
33 33
34#define CH7011_VID 0x83 /* 7010 as well */ 34#define CH7011_VID 0x83 /* 7010 as well */
35#define CH7010B_VID 0x05
35#define CH7009A_VID 0x84 36#define CH7009A_VID 0x84
36#define CH7009B_VID 0x85 37#define CH7009B_VID 0x85
37#define CH7301_VID 0x95 38#define CH7301_VID 0x95
38 39
39#define CH7xxx_VID 0x84 40#define CH7xxx_VID 0x84
40#define CH7xxx_DID 0x17 41#define CH7xxx_DID 0x17
42#define CH7010_DID 0x16
41 43
42#define CH7xxx_NUM_REGS 0x4c 44#define CH7xxx_NUM_REGS 0x4c
43 45
@@ -87,11 +89,20 @@ static struct ch7xxx_id_struct {
87 char *name; 89 char *name;
88} ch7xxx_ids[] = { 90} ch7xxx_ids[] = {
89 { CH7011_VID, "CH7011" }, 91 { CH7011_VID, "CH7011" },
92 { CH7010B_VID, "CH7010B" },
90 { CH7009A_VID, "CH7009A" }, 93 { CH7009A_VID, "CH7009A" },
91 { CH7009B_VID, "CH7009B" }, 94 { CH7009B_VID, "CH7009B" },
92 { CH7301_VID, "CH7301" }, 95 { CH7301_VID, "CH7301" },
93}; 96};
94 97
98static struct ch7xxx_did_struct {
99 uint8_t did;
100 char *name;
101} ch7xxx_dids[] = {
102 { CH7xxx_DID, "CH7XXX" },
103 { CH7010_DID, "CH7010B" },
104};
105
95struct ch7xxx_priv { 106struct ch7xxx_priv {
96 bool quiet; 107 bool quiet;
97}; 108};
@@ -108,6 +119,18 @@ static char *ch7xxx_get_id(uint8_t vid)
108 return NULL; 119 return NULL;
109} 120}
110 121
122static char *ch7xxx_get_did(uint8_t did)
123{
124 int i;
125
126 for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
127 if (ch7xxx_dids[i].did == did)
128 return ch7xxx_dids[i].name;
129 }
130
131 return NULL;
132}
133
111/** Reads an 8 bit register */ 134/** Reads an 8 bit register */
112static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 135static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
113{ 136{
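
Rather than comparing against the single CH7xxx_DID, ch7xxx_init() now resolves the device ID through the small ch7xxx_dids[] table, mirroring the existing vendor-ID lookup, so the CH7010B's distinct ID (0x16) is accepted. The lookup is simple enough to lift into a runnable userspace copy:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static struct {
	uint8_t did;
	const char *name;
} ch7xxx_dids[] = {
	{ 0x17, "CH7XXX" },	/* CH7xxx_DID */
	{ 0x16, "CH7010B" },	/* CH7010_DID */
};

static const char *ch7xxx_get_did(uint8_t did)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++)
		if (ch7xxx_dids[i].did == did)
			return ch7xxx_dids[i].name;
	return NULL;		/* unknown device: probe fails */
}

int main(void)
{
	printf("0x17 -> %s\n", ch7xxx_get_did(0x17));
	printf("0x16 -> %s\n", ch7xxx_get_did(0x16));
	printf("0x99 -> %s\n", ch7xxx_get_did(0x99) ? "known" : "unknown");
	return 0;
}
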
@@ -179,7 +202,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
179 /* this will detect the CH7xxx chip on the specified i2c bus */ 202 /* this will detect the CH7xxx chip on the specified i2c bus */
180 struct ch7xxx_priv *ch7xxx; 203 struct ch7xxx_priv *ch7xxx;
181 uint8_t vendor, device; 204 uint8_t vendor, device;
182 char *name; 205 char *name, *devid;
183 206
184 ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); 207 ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
185 if (ch7xxx == NULL) 208 if (ch7xxx == NULL)
@@ -204,7 +227,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
204 if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device)) 227 if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
205 goto out; 228 goto out;
206 229
207 if (device != CH7xxx_DID) { 230 devid = ch7xxx_get_did(device);
231 if (!devid) {
208 DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " 232 DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
209 "slave %d.\n", 233 "slave %d.\n",
210 vendor, adapter->name, dvo->slave_addr); 234 vendor, adapter->name, dvo->slave_addr);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e913d325d5b8..47d6c748057e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -61,11 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
61 61
62 seq_printf(m, "gen: %d\n", info->gen); 62 seq_printf(m, "gen: %d\n", info->gen);
63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
64#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 64#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
65#define DEV_INFO_SEP ; 65#define SEP_SEMICOLON ;
66 DEV_INFO_FLAGS; 66 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
67#undef DEV_INFO_FLAG 67#undef PRINT_FLAG
68#undef DEV_INFO_SEP 68#undef SEP_SEMICOLON
69 69
70 return 0; 70 return 0;
71} 71}
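
The old DEV_INFO_FLAGS blob is replaced by DEV_INFO_FOR_EACH_FLAG(func, sep), an X-macro that applies a caller-supplied operation to every capability flag with a caller-supplied separator; the same flag list (defined in i915_drv.h, not shown in this diff) can then expand to per-flag printf statements here and to a format string plus argument list in i915_dma.c further down. A runnable userspace analogue with an invented three-flag list:

#include <stdio.h>

#define DEMO_FOR_EACH_FLAG(func, sep) \
	func(has_fbc) sep \
	func(has_llc) sep \
	func(is_mobile)

struct demo_info {
	int has_fbc, has_llc, is_mobile;
};

#define yesno(v) ((v) ? "yes" : "no")

int main(void)
{
	struct demo_info info = { .has_fbc = 1, .is_mobile = 1 };

	/* expansion 1: one printf statement per flag */
#define PRINT_FLAG(x) printf(#x ": %s\n", yesno(info.x))
#define SEP_SEMICOLON ;
	DEMO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	/* expansion 2: one combined format string + argument list */
#define PRINT_S(x) "%s"
#define SEP_EMPTY
#define FLAG_STR(x) (info.x ? #x "," : "")
#define SEP_COMMA ,
	printf("flags=" DEMO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) "\n",
	       DEMO_FOR_EACH_FLAG(FLAG_STR, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef FLAG_STR
#undef SEP_COMMA
	return 0;
}
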
@@ -196,6 +196,32 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
196 } \ 196 } \
197} while (0) 197} while (0)
198 198
199struct file_stats {
200 int count;
201 size_t total, active, inactive, unbound;
202};
203
204static int per_file_stats(int id, void *ptr, void *data)
205{
206 struct drm_i915_gem_object *obj = ptr;
207 struct file_stats *stats = data;
208
209 stats->count++;
210 stats->total += obj->base.size;
211
212 if (obj->gtt_space) {
213 if (!list_empty(&obj->ring_list))
214 stats->active += obj->base.size;
215 else
216 stats->inactive += obj->base.size;
217 } else {
218 if (!list_empty(&obj->global_list))
219 stats->unbound += obj->base.size;
220 }
221
222 return 0;
223}
224
199static int i915_gem_object_info(struct seq_file *m, void* data) 225static int i915_gem_object_info(struct seq_file *m, void* data)
200{ 226{
201 struct drm_info_node *node = (struct drm_info_node *) m->private; 227 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -204,6 +230,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
204 u32 count, mappable_count, purgeable_count; 230 u32 count, mappable_count, purgeable_count;
205 size_t size, mappable_size, purgeable_size; 231 size_t size, mappable_size, purgeable_size;
206 struct drm_i915_gem_object *obj; 232 struct drm_i915_gem_object *obj;
233 struct drm_file *file;
207 int ret; 234 int ret;
208 235
209 ret = mutex_lock_interruptible(&dev->struct_mutex); 236 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -215,7 +242,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
215 dev_priv->mm.object_memory); 242 dev_priv->mm.object_memory);
216 243
217 size = count = mappable_size = mappable_count = 0; 244 size = count = mappable_size = mappable_count = 0;
218 count_objects(&dev_priv->mm.bound_list, gtt_list); 245 count_objects(&dev_priv->mm.bound_list, global_list);
219 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 246 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
220 count, mappable_count, size, mappable_size); 247 count, mappable_count, size, mappable_size);
221 248
@@ -230,7 +257,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
230 count, mappable_count, size, mappable_size); 257 count, mappable_count, size, mappable_size);
231 258
232 size = count = purgeable_size = purgeable_count = 0; 259 size = count = purgeable_size = purgeable_count = 0;
233 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { 260 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
234 size += obj->base.size, ++count; 261 size += obj->base.size, ++count;
235 if (obj->madv == I915_MADV_DONTNEED) 262 if (obj->madv == I915_MADV_DONTNEED)
236 purgeable_size += obj->base.size, ++purgeable_count; 263 purgeable_size += obj->base.size, ++purgeable_count;
@@ -238,7 +265,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
238 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 265 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
239 266
240 size = count = mappable_size = mappable_count = 0; 267 size = count = mappable_size = mappable_count = 0;
241 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 268 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
242 if (obj->fault_mappable) { 269 if (obj->fault_mappable) {
243 size += obj->gtt_space->size; 270 size += obj->gtt_space->size;
244 ++count; 271 ++count;
@@ -263,6 +290,21 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
263 dev_priv->gtt.total, 290 dev_priv->gtt.total,
264 dev_priv->gtt.mappable_end - dev_priv->gtt.start); 291 dev_priv->gtt.mappable_end - dev_priv->gtt.start);
265 292
293 seq_printf(m, "\n");
294 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
295 struct file_stats stats;
296
297 memset(&stats, 0, sizeof(stats));
298 idr_for_each(&file->object_idr, per_file_stats, &stats);
299 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
300 get_pid_task(file->pid, PIDTYPE_PID)->comm,
301 stats.count,
302 stats.total,
303 stats.active,
304 stats.inactive,
305 stats.unbound);
306 }
307
266 mutex_unlock(&dev->struct_mutex); 308 mutex_unlock(&dev->struct_mutex);
267 309
268 return 0; 310 return 0;
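
The new per-client summary walks dev->filelist and, for each open file, lets idr_for_each() run per_file_stats() over every GEM handle in that file's object_idr, accumulating sizes into a stack-local struct file_stats. A runnable userspace analogue of the accumulation, with a plain array standing in for the idr:

#include <stdio.h>
#include <string.h>

struct obj {
	size_t size;
	int bound;	/* stand-in for obj->gtt_space */
	int active;	/* stand-in for !list_empty(&obj->ring_list) */
};

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

static int per_obj_stats(struct obj *obj, void *data)
{
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->size;
	if (obj->bound) {
		if (obj->active)
			stats->active += obj->size;
		else
			stats->inactive += obj->size;
	} else {
		stats->unbound += obj->size;
	}
	return 0;
}

int main(void)
{
	struct obj objs[] = {
		{ 4096, 1, 1 }, { 8192, 1, 0 }, { 4096, 0, 0 },
	};
	struct file_stats stats;
	size_t i;

	memset(&stats, 0, sizeof(stats));
	for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
		per_obj_stats(&objs[i], &stats);

	printf("%d objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
	       stats.count, stats.total, stats.active, stats.inactive,
	       stats.unbound);
	return 0;
}
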
@@ -283,7 +325,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
283 return ret; 325 return ret;
284 326
285 total_obj_size = total_gtt_size = count = 0; 327 total_obj_size = total_gtt_size = count = 0;
286 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 328 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
287 if (list == PINNED_LIST && obj->pin_count == 0) 329 if (list == PINNED_LIST && obj->pin_count == 0)
288 continue; 330 continue;
289 331
@@ -570,6 +612,7 @@ static const char *ring_str(int ring)
570 case RCS: return "render"; 612 case RCS: return "render";
571 case VCS: return "bsd"; 613 case VCS: return "bsd";
572 case BCS: return "blt"; 614 case BCS: return "blt";
615 case VECS: return "vebox";
573 default: return ""; 616 default: return "";
574 } 617 }
575} 618}
@@ -604,73 +647,187 @@ static const char *purgeable_flag(int purgeable)
604 return purgeable ? " purgeable" : ""; 647 return purgeable ? " purgeable" : "";
605} 648}
606 649
607static void print_error_buffers(struct seq_file *m, 650static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
651{
652
653 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
654 e->err = -ENOSPC;
655 return false;
656 }
657
658 if (e->bytes == e->size - 1 || e->err)
659 return false;
660
661 return true;
662}
663
664static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
665 unsigned len)
666{
667 if (e->pos + len <= e->start) {
668 e->pos += len;
669 return false;
670 }
671
672 /* First vsnprintf needs to fit in its entirety for memmove */
673 if (len >= e->size) {
674 e->err = -EIO;
675 return false;
676 }
677
678 return true;
679}
680
681static void __i915_error_advance(struct drm_i915_error_state_buf *e,
682 unsigned len)
683{
684	/* If this is the first printf in this window, adjust it so that
685	 * the start position matches the start of the buffer
686	 */
687
688 if (e->pos < e->start) {
689 const size_t off = e->start - e->pos;
690
691 /* Should not happen but be paranoid */
692 if (off > len || e->bytes) {
693 e->err = -EIO;
694 return;
695 }
696
697 memmove(e->buf, e->buf + off, len - off);
698 e->bytes = len - off;
699 e->pos = e->start;
700 return;
701 }
702
703 e->bytes += len;
704 e->pos += len;
705}
706
707static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
708 const char *f, va_list args)
709{
710 unsigned len;
711
712 if (!__i915_error_ok(e))
713 return;
714
715	/* Seek to the first printf that hits the start position */
716 if (e->pos < e->start) {
717 len = vsnprintf(NULL, 0, f, args);
718 if (!__i915_error_seek(e, len))
719 return;
720 }
721
722 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
723 if (len >= e->size - e->bytes)
724 len = e->size - e->bytes - 1;
725
726 __i915_error_advance(e, len);
727}
728
729static void i915_error_puts(struct drm_i915_error_state_buf *e,
730 const char *str)
731{
732 unsigned len;
733
734 if (!__i915_error_ok(e))
735 return;
736
737 len = strlen(str);
738
739	/* Seek to the first printf that hits the start position */
740 if (e->pos < e->start) {
741 if (!__i915_error_seek(e, len))
742 return;
743 }
744
745 if (len >= e->size - e->bytes)
746 len = e->size - e->bytes - 1;
747 memcpy(e->buf + e->bytes, str, len);
748
749 __i915_error_advance(e, len);
750}
751
752void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
753{
754 va_list args;
755
756 va_start(args, f);
757 i915_error_vprintf(e, f, args);
758 va_end(args);
759}
760
761#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
762#define err_puts(e, s) i915_error_puts(e, s)
763
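
These helpers replace seq_file with a hand-rolled, seekable string buffer: each reader regenerates the full report from offset zero, __i915_error_seek() skips whole formatted chunks that end before the requested start offset, and __i915_error_advance() trims the one chunk that straddles it so capture begins exactly at e->start. A compact runnable userspace model of the same windowing, simplified to a fixed buffer with no error latching:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

struct ebuf {
	char buf[256];
	size_t size;	/* capacity */
	size_t bytes;	/* bytes captured so far */
	size_t pos;	/* logical offset in the full report */
	size_t start;	/* first logical offset the caller wants */
};

static void ebuf_printf(struct ebuf *e, const char *f, ...)
{
	va_list args;
	size_t len;

	/* chunk ends before the window: skip it entirely */
	va_start(args, f);
	len = vsnprintf(NULL, 0, f, args);
	va_end(args);
	if (e->pos + len <= e->start) {
		e->pos += len;
		return;
	}

	va_start(args, f);
	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	va_end(args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	/* first chunk straddling 'start': drop its leading part */
	if (e->pos < e->start) {
		size_t off = e->start - e->pos;

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}
	e->bytes += len;
	e->pos += len;
}

int main(void)
{
	struct ebuf e = { .size = sizeof(e.buf), .start = 7 };

	ebuf_printf(&e, "EIR: 0x%08x\n", 0u);	/* 16 bytes, straddles 7 */
	ebuf_printf(&e, "IER: 0x%08x\n", 0u);
	e.buf[e.bytes] = '\0';	/* the kernel code tracks 'bytes' instead */
	fputs(e.buf, stdout);	/* the report minus its first 7 bytes */
	return 0;
}
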
764static void print_error_buffers(struct drm_i915_error_state_buf *m,
608 const char *name, 765 const char *name,
609 struct drm_i915_error_buffer *err, 766 struct drm_i915_error_buffer *err,
610 int count) 767 int count)
611{ 768{
612 seq_printf(m, "%s [%d]:\n", name, count); 769 err_printf(m, "%s [%d]:\n", name, count);
613 770
614 while (count--) { 771 while (count--) {
615 seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s", 772 err_printf(m, " %08x %8u %02x %02x %x %x",
616 err->gtt_offset, 773 err->gtt_offset,
617 err->size, 774 err->size,
618 err->read_domains, 775 err->read_domains,
619 err->write_domain, 776 err->write_domain,
620 err->rseqno, err->wseqno, 777 err->rseqno, err->wseqno);
621 pin_flag(err->pinned), 778 err_puts(m, pin_flag(err->pinned));
622 tiling_flag(err->tiling), 779 err_puts(m, tiling_flag(err->tiling));
623 dirty_flag(err->dirty), 780 err_puts(m, dirty_flag(err->dirty));
624 purgeable_flag(err->purgeable), 781 err_puts(m, purgeable_flag(err->purgeable));
625 err->ring != -1 ? " " : "", 782 err_puts(m, err->ring != -1 ? " " : "");
626 ring_str(err->ring), 783 err_puts(m, ring_str(err->ring));
627 cache_level_str(err->cache_level)); 784 err_puts(m, cache_level_str(err->cache_level));
628 785
629 if (err->name) 786 if (err->name)
630 seq_printf(m, " (name: %d)", err->name); 787 err_printf(m, " (name: %d)", err->name);
631 if (err->fence_reg != I915_FENCE_REG_NONE) 788 if (err->fence_reg != I915_FENCE_REG_NONE)
632 seq_printf(m, " (fence: %d)", err->fence_reg); 789 err_printf(m, " (fence: %d)", err->fence_reg);
633 790
634 seq_printf(m, "\n"); 791 err_puts(m, "\n");
635 err++; 792 err++;
636 } 793 }
637} 794}
638 795
639static void i915_ring_error_state(struct seq_file *m, 796static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
640 struct drm_device *dev, 797 struct drm_device *dev,
641 struct drm_i915_error_state *error, 798 struct drm_i915_error_state *error,
642 unsigned ring) 799 unsigned ring)
643{ 800{
644 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 801 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
645 seq_printf(m, "%s command stream:\n", ring_str(ring)); 802 err_printf(m, "%s command stream:\n", ring_str(ring));
646 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 803 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
647 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 804 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
648 seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 805 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
649 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 806 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
650 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 807 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
651 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 808 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
652 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 809 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
653 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 810 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
654 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 811 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
655 812
656 if (INTEL_INFO(dev)->gen >= 4) 813 if (INTEL_INFO(dev)->gen >= 4)
657 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 814 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
658 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 815 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
659 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 816 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
660 if (INTEL_INFO(dev)->gen >= 6) { 817 if (INTEL_INFO(dev)->gen >= 6) {
661 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 818 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
662 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 819 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
663 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 820 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
664 error->semaphore_mboxes[ring][0], 821 error->semaphore_mboxes[ring][0],
665 error->semaphore_seqno[ring][0]); 822 error->semaphore_seqno[ring][0]);
666 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 823 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
667 error->semaphore_mboxes[ring][1], 824 error->semaphore_mboxes[ring][1],
668 error->semaphore_seqno[ring][1]); 825 error->semaphore_seqno[ring][1]);
669 } 826 }
670 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 827 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
671 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 828 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
672 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 829 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
673 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 830 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
674} 831}
675 832
676struct i915_error_state_file_priv { 833struct i915_error_state_file_priv {
@@ -678,9 +835,11 @@ struct i915_error_state_file_priv {
678 struct drm_i915_error_state *error; 835 struct drm_i915_error_state *error;
679}; 836};
680 837
681static int i915_error_state(struct seq_file *m, void *unused) 838
839static int i915_error_state(struct i915_error_state_file_priv *error_priv,
840 struct drm_i915_error_state_buf *m)
841
682{ 842{
683 struct i915_error_state_file_priv *error_priv = m->private;
684 struct drm_device *dev = error_priv->dev; 843 struct drm_device *dev = error_priv->dev;
685 drm_i915_private_t *dev_priv = dev->dev_private; 844 drm_i915_private_t *dev_priv = dev->dev_private;
686 struct drm_i915_error_state *error = error_priv->error; 845 struct drm_i915_error_state *error = error_priv->error;
@@ -688,34 +847,35 @@ static int i915_error_state(struct seq_file *m, void *unused)
688 int i, j, page, offset, elt; 847 int i, j, page, offset, elt;
689 848
690 if (!error) { 849 if (!error) {
691 seq_printf(m, "no error state collected\n"); 850 err_printf(m, "no error state collected\n");
692 return 0; 851 return 0;
693 } 852 }
694 853
695 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 854 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
696 error->time.tv_usec); 855 error->time.tv_usec);
697 seq_printf(m, "Kernel: " UTS_RELEASE "\n"); 856 err_printf(m, "Kernel: " UTS_RELEASE "\n");
698 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 857 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
699 seq_printf(m, "EIR: 0x%08x\n", error->eir); 858 err_printf(m, "EIR: 0x%08x\n", error->eir);
700 seq_printf(m, "IER: 0x%08x\n", error->ier); 859 err_printf(m, "IER: 0x%08x\n", error->ier);
701 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 860 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
702 seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 861 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
703 seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 862 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
704 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 863 err_printf(m, "CCID: 0x%08x\n", error->ccid);
705 864
706 for (i = 0; i < dev_priv->num_fence_regs; i++) 865 for (i = 0; i < dev_priv->num_fence_regs; i++)
707 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 866 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
708 867
709 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) 868 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
710 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); 869 err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
870 error->extra_instdone[i]);
711 871
712 if (INTEL_INFO(dev)->gen >= 6) { 872 if (INTEL_INFO(dev)->gen >= 6) {
713 seq_printf(m, "ERROR: 0x%08x\n", error->error); 873 err_printf(m, "ERROR: 0x%08x\n", error->error);
714 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 874 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
715 } 875 }
716 876
717 if (INTEL_INFO(dev)->gen == 7) 877 if (INTEL_INFO(dev)->gen == 7)
718 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 878 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
719 879
720 for_each_ring(ring, dev_priv, i) 880 for_each_ring(ring, dev_priv, i)
721 i915_ring_error_state(m, dev, error, i); 881 i915_ring_error_state(m, dev, error, i);
@@ -734,24 +894,25 @@ static int i915_error_state(struct seq_file *m, void *unused)
734 struct drm_i915_error_object *obj; 894 struct drm_i915_error_object *obj;
735 895
736 if ((obj = error->ring[i].batchbuffer)) { 896 if ((obj = error->ring[i].batchbuffer)) {
737 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 897 err_printf(m, "%s --- gtt_offset = 0x%08x\n",
738 dev_priv->ring[i].name, 898 dev_priv->ring[i].name,
739 obj->gtt_offset); 899 obj->gtt_offset);
740 offset = 0; 900 offset = 0;
741 for (page = 0; page < obj->page_count; page++) { 901 for (page = 0; page < obj->page_count; page++) {
742 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 902 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
743 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 903 err_printf(m, "%08x : %08x\n", offset,
904 obj->pages[page][elt]);
744 offset += 4; 905 offset += 4;
745 } 906 }
746 } 907 }
747 } 908 }
748 909
749 if (error->ring[i].num_requests) { 910 if (error->ring[i].num_requests) {
750 seq_printf(m, "%s --- %d requests\n", 911 err_printf(m, "%s --- %d requests\n",
751 dev_priv->ring[i].name, 912 dev_priv->ring[i].name,
752 error->ring[i].num_requests); 913 error->ring[i].num_requests);
753 for (j = 0; j < error->ring[i].num_requests; j++) { 914 for (j = 0; j < error->ring[i].num_requests; j++) {
754 seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 915 err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
755 error->ring[i].requests[j].seqno, 916 error->ring[i].requests[j].seqno,
756 error->ring[i].requests[j].jiffies, 917 error->ring[i].requests[j].jiffies,
757 error->ring[i].requests[j].tail); 918 error->ring[i].requests[j].tail);
@@ -759,13 +920,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
759 } 920 }
760 921
761 if ((obj = error->ring[i].ringbuffer)) { 922 if ((obj = error->ring[i].ringbuffer)) {
762 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 923 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
763 dev_priv->ring[i].name, 924 dev_priv->ring[i].name,
764 obj->gtt_offset); 925 obj->gtt_offset);
765 offset = 0; 926 offset = 0;
766 for (page = 0; page < obj->page_count; page++) { 927 for (page = 0; page < obj->page_count; page++) {
767 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 928 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
768 seq_printf(m, "%08x : %08x\n", 929 err_printf(m, "%08x : %08x\n",
769 offset, 930 offset,
770 obj->pages[page][elt]); 931 obj->pages[page][elt]);
771 offset += 4; 932 offset += 4;
@@ -775,12 +936,12 @@ static int i915_error_state(struct seq_file *m, void *unused)
775 936
776 obj = error->ring[i].ctx; 937 obj = error->ring[i].ctx;
777 if (obj) { 938 if (obj) {
778 seq_printf(m, "%s --- HW Context = 0x%08x\n", 939 err_printf(m, "%s --- HW Context = 0x%08x\n",
779 dev_priv->ring[i].name, 940 dev_priv->ring[i].name,
780 obj->gtt_offset); 941 obj->gtt_offset);
781 offset = 0; 942 offset = 0;
782 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 943 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
783 seq_printf(m, "[%04x] %08x %08x %08x %08x\n", 944 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
784 offset, 945 offset,
785 obj->pages[0][elt], 946 obj->pages[0][elt],
786 obj->pages[0][elt+1], 947 obj->pages[0][elt+1],
@@ -806,8 +967,7 @@ i915_error_state_write(struct file *filp,
806 size_t cnt, 967 size_t cnt,
807 loff_t *ppos) 968 loff_t *ppos)
808{ 969{
809 struct seq_file *m = filp->private_data; 970 struct i915_error_state_file_priv *error_priv = filp->private_data;
810 struct i915_error_state_file_priv *error_priv = m->private;
811 struct drm_device *dev = error_priv->dev; 971 struct drm_device *dev = error_priv->dev;
812 int ret; 972 int ret;
813 973
@@ -842,25 +1002,81 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
842 kref_get(&error_priv->error->ref); 1002 kref_get(&error_priv->error->ref);
843 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1003 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
844 1004
845 return single_open(file, i915_error_state, error_priv); 1005 file->private_data = error_priv;
1006
1007 return 0;
846} 1008}
847 1009
848static int i915_error_state_release(struct inode *inode, struct file *file) 1010static int i915_error_state_release(struct inode *inode, struct file *file)
849{ 1011{
850 struct seq_file *m = file->private_data; 1012 struct i915_error_state_file_priv *error_priv = file->private_data;
851 struct i915_error_state_file_priv *error_priv = m->private;
852 1013
853 if (error_priv->error) 1014 if (error_priv->error)
854 kref_put(&error_priv->error->ref, i915_error_state_free); 1015 kref_put(&error_priv->error->ref, i915_error_state_free);
855 kfree(error_priv); 1016 kfree(error_priv);
856 1017
857 return single_release(inode, file); 1018 return 0;
1019}
1020
1021static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1022 size_t count, loff_t *pos)
1023{
1024 struct i915_error_state_file_priv *error_priv = file->private_data;
1025 struct drm_i915_error_state_buf error_str;
1026 loff_t tmp_pos = 0;
1027 ssize_t ret_count = 0;
1028 int ret = 0;
1029
1030 memset(&error_str, 0, sizeof(error_str));
1031
1032 /* We need to have enough room to store any i915_error_state printf
1033	 * so that we can move it to the start position.
1034 */
1035 error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
1036 error_str.buf = kmalloc(error_str.size,
1037 GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
1038
1039 if (error_str.buf == NULL) {
1040 error_str.size = PAGE_SIZE;
1041 error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
1042 }
1043
1044 if (error_str.buf == NULL) {
1045 error_str.size = 128;
1046 error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
1047 }
1048
1049 if (error_str.buf == NULL)
1050 return -ENOMEM;
1051
1052 error_str.start = *pos;
1053
1054 ret = i915_error_state(error_priv, &error_str);
1055 if (ret)
1056 goto out;
1057
1058 if (error_str.bytes == 0 && error_str.err) {
1059 ret = error_str.err;
1060 goto out;
1061 }
1062
1063 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1064 error_str.buf,
1065 error_str.bytes);
1066
1067 if (ret_count < 0)
1068 ret = ret_count;
1069 else
1070 *pos = error_str.start + ret_count;
1071out:
1072 kfree(error_str.buf);
1073 return ret ?: ret_count;
858} 1074}
859 1075
860static const struct file_operations i915_error_state_fops = { 1076static const struct file_operations i915_error_state_fops = {
861 .owner = THIS_MODULE, 1077 .owner = THIS_MODULE,
862 .open = i915_error_state_open, 1078 .open = i915_error_state_open,
863 .read = seq_read, 1079 .read = i915_error_state_read,
864 .write = i915_error_state_write, 1080 .write = i915_error_state_write,
865 .llseek = default_llseek, 1081 .llseek = default_llseek,
866 .release = i915_error_state_release, 1082 .release = i915_error_state_release,
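
With seq_file gone, the read() path allocates a scratch buffer (count + 1 bytes if possible, falling back to PAGE_SIZE and then 128), regenerates the report windowed at *pos, and copies it out with simple_read_from_buffer(), advancing *pos by hand. A kernel-style sketch of that regenerate-per-read shape; render_report() is a hypothetical stand-in for i915_error_state():

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* hypothetical: formats the report windowed at 'start', returns bytes */
extern ssize_t render_report(char *buf, size_t size, loff_t start);

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *pos)
{
	loff_t tmp_pos = 0;
	ssize_t bytes, n;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	bytes = render_report(buf, PAGE_SIZE, *pos);
	if (bytes < 0) {
		kfree(buf);
		return bytes;
	}

	/* copy out from the scratch buffer; advance *pos ourselves since
	 * the buffer already starts at the requested offset */
	n = simple_read_from_buffer(ubuf, count, &tmp_pos, buf, bytes);
	if (n > 0)
		*pos += n;
	kfree(buf);
	return n;
}
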
@@ -941,7 +1157,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
941 MEMSTAT_VID_SHIFT); 1157 MEMSTAT_VID_SHIFT);
942 seq_printf(m, "Current P-state: %d\n", 1158 seq_printf(m, "Current P-state: %d\n",
943 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1159 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
944 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1160 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
945 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1161 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
946 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1162 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
947 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1163 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1009,6 +1225,26 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
1009 1225
1010 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1226 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1011 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); 1227 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
1228 } else if (IS_VALLEYVIEW(dev)) {
1229 u32 freq_sts, val;
1230
1231 mutex_lock(&dev_priv->rps.hw_lock);
1232 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1233 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1234 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1235
1236 val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
1237 seq_printf(m, "max GPU freq: %d MHz\n",
1238 vlv_gpu_freq(dev_priv->mem_freq, val));
1239
1240 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
1241 seq_printf(m, "min GPU freq: %d MHz\n",
1242 vlv_gpu_freq(dev_priv->mem_freq, val));
1243
1244 seq_printf(m, "current GPU freq: %d MHz\n",
1245 vlv_gpu_freq(dev_priv->mem_freq,
1246 (freq_sts >> 8) & 0xff));
1247 mutex_unlock(&dev_priv->rps.hw_lock);
1012 } else { 1248 } else {
1013 seq_printf(m, "no P-state info available\n"); 1249 seq_printf(m, "no P-state info available\n");
1014 } 1250 }
@@ -1290,6 +1526,25 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1290 return 0; 1526 return 0;
1291} 1527}
1292 1528
1529static int i915_ips_status(struct seq_file *m, void *unused)
1530{
1531 struct drm_info_node *node = (struct drm_info_node *) m->private;
1532 struct drm_device *dev = node->minor->dev;
1533 struct drm_i915_private *dev_priv = dev->dev_private;
1534
1535 if (!HAS_IPS(dev)) {
1536 seq_puts(m, "not supported\n");
1537 return 0;
1538 }
1539
1540 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1541 seq_puts(m, "enabled\n");
1542 else
1543 seq_puts(m, "disabled\n");
1544
1545 return 0;
1546}
1547
1293static int i915_sr_status(struct seq_file *m, void *unused) 1548static int i915_sr_status(struct seq_file *m, void *unused)
1294{ 1549{
1295 struct drm_info_node *node = (struct drm_info_node *) m->private; 1550 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1642,27 +1897,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1642 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1897 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1643 1898
1644 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1899 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1645 intel_dpio_read(dev_priv, _DPIO_DIV_A)); 1900 vlv_dpio_read(dev_priv, _DPIO_DIV_A));
1646 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1901 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1647 intel_dpio_read(dev_priv, _DPIO_DIV_B)); 1902 vlv_dpio_read(dev_priv, _DPIO_DIV_B));
1648 1903
1649 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1904 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1650 intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1905 vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
1651 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1906 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1652 intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1907 vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
1653 1908
1654 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1909 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1655 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1910 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
1656 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1911 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1657 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1912 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
1658 1913
1659 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1914 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1660 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1915 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
1661 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1916 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1662 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1917 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
1663 1918
1664 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1919 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1665 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1920 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
1666 1921
1667 mutex_unlock(&dev_priv->dpio_lock); 1922 mutex_unlock(&dev_priv->dpio_lock);
1668 1923
@@ -1780,7 +2035,8 @@ i915_drop_caches_set(void *data, u64 val)
1780 } 2035 }
1781 2036
1782 if (val & DROP_UNBOUND) { 2037 if (val & DROP_UNBOUND) {
1783 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) 2038 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
2039 global_list)
1784 if (obj->pages_pin_count == 0) { 2040 if (obj->pages_pin_count == 0) {
1785 ret = i915_gem_object_put_pages(obj); 2041 ret = i915_gem_object_put_pages(obj);
1786 if (ret) 2042 if (ret)
@@ -1812,7 +2068,11 @@ i915_max_freq_get(void *data, u64 *val)
1812 if (ret) 2068 if (ret)
1813 return ret; 2069 return ret;
1814 2070
1815 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 2071 if (IS_VALLEYVIEW(dev))
2072 *val = vlv_gpu_freq(dev_priv->mem_freq,
2073 dev_priv->rps.max_delay);
2074 else
2075 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
1816 mutex_unlock(&dev_priv->rps.hw_lock); 2076 mutex_unlock(&dev_priv->rps.hw_lock);
1817 2077
1818 return 0; 2078 return 0;
@@ -1837,9 +2097,16 @@ i915_max_freq_set(void *data, u64 val)
1837 /* 2097 /*
1838 * Turbo will still be enabled, but won't go above the set value. 2098 * Turbo will still be enabled, but won't go above the set value.
1839 */ 2099 */
1840 do_div(val, GT_FREQUENCY_MULTIPLIER); 2100 if (IS_VALLEYVIEW(dev)) {
1841 dev_priv->rps.max_delay = val; 2101 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1842 gen6_set_rps(dev, val); 2102 dev_priv->rps.max_delay = val;
2103 gen6_set_rps(dev, val);
2104 } else {
2105 do_div(val, GT_FREQUENCY_MULTIPLIER);
2106 dev_priv->rps.max_delay = val;
2107 gen6_set_rps(dev, val);
2108 }
2109
1843 mutex_unlock(&dev_priv->rps.hw_lock); 2110 mutex_unlock(&dev_priv->rps.hw_lock);
1844 2111
1845 return 0; 2112 return 0;
@@ -1863,7 +2130,11 @@ i915_min_freq_get(void *data, u64 *val)
1863 if (ret) 2130 if (ret)
1864 return ret; 2131 return ret;
1865 2132
1866 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 2133 if (IS_VALLEYVIEW(dev))
2134 *val = vlv_gpu_freq(dev_priv->mem_freq,
2135 dev_priv->rps.min_delay);
2136 else
2137 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
1867 mutex_unlock(&dev_priv->rps.hw_lock); 2138 mutex_unlock(&dev_priv->rps.hw_lock);
1868 2139
1869 return 0; 2140 return 0;
@@ -1888,9 +2159,15 @@ i915_min_freq_set(void *data, u64 val)
1888 /* 2159 /*
1889 * Turbo will still be enabled, but won't go below the set value. 2160 * Turbo will still be enabled, but won't go below the set value.
1890 */ 2161 */
1891 do_div(val, GT_FREQUENCY_MULTIPLIER); 2162 if (IS_VALLEYVIEW(dev)) {
1892 dev_priv->rps.min_delay = val; 2163 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1893 gen6_set_rps(dev, val); 2164 dev_priv->rps.min_delay = val;
2165 valleyview_set_rps(dev, val);
2166 } else {
2167 do_div(val, GT_FREQUENCY_MULTIPLIER);
2168 dev_priv->rps.min_delay = val;
2169 gen6_set_rps(dev, val);
2170 }
1894 mutex_unlock(&dev_priv->rps.hw_lock); 2171 mutex_unlock(&dev_priv->rps.hw_lock);
1895 2172
1896 return 0; 2173 return 0;
@@ -2057,6 +2334,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2057 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 2334 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
2058 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 2335 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
2059 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 2336 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
2337 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
2060 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 2338 {"i915_rstdby_delays", i915_rstdby_delays, 0},
2061 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 2339 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
2062 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 2340 {"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -2066,6 +2344,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2066 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 2344 {"i915_ring_freq_table", i915_ring_freq_table, 0},
2067 {"i915_gfxec", i915_gfxec, 0}, 2345 {"i915_gfxec", i915_gfxec, 0},
2068 {"i915_fbc_status", i915_fbc_status, 0}, 2346 {"i915_fbc_status", i915_fbc_status, 0},
2347 {"i915_ips_status", i915_ips_status, 0},
2069 {"i915_sr_status", i915_sr_status, 0}, 2348 {"i915_sr_status", i915_sr_status, 0},
2070 {"i915_opregion", i915_opregion, 0}, 2349 {"i915_opregion", i915_opregion, 0},
2071 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2350 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3b315ba85a3e..adb319b53ecd 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -42,7 +42,6 @@
42#include <linux/vga_switcheroo.h> 42#include <linux/vga_switcheroo.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <acpi/video.h> 44#include <acpi/video.h>
45#include <asm/pat.h>
46 45
47#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) 46#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
48 47
@@ -956,6 +955,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
956 case I915_PARAM_HAS_BLT: 955 case I915_PARAM_HAS_BLT:
957 value = intel_ring_initialized(&dev_priv->ring[BCS]); 956 value = intel_ring_initialized(&dev_priv->ring[BCS]);
958 break; 957 break;
958 case I915_PARAM_HAS_VEBOX:
959 value = intel_ring_initialized(&dev_priv->ring[VECS]);
960 break;
959 case I915_PARAM_HAS_RELAXED_FENCING: 961 case I915_PARAM_HAS_RELAXED_FENCING:
960 value = 1; 962 value = 1;
961 break; 963 break;
@@ -999,8 +1001,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
999 value = 1; 1001 value = 1;
1000 break; 1002 break;
1001 default: 1003 default:
1002 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1004 DRM_DEBUG("Unknown parameter %d\n", param->param);
1003 param->param);
1004 return -EINVAL; 1005 return -EINVAL;
1005 } 1006 }
1006 1007
@@ -1359,8 +1360,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
1359cleanup_gem: 1360cleanup_gem:
1360 mutex_lock(&dev->struct_mutex); 1361 mutex_lock(&dev->struct_mutex);
1361 i915_gem_cleanup_ringbuffer(dev); 1362 i915_gem_cleanup_ringbuffer(dev);
1363 i915_gem_context_fini(dev);
1362 mutex_unlock(&dev->struct_mutex); 1364 mutex_unlock(&dev->struct_mutex);
1363 i915_gem_cleanup_aliasing_ppgtt(dev); 1365 i915_gem_cleanup_aliasing_ppgtt(dev);
1366 drm_mm_takedown(&dev_priv->mm.gtt_space);
1364cleanup_irq: 1367cleanup_irq:
1365 drm_irq_uninstall(dev); 1368 drm_irq_uninstall(dev);
1366cleanup_gem_stolen: 1369cleanup_gem_stolen:
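
The added i915_gem_context_fini() and drm_mm_takedown() calls slot into the usual goto-unwind ladder: each failure jumps to the label that releases everything acquired so far, in reverse order of setup. A runnable toy version of the ladder:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static int load(int fail_at)
{
	if (step("irq", fail_at == 1))
		goto err;
	if (step("gem", fail_at == 2))
		goto cleanup_irq;
	if (step("modeset", fail_at == 3))
		goto cleanup_gem;
	return 0;

cleanup_gem:
	printf("undo gem\n");
cleanup_irq:
	printf("undo irq\n");
err:
	return -1;
}

int main(void)
{
	return load(3) ? 1 : 0;	/* fails at modeset, unwinds gem then irq */
}
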
@@ -1397,29 +1400,6 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1397 master->driver_priv = NULL; 1400 master->driver_priv = NULL;
1398} 1401}
1399 1402
1400static void
1401i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1402 unsigned long size)
1403{
1404 dev_priv->mm.gtt_mtrr = -1;
1405
1406#if defined(CONFIG_X86_PAT)
1407 if (cpu_has_pat)
1408 return;
1409#endif
1410
1411 /* Set up a WC MTRR for non-PAT systems. This is more common than
1412 * one would think, because the kernel disables PAT on first
1413 * generation Core chips because WC PAT gets overridden by a UC
1414 * MTRR if present. Even if a UC MTRR isn't present.
1415 */
1416 dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
1417 if (dev_priv->mm.gtt_mtrr < 0) {
1418 DRM_INFO("MTRR allocation failed. Graphics "
1419 "performance may suffer.\n");
1420 }
1421}
1422
1423static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1403static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1424{ 1404{
1425 struct apertures_struct *ap; 1405 struct apertures_struct *ap;
@@ -1431,7 +1411,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1431 return; 1411 return;
1432 1412
1433 ap->ranges[0].base = dev_priv->gtt.mappable_base; 1413 ap->ranges[0].base = dev_priv->gtt.mappable_base;
1434 ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start; 1414 ap->ranges[0].size = dev_priv->gtt.mappable_end;
1435 1415
1436 primary = 1416 primary =
1437 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 1417 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1445,15 +1425,19 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1445{ 1425{
1446 const struct intel_device_info *info = dev_priv->info; 1426 const struct intel_device_info *info = dev_priv->info;
1447 1427
1448#define DEV_INFO_FLAG(name) info->name ? #name "," : "" 1428#define PRINT_S(name) "%s"
1449#define DEV_INFO_SEP , 1429#define SEP_EMPTY
1430#define PRINT_FLAG(name) info->name ? #name "," : ""
1431#define SEP_COMMA ,
1450 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" 1432 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
1451 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 1433 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
1452 info->gen, 1434 info->gen,
1453 dev_priv->dev->pdev->device, 1435 dev_priv->dev->pdev->device,
1454 DEV_INFO_FLAGS); 1436 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
1455#undef DEV_INFO_FLAG 1437#undef PRINT_S
1456#undef DEV_INFO_SEP 1438#undef SEP_EMPTY
1439#undef PRINT_FLAG
1440#undef SEP_COMMA
1457} 1441}
1458 1442
1459/** 1443/**
@@ -1468,7 +1452,7 @@ static void intel_early_sanitize_regs(struct drm_device *dev)
1468{ 1452{
1469 struct drm_i915_private *dev_priv = dev->dev_private; 1453 struct drm_i915_private *dev_priv = dev->dev_private;
1470 1454
1471 if (IS_HASWELL(dev)) 1455 if (HAS_FPGA_DBG_UNCLAIMED(dev))
1472 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1456 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1473} 1457}
1474 1458
@@ -1574,8 +1558,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1574 goto out_rmmap; 1558 goto out_rmmap;
1575 } 1559 }
1576 1560
1577 i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base, 1561 dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1578 aperture_size); 1562 aperture_size);
1579 1563
1580 /* The i915 workqueue is primarily used for batched retirement of 1564 /* The i915 workqueue is primarily used for batched retirement of
1581 * requests (and thus managing bo) once the task has been completed 1565 * requests (and thus managing bo) once the task has been completed
@@ -1629,6 +1613,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1629 spin_lock_init(&dev_priv->irq_lock); 1613 spin_lock_init(&dev_priv->irq_lock);
1630 spin_lock_init(&dev_priv->gpu_error.lock); 1614 spin_lock_init(&dev_priv->gpu_error.lock);
1631 spin_lock_init(&dev_priv->rps.lock); 1615 spin_lock_init(&dev_priv->rps.lock);
1616 spin_lock_init(&dev_priv->backlight.lock);
1632 mutex_init(&dev_priv->dpio_lock); 1617 mutex_init(&dev_priv->dpio_lock);
1633 1618
1634 mutex_init(&dev_priv->rps.hw_lock); 1619 mutex_init(&dev_priv->rps.hw_lock);
@@ -1647,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1647 /* Start out suspended */ 1632 /* Start out suspended */
1648 dev_priv->mm.suspended = 1; 1633 dev_priv->mm.suspended = 1;
1649 1634
1635 if (HAS_POWER_WELL(dev))
1636 i915_init_power_well(dev);
1637
1650 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1638 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1651 ret = i915_load_modeset_init(dev); 1639 ret = i915_load_modeset_init(dev);
1652 if (ret < 0) { 1640 if (ret < 0) {
@@ -1679,12 +1667,7 @@ out_gem_unload:
1679 intel_teardown_mchbar(dev); 1667 intel_teardown_mchbar(dev);
1680 destroy_workqueue(dev_priv->wq); 1668 destroy_workqueue(dev_priv->wq);
1681out_mtrrfree: 1669out_mtrrfree:
1682 if (dev_priv->mm.gtt_mtrr >= 0) { 1670 arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
1683 mtrr_del(dev_priv->mm.gtt_mtrr,
1684 dev_priv->gtt.mappable_base,
1685 aperture_size);
1686 dev_priv->mm.gtt_mtrr = -1;
1687 }
1688 io_mapping_free(dev_priv->gtt.mappable); 1671 io_mapping_free(dev_priv->gtt.mappable);
1689 dev_priv->gtt.gtt_remove(dev); 1672 dev_priv->gtt.gtt_remove(dev);
1690out_rmmap: 1673out_rmmap:
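
The hand-rolled MTRR setup (a PAT probe plus mtrr_add()/mtrr_del() bookkeeping, removed above) collapses into arch_phys_wc_add()/arch_phys_wc_del(): the add returns an opaque handle, does nothing on PAT-capable systems, and a negative handle makes the del a no-op, so the error paths need no conditionals. A kernel-style sketch of the pairing, with a made-up private struct:

#include <linux/io.h>

struct demo_priv {
	int wc_handle;
};

static void demo_map_wc(struct demo_priv *priv,
			unsigned long base, unsigned long size)
{
	/* returns a handle, or a negative value on PAT systems / failure;
	 * no check needed: arch_phys_wc_del() ignores negative handles */
	priv->wc_handle = arch_phys_wc_add(base, size);
}

static void demo_unmap_wc(struct demo_priv *priv)
{
	arch_phys_wc_del(priv->wc_handle);
}
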
@@ -1703,6 +1686,9 @@ int i915_driver_unload(struct drm_device *dev)
1703 1686
1704 intel_gpu_ips_teardown(); 1687 intel_gpu_ips_teardown();
1705 1688
1689 if (HAS_POWER_WELL(dev))
1690 i915_remove_power_well(dev);
1691
1706 i915_teardown_sysfs(dev); 1692 i915_teardown_sysfs(dev);
1707 1693
1708 if (dev_priv->mm.inactive_shrinker.shrink) 1694 if (dev_priv->mm.inactive_shrinker.shrink)
@@ -1719,12 +1705,7 @@ int i915_driver_unload(struct drm_device *dev)
1719 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 1705 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1720 1706
1721 io_mapping_free(dev_priv->gtt.mappable); 1707 io_mapping_free(dev_priv->gtt.mappable);
1722 if (dev_priv->mm.gtt_mtrr >= 0) { 1708 arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
1723 mtrr_del(dev_priv->mm.gtt_mtrr,
1724 dev_priv->gtt.mappable_base,
1725 dev_priv->gtt.mappable_end);
1726 dev_priv->mm.gtt_mtrr = -1;
1727 }
1728 1709
1729 acpi_video_unregister(); 1710 acpi_video_unregister();
1730 1711
@@ -1737,10 +1718,10 @@ int i915_driver_unload(struct drm_device *dev)
1737 * free the memory space allocated for the child device 1718 * free the memory space allocated for the child device
1738 * config parsed from VBT 1719 * config parsed from VBT
1739 */ 1720 */
1740 if (dev_priv->child_dev && dev_priv->child_dev_num) { 1721 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1741 kfree(dev_priv->child_dev); 1722 kfree(dev_priv->vbt.child_dev);
1742 dev_priv->child_dev = NULL; 1723 dev_priv->vbt.child_dev = NULL;
1743 dev_priv->child_dev_num = 0; 1724 dev_priv->vbt.child_dev_num = 0;
1744 } 1725 }
1745 1726
1746 vga_switcheroo_unregister_client(dev->pdev); 1727 vga_switcheroo_unregister_client(dev->pdev);
@@ -1773,6 +1754,7 @@ int i915_driver_unload(struct drm_device *dev)
1773 i915_free_hws(dev); 1754 i915_free_hws(dev);
1774 } 1755 }
1775 1756
1757 drm_mm_takedown(&dev_priv->mm.gtt_space);
1776 if (dev_priv->regs != NULL) 1758 if (dev_priv->regs != NULL)
1777 pci_iounmap(dev->pdev, dev_priv->regs); 1759 pci_iounmap(dev->pdev, dev_priv->regs);
1778 1760
@@ -1782,6 +1764,8 @@ int i915_driver_unload(struct drm_device *dev)
1782 destroy_workqueue(dev_priv->wq); 1764 destroy_workqueue(dev_priv->wq);
1783 pm_qos_remove_request(&dev_priv->pm_qos); 1765 pm_qos_remove_request(&dev_priv->pm_qos);
1784 1766
1767 dev_priv->gtt.gtt_remove(dev);
1768
1785 if (dev_priv->slab) 1769 if (dev_priv->slab)
1786 kmem_cache_destroy(dev_priv->slab); 1770 kmem_cache_destroy(dev_priv->slab);
1787 1771
@@ -1796,7 +1780,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1796 struct drm_i915_file_private *file_priv; 1780 struct drm_i915_file_private *file_priv;
1797 1781
1798 DRM_DEBUG_DRIVER("\n"); 1782 DRM_DEBUG_DRIVER("\n");
1799 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); 1783 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1800 if (!file_priv) 1784 if (!file_priv)
1801 return -ENOMEM; 1785 return -ENOMEM;
1802 1786
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a2e4953b8e8d..062cbda1bf4a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -128,6 +128,10 @@ module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
128MODULE_PARM_DESC(disable_power_well, 128MODULE_PARM_DESC(disable_power_well,
129 "Disable the power well when possible (default: false)"); 129 "Disable the power well when possible (default: false)");
130 130
131int i915_enable_ips __read_mostly = 1;
132module_param_named(enable_ips, i915_enable_ips, int, 0600);
133MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
134
131static struct drm_driver driver; 135static struct drm_driver driver;
132extern int intel_agp_enabled; 136extern int intel_agp_enabled;
133 137
@@ -280,6 +284,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
280 GEN7_FEATURES, 284 GEN7_FEATURES,
281 .is_ivybridge = 1, 285 .is_ivybridge = 1,
282 .is_mobile = 1, 286 .is_mobile = 1,
287 .has_fbc = 1,
283}; 288};
284 289
285static const struct intel_device_info intel_ivybridge_q_info = { 290static const struct intel_device_info intel_ivybridge_q_info = {
@@ -308,12 +313,19 @@ static const struct intel_device_info intel_valleyview_d_info = {
308static const struct intel_device_info intel_haswell_d_info = { 313static const struct intel_device_info intel_haswell_d_info = {
309 GEN7_FEATURES, 314 GEN7_FEATURES,
310 .is_haswell = 1, 315 .is_haswell = 1,
316 .has_ddi = 1,
317 .has_fpga_dbg = 1,
318 .has_vebox_ring = 1,
311}; 319};
312 320
313static const struct intel_device_info intel_haswell_m_info = { 321static const struct intel_device_info intel_haswell_m_info = {
314 GEN7_FEATURES, 322 GEN7_FEATURES,
315 .is_haswell = 1, 323 .is_haswell = 1,
316 .is_mobile = 1, 324 .is_mobile = 1,
325 .has_ddi = 1,
326 .has_fpga_dbg = 1,
327 .has_fbc = 1,
328 .has_vebox_ring = 1,
317}; 329};
318 330
319static const struct pci_device_id pciidlist[] = { /* aka */ 331static const struct pci_device_id pciidlist[] = { /* aka */
@@ -445,7 +457,6 @@ void intel_detect_pch(struct drm_device *dev)
445 */ 457 */
446 if (INTEL_INFO(dev)->num_pipes == 0) { 458 if (INTEL_INFO(dev)->num_pipes == 0) {
447 dev_priv->pch_type = PCH_NOP; 459 dev_priv->pch_type = PCH_NOP;
448 dev_priv->num_pch_pll = 0;
449 return; 460 return;
450 } 461 }
451 462
@@ -454,9 +465,15 @@ void intel_detect_pch(struct drm_device *dev)
454 * make graphics device passthrough work easy for VMM, that only 465 * make graphics device passthrough work easy for VMM, that only
455 * need to expose ISA bridge to let driver know the real hardware 466 * need to expose ISA bridge to let driver know the real hardware
456 * underneath. This is a requirement from virtualization team. 467 * underneath. This is a requirement from virtualization team.
468 *
 469 * In some virtualized environments (e.g. XEN), there is an irrelevant
 470 * ISA bridge in the system. To work reliably, we should scan through
471 * all the ISA bridge devices and check for the first match, instead
472 * of only checking the first one.
457 */ 473 */
458 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 474 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
459 if (pch) { 475 while (pch) {
476 struct pci_dev *curr = pch;
460 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 477 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
461 unsigned short id; 478 unsigned short id;
462 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 479 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
@@ -464,37 +481,39 @@ void intel_detect_pch(struct drm_device *dev)
464 481
465 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 482 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
466 dev_priv->pch_type = PCH_IBX; 483 dev_priv->pch_type = PCH_IBX;
467 dev_priv->num_pch_pll = 2;
468 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 484 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
469 WARN_ON(!IS_GEN5(dev)); 485 WARN_ON(!IS_GEN5(dev));
470 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 486 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
471 dev_priv->pch_type = PCH_CPT; 487 dev_priv->pch_type = PCH_CPT;
472 dev_priv->num_pch_pll = 2;
473 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 488 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
474 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 489 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
475 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 490 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
476 /* PantherPoint is CPT compatible */ 491 /* PantherPoint is CPT compatible */
477 dev_priv->pch_type = PCH_CPT; 492 dev_priv->pch_type = PCH_CPT;
478 dev_priv->num_pch_pll = 2;
 479 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 493 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
480 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 494 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
481 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 495 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
482 dev_priv->pch_type = PCH_LPT; 496 dev_priv->pch_type = PCH_LPT;
483 dev_priv->num_pch_pll = 0;
484 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 497 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
485 WARN_ON(!IS_HASWELL(dev)); 498 WARN_ON(!IS_HASWELL(dev));
486 WARN_ON(IS_ULT(dev)); 499 WARN_ON(IS_ULT(dev));
487 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 500 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
488 dev_priv->pch_type = PCH_LPT; 501 dev_priv->pch_type = PCH_LPT;
489 dev_priv->num_pch_pll = 0;
490 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 502 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
491 WARN_ON(!IS_HASWELL(dev)); 503 WARN_ON(!IS_HASWELL(dev));
492 WARN_ON(!IS_ULT(dev)); 504 WARN_ON(!IS_ULT(dev));
505 } else {
506 goto check_next;
493 } 507 }
494 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); 508 pci_dev_put(pch);
509 break;
495 } 510 }
496 pci_dev_put(pch); 511check_next:
512 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
513 pci_dev_put(curr);
497 } 514 }
515 if (!pch)
516 DRM_DEBUG_KMS("No PCH found?\n");
498} 517}
499 518
500bool i915_semaphore_is_enabled(struct drm_device *dev) 519bool i915_semaphore_is_enabled(struct drm_device *dev)
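The rewritten intel_detect_pch() walks every ISA-class bridge instead of trusting the first, since virtualized setups such as Xen can present an unrelated bridge first; it holds a reference on the current device, fetches the next, then drops the old one. The get/put choreography, modelled with a plain refcounted list rather than the real PCI API:

#include <stdio.h>

/* Stand-in for a refcounted device list; not the PCI API. */
struct dev { const char *vendor; int refs; struct dev *next; };

static struct dev *dev_get(struct dev *d) { if (d) d->refs++; return d; }
static void dev_put(struct dev *d) { if (d) d->refs--; }

/* Returns the next device with a reference held; the caller still
 * owns (and must drop) the reference on prev. */
static struct dev *get_next(struct dev *head, struct dev *prev)
{
	return dev_get(prev ? prev->next : head);
}

int main(void)
{
	struct dev c = { "intel", 0, NULL };
	struct dev b = { "other", 0, &c };
	struct dev a = { "xen-stub", 0, &b };

	struct dev *d = get_next(&a, NULL);
	while (d) {
		struct dev *curr = d;
		if (curr->vendor[0] == 'i') {	/* first match wins */
			printf("found bridge: %s\n", curr->vendor);
			dev_put(curr);
			break;
		}
		d = get_next(&a, curr);	/* advance, then drop old ref */
		dev_put(curr);
	}
	printf("refs leaked: %d %d %d\n", a.refs, b.refs, c.refs);
	return 0;
}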
@@ -549,6 +568,8 @@ static int i915_drm_freeze(struct drm_device *dev)
549 */ 568 */
550 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 569 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
551 dev_priv->display.crtc_disable(crtc); 570 dev_priv->display.crtc_disable(crtc);
571
572 intel_modeset_suspend_hw(dev);
552 } 573 }
553 574
554 i915_save_state(dev); 575 i915_save_state(dev);
@@ -556,7 +577,7 @@ static int i915_drm_freeze(struct drm_device *dev)
556 intel_opregion_fini(dev); 577 intel_opregion_fini(dev);
557 578
558 console_lock(); 579 console_lock();
559 intel_fbdev_set_suspend(dev, 1); 580 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
560 console_unlock(); 581 console_unlock();
561 582
562 return 0; 583 return 0;
@@ -600,7 +621,7 @@ void intel_console_resume(struct work_struct *work)
600 struct drm_device *dev = dev_priv->dev; 621 struct drm_device *dev = dev_priv->dev;
601 622
602 console_lock(); 623 console_lock();
603 intel_fbdev_set_suspend(dev, 0); 624 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
604 console_unlock(); 625 console_unlock();
605} 626}
606 627
@@ -669,7 +690,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
669 * path of resume if possible. 690 * path of resume if possible.
670 */ 691 */
671 if (console_trylock()) { 692 if (console_trylock()) {
672 intel_fbdev_set_suspend(dev, 0); 693 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
673 console_unlock(); 694 console_unlock();
674 } else { 695 } else {
675 schedule_work(&dev_priv->console_resume_work); 696 schedule_work(&dev_priv->console_resume_work);
@@ -855,37 +876,14 @@ static int gen6_do_reset(struct drm_device *dev)
855 876
856int intel_gpu_reset(struct drm_device *dev) 877int intel_gpu_reset(struct drm_device *dev)
857{ 878{
858 struct drm_i915_private *dev_priv = dev->dev_private;
859 int ret = -ENODEV;
860
861 switch (INTEL_INFO(dev)->gen) { 879 switch (INTEL_INFO(dev)->gen) {
862 case 7: 880 case 7:
863 case 6: 881 case 6: return gen6_do_reset(dev);
864 ret = gen6_do_reset(dev); 882 case 5: return ironlake_do_reset(dev);
865 break; 883 case 4: return i965_do_reset(dev);
866 case 5: 884 case 2: return i8xx_do_reset(dev);
867 ret = ironlake_do_reset(dev); 885 default: return -ENODEV;
868 break;
869 case 4:
870 ret = i965_do_reset(dev);
871 break;
872 case 2:
873 ret = i8xx_do_reset(dev);
874 break;
875 }
876
877 /* Also reset the gpu hangman. */
878 if (dev_priv->gpu_error.stop_rings) {
879 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
880 dev_priv->gpu_error.stop_rings = 0;
881 if (ret == -ENODEV) {
882 DRM_ERROR("Reset not implemented, but ignoring "
883 "error for simulated gpu hangs\n");
884 ret = 0;
885 }
886 } 886 }
887
888 return ret;
889} 887}
890 888
891/** 889/**
@@ -906,6 +904,7 @@ int intel_gpu_reset(struct drm_device *dev)
906int i915_reset(struct drm_device *dev) 904int i915_reset(struct drm_device *dev)
907{ 905{
908 drm_i915_private_t *dev_priv = dev->dev_private; 906 drm_i915_private_t *dev_priv = dev->dev_private;
907 bool simulated;
909 int ret; 908 int ret;
910 909
911 if (!i915_try_reset) 910 if (!i915_try_reset)
@@ -915,13 +914,26 @@ int i915_reset(struct drm_device *dev)
915 914
916 i915_gem_reset(dev); 915 i915_gem_reset(dev);
917 916
918 ret = -ENODEV; 917 simulated = dev_priv->gpu_error.stop_rings != 0;
919 if (get_seconds() - dev_priv->gpu_error.last_reset < 5) 918
919 if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
920 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 920 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
921 else 921 ret = -ENODEV;
922 } else {
922 ret = intel_gpu_reset(dev); 923 ret = intel_gpu_reset(dev);
923 924
924 dev_priv->gpu_error.last_reset = get_seconds(); 925 /* Also reset the gpu hangman. */
926 if (simulated) {
927 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
928 dev_priv->gpu_error.stop_rings = 0;
929 if (ret == -ENODEV) {
930 DRM_ERROR("Reset not implemented, but ignoring "
931 "error for simulated gpu hangs\n");
932 ret = 0;
933 }
934 } else
935 dev_priv->gpu_error.last_reset = get_seconds();
936 }
925 if (ret) { 937 if (ret) {
926 DRM_ERROR("Failed to reset chip.\n"); 938 DRM_ERROR("Failed to reset chip.\n");
927 mutex_unlock(&dev->struct_mutex); 939 mutex_unlock(&dev->struct_mutex);
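i915_reset() now samples stop_rings before resetting, so a simulated hang bypasses the hanging-too-fast check, and an -ENODEV from an unimplemented reset is forgiven only in the simulated case. A compact model of that control flow (struct and values are illustrative):

#include <stdio.h>

#define ENODEV 19

struct gpu { int stop_rings; long last_reset; int reset_rc; };

static int do_reset(struct gpu *g, long now)
{
	int simulated = g->stop_rings != 0;
	int ret;

	if (!simulated && now - g->last_reset < 5)
		return -ENODEV;		/* hanging too fast: wedge */

	ret = g->reset_rc;		/* stand-in for the real reset */
	if (simulated) {
		g->stop_rings = 0;
		if (ret == -ENODEV)
			ret = 0;	/* unimplemented reset is fine here */
	} else {
		g->last_reset = now;
	}
	return ret;
}

int main(void)
{
	struct gpu real = { 0, 98, 0 };
	struct gpu fake = { 1, 98, -ENODEV };

	printf("real hang at t=100 -> %d\n", do_reset(&real, 100));
	printf("simulated hang     -> %d\n", do_reset(&fake, 100));
	return 0;
}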
@@ -984,12 +996,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
984 struct intel_device_info *intel_info = 996 struct intel_device_info *intel_info =
985 (struct intel_device_info *) ent->driver_data; 997 (struct intel_device_info *) ent->driver_data;
986 998
987 if (intel_info->is_valleyview)
988 if(!i915_preliminary_hw_support) {
989 DRM_ERROR("Preliminary hardware support disabled\n");
990 return -ENODEV;
991 }
992
993 /* Only bind to function 0 of the device. Early generations 999 /* Only bind to function 0 of the device. Early generations
994 * used function 1 as a placeholder for multi-head. This causes 1000 * used function 1 as a placeholder for multi-head. This causes
995 * us confusion instead, especially on the systems where both 1001 * us confusion instead, especially on the systems where both
@@ -1218,16 +1224,16 @@ MODULE_LICENSE("GPL and additional rights");
1218static void 1224static void
1219ilk_dummy_write(struct drm_i915_private *dev_priv) 1225ilk_dummy_write(struct drm_i915_private *dev_priv)
1220{ 1226{
1221 /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the 1227 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1222 * chip from rc6 before touching it for real. MI_MODE is masked, hence 1228 * the chip from rc6 before touching it for real. MI_MODE is masked,
1223 * harmless to write 0 into. */ 1229 * hence harmless to write 0 into. */
1224 I915_WRITE_NOTRACE(MI_MODE, 0); 1230 I915_WRITE_NOTRACE(MI_MODE, 0);
1225} 1231}
1226 1232
1227static void 1233static void
1228hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) 1234hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
1229{ 1235{
1230 if (IS_HASWELL(dev_priv->dev) && 1236 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
1231 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1237 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1232 DRM_ERROR("Unknown unclaimed register before writing to %x\n", 1238 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
1233 reg); 1239 reg);
@@ -1238,7 +1244,7 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
1238static void 1244static void
1239hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) 1245hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
1240{ 1246{
1241 if (IS_HASWELL(dev_priv->dev) && 1247 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
1242 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1248 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1243 DRM_ERROR("Unclaimed write to %x\n", reg); 1249 DRM_ERROR("Unclaimed write to %x\n", reg);
1244 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1250 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
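Both hsw_unclaimed_reg helpers use the same check-and-clear idiom: read the sticky FPGA_DBG_RM_NOCLAIM bit, complain, then write the bit back to acknowledge it. A user-space model of a write-one-to-clear status bit:

#include <stdio.h>

#define DBG_RM_NOCLAIM (1u << 31)	/* sticky "unclaimed write" bit */

static unsigned int fpga_dbg;		/* stand-in for the hw register */

static void mmio_write(unsigned int val)
{
	/* Write-one-to-clear semantics for the sticky bit. */
	fpga_dbg &= ~(val & DBG_RM_NOCLAIM);
}

static void unclaimed_reg_check(unsigned int reg)
{
	if (fpga_dbg & DBG_RM_NOCLAIM) {
		fprintf(stderr, "unclaimed write to %#x\n", reg);
		mmio_write(DBG_RM_NOCLAIM);	/* acknowledge and clear */
	}
}

int main(void)
{
	fpga_dbg |= DBG_RM_NOCLAIM;	/* pretend hw flagged a write */
	unclaimed_reg_check(0x2030);
	unclaimed_reg_check(0x2030);	/* second check: already clear */
	return 0;
}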
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9d00dcf9a2d..cc1d6056ab70 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -76,6 +76,8 @@ enum plane {
76}; 76};
77#define plane_name(p) ((p) + 'A') 77#define plane_name(p) ((p) + 'A')
78 78
79#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
80
79enum port { 81enum port {
80 PORT_A = 0, 82 PORT_A = 0,
81 PORT_B, 83 PORT_B,
@@ -86,6 +88,24 @@ enum port {
86}; 88};
87#define port_name(p) ((p) + 'A') 89#define port_name(p) ((p) + 'A')
88 90
91enum intel_display_power_domain {
92 POWER_DOMAIN_PIPE_A,
93 POWER_DOMAIN_PIPE_B,
94 POWER_DOMAIN_PIPE_C,
95 POWER_DOMAIN_PIPE_A_PANEL_FITTER,
96 POWER_DOMAIN_PIPE_B_PANEL_FITTER,
97 POWER_DOMAIN_PIPE_C_PANEL_FITTER,
98 POWER_DOMAIN_TRANSCODER_A,
99 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
102};
103
104#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
105#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
106 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
107#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
108
89enum hpd_pin { 109enum hpd_pin {
90 HPD_NONE = 0, 110 HPD_NONE = 0,
91 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ 111 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
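The power-domain enum is laid out so per-pipe and per-transcoder entries sit at fixed offsets from the _A member, letting POWER_DOMAIN_PIPE(pipe) and friends be pure additions. A sketch of that arithmetic with a trimmed enum:

#include <stdio.h>

enum power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
};

/* Contiguous enum layout maps a pipe index straight to its domain. */
#define POWER_DOMAIN_PIPE(pipe) \
	((enum power_domain)((pipe) + POWER_DOMAIN_PIPE_A))

int main(void)
{
	for (int pipe = 0; pipe < 3; pipe++)
		printf("pipe %c -> domain %d\n", 'A' + pipe,
		       POWER_DOMAIN_PIPE(pipe));
	return 0;
}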
@@ -112,15 +132,38 @@ enum hpd_pin {
112 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 132 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
113 if ((intel_encoder)->base.crtc == (__crtc)) 133 if ((intel_encoder)->base.crtc == (__crtc))
114 134
115struct intel_pch_pll { 135struct drm_i915_private;
136
137enum intel_dpll_id {
138 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
139 /* real shared dpll ids must be >= 0 */
140 DPLL_ID_PCH_PLL_A,
141 DPLL_ID_PCH_PLL_B,
142};
143#define I915_NUM_PLLS 2
144
145struct intel_dpll_hw_state {
146 uint32_t dpll;
147 uint32_t fp0;
148 uint32_t fp1;
149};
150
151struct intel_shared_dpll {
116 int refcount; /* count of number of CRTCs sharing this PLL */ 152 int refcount; /* count of number of CRTCs sharing this PLL */
117 int active; /* count of number of active CRTCs (i.e. DPMS on) */ 153 int active; /* count of number of active CRTCs (i.e. DPMS on) */
118 bool on; /* is the PLL actually active? Disabled during modeset */ 154 bool on; /* is the PLL actually active? Disabled during modeset */
119 int pll_reg; 155 const char *name;
120 int fp0_reg; 156 /* should match the index in the dev_priv->shared_dplls array */
121 int fp1_reg; 157 enum intel_dpll_id id;
158 struct intel_dpll_hw_state hw_state;
159 void (*enable)(struct drm_i915_private *dev_priv,
160 struct intel_shared_dpll *pll);
161 void (*disable)(struct drm_i915_private *dev_priv,
162 struct intel_shared_dpll *pll);
163 bool (*get_hw_state)(struct drm_i915_private *dev_priv,
164 struct intel_shared_dpll *pll,
165 struct intel_dpll_hw_state *hw_state);
122}; 166};
123#define I915_NUM_PLLS 2
124 167
125/* Used by dp and fdi links */ 168/* Used by dp and fdi links */
126struct intel_link_m_n { 169struct intel_link_m_n {
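intel_shared_dpll trades the old raw register offsets (pll_reg, fp0_reg, fp1_reg) for an ops-style interface: enable/disable/get_hw_state callbacks plus a cached hw_state, so callers dispatch through the PLL object instead of poking registers. A condensed sketch of the pattern:

#include <stdio.h>
#include <stdbool.h>

struct pll;			/* forward declaration, as the header does */

struct pll {
	const char *name;
	int refcount;		/* CRTCs sharing this PLL */
	bool on;
	void (*enable)(struct pll *pll);
	void (*disable)(struct pll *pll);
};

static void pch_pll_enable(struct pll *pll)
{
	pll->on = true;
	printf("%s: enabled\n", pll->name);
}

static void pch_pll_disable(struct pll *pll)
{
	pll->on = false;
	printf("%s: disabled\n", pll->name);
}

int main(void)
{
	struct pll pll_a = {
		.name = "PCH PLL A",
		.enable = pch_pll_enable,
		.disable = pch_pll_disable,
	};

	pll_a.refcount++;	/* a CRTC takes the PLL */
	pll_a.enable(&pll_a);	/* dispatch through the callbacks */
	pll_a.disable(&pll_a);
	return 0;
}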
@@ -175,7 +218,6 @@ struct opregion_header;
175struct opregion_acpi; 218struct opregion_acpi;
176struct opregion_swsci; 219struct opregion_swsci;
177struct opregion_asle; 220struct opregion_asle;
178struct drm_i915_private;
179 221
180struct intel_opregion { 222struct intel_opregion {
181 struct opregion_header __iomem *header; 223 struct opregion_header __iomem *header;
@@ -286,6 +328,8 @@ struct drm_i915_error_state {
286 328
287struct intel_crtc_config; 329struct intel_crtc_config;
288struct intel_crtc; 330struct intel_crtc;
331struct intel_limit;
332struct dpll;
289 333
290struct drm_i915_display_funcs { 334struct drm_i915_display_funcs {
291 bool (*fbc_enabled)(struct drm_device *dev); 335 bool (*fbc_enabled)(struct drm_device *dev);
@@ -293,11 +337,28 @@ struct drm_i915_display_funcs {
293 void (*disable_fbc)(struct drm_device *dev); 337 void (*disable_fbc)(struct drm_device *dev);
294 int (*get_display_clock_speed)(struct drm_device *dev); 338 int (*get_display_clock_speed)(struct drm_device *dev);
295 int (*get_fifo_size)(struct drm_device *dev, int plane); 339 int (*get_fifo_size)(struct drm_device *dev, int plane);
340 /**
341 * find_dpll() - Find the best values for the PLL
342 * @limit: limits for the PLL
343 * @crtc: current CRTC
344 * @target: target frequency in kHz
345 * @refclk: reference clock frequency in kHz
346 * @match_clock: if provided, @best_clock P divider must
347 * match the P divider from @match_clock
348 * used for LVDS downclocking
349 * @best_clock: best PLL values found
350 *
351 * Returns true on success, false on failure.
352 */
353 bool (*find_dpll)(const struct intel_limit *limit,
354 struct drm_crtc *crtc,
355 int target, int refclk,
356 struct dpll *match_clock,
357 struct dpll *best_clock);
296 void (*update_wm)(struct drm_device *dev); 358 void (*update_wm)(struct drm_device *dev);
297 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 359 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
298 uint32_t sprite_width, int pixel_size); 360 uint32_t sprite_width, int pixel_size,
299 void (*update_linetime_wm)(struct drm_device *dev, int pipe, 361 bool enable);
300 struct drm_display_mode *mode);
301 void (*modeset_global_resources)(struct drm_device *dev); 362 void (*modeset_global_resources)(struct drm_device *dev);
302 /* Returns the active state of the crtc, and if the crtc is active, 363 /* Returns the active state of the crtc, and if the crtc is active,
303 * fills out the pipe-config with the hw state. */ 364 * fills out the pipe-config with the hw state. */
@@ -331,68 +392,56 @@ struct drm_i915_gt_funcs {
331 void (*force_wake_put)(struct drm_i915_private *dev_priv); 392 void (*force_wake_put)(struct drm_i915_private *dev_priv);
332}; 393};
333 394
334#define DEV_INFO_FLAGS \ 395#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
335 DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ 396 func(is_mobile) sep \
336 DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ 397 func(is_i85x) sep \
337 DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ 398 func(is_i915g) sep \
338 DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ 399 func(is_i945gm) sep \
339 DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ 400 func(is_g33) sep \
340 DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ 401 func(need_gfx_hws) sep \
341 DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ 402 func(is_g4x) sep \
342 DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ 403 func(is_pineview) sep \
343 DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ 404 func(is_broadwater) sep \
344 DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ 405 func(is_crestline) sep \
345 DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ 406 func(is_ivybridge) sep \
346 DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ 407 func(is_valleyview) sep \
347 DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ 408 func(is_haswell) sep \
348 DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \ 409 func(has_force_wake) sep \
349 DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ 410 func(has_fbc) sep \
350 DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ 411 func(has_pipe_cxsr) sep \
351 DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ 412 func(has_hotplug) sep \
352 DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ 413 func(cursor_needs_physical) sep \
353 DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ 414 func(has_overlay) sep \
354 DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ 415 func(overlay_needs_physical) sep \
355 DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ 416 func(supports_tv) sep \
356 DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ 417 func(has_bsd_ring) sep \
357 DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ 418 func(has_blt_ring) sep \
358 DEV_INFO_FLAG(has_llc) 419 func(has_vebox_ring) sep \
420 func(has_llc) sep \
421 func(has_ddi) sep \
422 func(has_fpga_dbg)
423
424#define DEFINE_FLAG(name) u8 name:1
425#define SEP_SEMICOLON ;
359 426
360struct intel_device_info { 427struct intel_device_info {
361 u32 display_mmio_offset; 428 u32 display_mmio_offset;
362 u8 num_pipes:3; 429 u8 num_pipes:3;
363 u8 gen; 430 u8 gen;
364 u8 is_mobile:1; 431 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
365 u8 is_i85x:1;
366 u8 is_i915g:1;
367 u8 is_i945gm:1;
368 u8 is_g33:1;
369 u8 need_gfx_hws:1;
370 u8 is_g4x:1;
371 u8 is_pineview:1;
372 u8 is_broadwater:1;
373 u8 is_crestline:1;
374 u8 is_ivybridge:1;
375 u8 is_valleyview:1;
376 u8 has_force_wake:1;
377 u8 is_haswell:1;
378 u8 has_fbc:1;
379 u8 has_pipe_cxsr:1;
380 u8 has_hotplug:1;
381 u8 cursor_needs_physical:1;
382 u8 has_overlay:1;
383 u8 overlay_needs_physical:1;
384 u8 supports_tv:1;
385 u8 has_bsd_ring:1;
386 u8 has_blt_ring:1;
387 u8 has_llc:1;
388}; 432};
389 433
434#undef DEFINE_FLAG
435#undef SEP_SEMICOLON
436
390enum i915_cache_level { 437enum i915_cache_level {
391 I915_CACHE_NONE = 0, 438 I915_CACHE_NONE = 0,
392 I915_CACHE_LLC, 439 I915_CACHE_LLC,
393 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ 440 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
394}; 441};
395 442
443typedef uint32_t gen6_gtt_pte_t;
444
396/* The Graphics Translation Table is the way in which GEN hardware translates a 445/* The Graphics Translation Table is the way in which GEN hardware translates a
397 * Graphics Virtual Address into a Physical Address. In addition to the normal 446 * Graphics Virtual Address into a Physical Address. In addition to the normal
398 * collateral associated with any va->pa translations GEN hardware also has a 447 * collateral associated with any va->pa translations GEN hardware also has a
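DEV_INFO_FOR_EACH_FLAG is a classic X-macro: one authoritative flag list, expanded once with DEFINE_FLAG/SEP_SEMICOLON to declare the bitfields and again, earlier in this patch, with PRINT_FLAG/SEP_COMMA to build the debug output, so the declarations and the printout cannot drift apart. A self-contained demonstration with a shortened list:

#include <stdio.h>

/* One list, many expansions: the macro arguments pick the shape. */
#define FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(has_fbc) sep \
	func(has_llc)

struct device_info {
	unsigned gen;
#define DEFINE_FLAG(name) unsigned name:1
#define SEP_SEMICOLON ;
	FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
#undef DEFINE_FLAG
#undef SEP_SEMICOLON
};

static void print_flags(const struct device_info *info)
{
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	/* Each list entry becomes one printf argument via SEP_COMMA. */
	printf("flags: %s%s%s\n",
	       FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_FLAG
#undef SEP_COMMA
}

int main(void)
{
	struct device_info hsw = { .gen = 7, .is_mobile = 1, .has_llc = 1 };

	print_flags(&hsw);	/* -> flags: is_mobile,has_llc, */
	return 0;
}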
@@ -428,6 +477,9 @@ struct i915_gtt {
428 struct sg_table *st, 477 struct sg_table *st,
429 unsigned int pg_start, 478 unsigned int pg_start,
430 enum i915_cache_level cache_level); 479 enum i915_cache_level cache_level);
480 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
481 dma_addr_t addr,
482 enum i915_cache_level level);
431}; 483};
432#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) 484#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
433 485
@@ -449,19 +501,31 @@ struct i915_hw_ppgtt {
449 struct sg_table *st, 501 struct sg_table *st,
450 unsigned int pg_start, 502 unsigned int pg_start,
451 enum i915_cache_level cache_level); 503 enum i915_cache_level cache_level);
504 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
505 dma_addr_t addr,
506 enum i915_cache_level level);
452 int (*enable)(struct drm_device *dev); 507 int (*enable)(struct drm_device *dev);
453 void (*cleanup)(struct i915_hw_ppgtt *ppgtt); 508 void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
454}; 509};
455 510
511struct i915_ctx_hang_stats {
512 /* This context had batch pending when hang was declared */
513 unsigned batch_pending;
514
515 /* This context had batch active when hang was declared */
516 unsigned batch_active;
517};
456 518
457/* This must match up with the value previously used for execbuf2.rsvd1. */ 519/* This must match up with the value previously used for execbuf2.rsvd1. */
458#define DEFAULT_CONTEXT_ID 0 520#define DEFAULT_CONTEXT_ID 0
459struct i915_hw_context { 521struct i915_hw_context {
522 struct kref ref;
460 int id; 523 int id;
461 bool is_initialized; 524 bool is_initialized;
462 struct drm_i915_file_private *file_priv; 525 struct drm_i915_file_private *file_priv;
463 struct intel_ring_buffer *ring; 526 struct intel_ring_buffer *ring;
464 struct drm_i915_gem_object *obj; 527 struct drm_i915_gem_object *obj;
528 struct i915_ctx_hang_stats hang_stats;
465}; 529};
466 530
467enum no_fbc_reason { 531enum no_fbc_reason {
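i915_hw_context gains a struct kref, and every holder, including each outstanding request via request->ctx, now pins the context until the last reference drops. A user-space model of the get/put-with-release lifecycle (the toy kref here is non-atomic, unlike the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct kref { int count; };	/* toy, non-atomic stand-in */

static void kref_init(struct kref *k) { k->count = 1; }
static void kref_get(struct kref *k) { k->count++; }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->count == 0)
		release(k);
}

struct hw_context {
	struct kref ref;	/* first member, so the cast below is valid */
	int id;
};

static void context_free(struct kref *ref)
{
	struct hw_context *ctx = (struct hw_context *)ref;

	printf("context %d freed\n", ctx->id);
	free(ctx);
}

int main(void)
{
	struct hw_context *ctx = malloc(sizeof(*ctx));

	kref_init(&ctx->ref);
	ctx->id = 1;
	kref_get(&ctx->ref);			/* a request holds it */
	kref_put(&ctx->ref, context_free);	/* request retired */
	kref_put(&ctx->ref, context_free);	/* owner drops: freed */
	return 0;
}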
@@ -658,6 +722,7 @@ struct i915_suspend_saved_registers {
658 722
659struct intel_gen6_power_mgmt { 723struct intel_gen6_power_mgmt {
660 struct work_struct work; 724 struct work_struct work;
725 struct delayed_work vlv_work;
661 u32 pm_iir; 726 u32 pm_iir;
 662 /* lock - irqsave spinlock that protects the work_struct and 727
663 * pm_iir. */ 728 * pm_iir. */
@@ -668,6 +733,7 @@ struct intel_gen6_power_mgmt {
668 u8 cur_delay; 733 u8 cur_delay;
669 u8 min_delay; 734 u8 min_delay;
670 u8 max_delay; 735 u8 max_delay;
736 u8 rpe_delay;
671 u8 hw_max; 737 u8 hw_max;
672 738
673 struct delayed_work delayed_resume_work; 739 struct delayed_work delayed_resume_work;
@@ -704,6 +770,15 @@ struct intel_ilk_power_mgmt {
704 struct drm_i915_gem_object *renderctx; 770 struct drm_i915_gem_object *renderctx;
705}; 771};
706 772
773/* Power well structure for haswell */
774struct i915_power_well {
775 struct drm_device *device;
776 spinlock_t lock;
777 /* power well enable/disable usage count */
778 int count;
779 int i915_request;
780};
781
707struct i915_dri1_state { 782struct i915_dri1_state {
708 unsigned allow_batchbuffer : 1; 783 unsigned allow_batchbuffer : 1;
709 u32 __iomem *gfx_hws_cpu_addr; 784 u32 __iomem *gfx_hws_cpu_addr;
@@ -812,14 +887,20 @@ struct i915_gem_mm {
812 u32 object_count; 887 u32 object_count;
813}; 888};
814 889
890struct drm_i915_error_state_buf {
891 unsigned bytes;
892 unsigned size;
893 int err;
894 u8 *buf;
895 loff_t start;
896 loff_t pos;
897};
898
815struct i915_gpu_error { 899struct i915_gpu_error {
816 /* For hangcheck timer */ 900 /* For hangcheck timer */
817#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 901#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
818#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 902#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
819 struct timer_list hangcheck_timer; 903 struct timer_list hangcheck_timer;
820 int hangcheck_count;
821 uint32_t last_acthd[I915_NUM_RINGS];
822 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
823 904
824 /* For reset and error_state handling. */ 905 /* For reset and error_state handling. */
825 spinlock_t lock; 906 spinlock_t lock;
@@ -875,6 +956,37 @@ enum modeset_restore {
875 MODESET_SUSPENDED, 956 MODESET_SUSPENDED,
876}; 957};
877 958
959struct intel_vbt_data {
960 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
961 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
962
963 /* Feature bits */
964 unsigned int int_tv_support:1;
965 unsigned int lvds_dither:1;
966 unsigned int lvds_vbt:1;
967 unsigned int int_crt_support:1;
968 unsigned int lvds_use_ssc:1;
969 unsigned int display_clock_mode:1;
970 unsigned int fdi_rx_polarity_inverted:1;
971 int lvds_ssc_freq;
972 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
973
974 /* eDP */
975 int edp_rate;
976 int edp_lanes;
977 int edp_preemphasis;
978 int edp_vswing;
979 bool edp_initialized;
980 bool edp_support;
981 int edp_bpp;
982 struct edp_power_seq edp_pps;
983
984 int crt_ddc_pin;
985
986 int child_dev_num;
987 struct child_device_config *child_dev;
988};
989
878typedef struct drm_i915_private { 990typedef struct drm_i915_private {
879 struct drm_device *dev; 991 struct drm_device *dev;
880 struct kmem_cache *slab; 992 struct kmem_cache *slab;
@@ -941,9 +1053,9 @@ typedef struct drm_i915_private {
941 HPD_MARK_DISABLED = 2 1053 HPD_MARK_DISABLED = 2
942 } hpd_mark; 1054 } hpd_mark;
943 } hpd_stats[HPD_NUM_PINS]; 1055 } hpd_stats[HPD_NUM_PINS];
1056 u32 hpd_event_bits;
944 struct timer_list hotplug_reenable_timer; 1057 struct timer_list hotplug_reenable_timer;
945 1058
946 int num_pch_pll;
947 int num_plane; 1059 int num_plane;
948 1060
949 unsigned long cfb_size; 1061 unsigned long cfb_size;
@@ -953,6 +1065,7 @@ typedef struct drm_i915_private {
953 struct intel_fbc_work *fbc_work; 1065 struct intel_fbc_work *fbc_work;
954 1066
955 struct intel_opregion opregion; 1067 struct intel_opregion opregion;
1068 struct intel_vbt_data vbt;
956 1069
957 /* overlay */ 1070 /* overlay */
958 struct intel_overlay *overlay; 1071 struct intel_overlay *overlay;
@@ -962,37 +1075,15 @@ typedef struct drm_i915_private {
962 struct { 1075 struct {
963 int level; 1076 int level;
964 bool enabled; 1077 bool enabled;
1078 spinlock_t lock; /* bl registers and the above bl fields */
965 struct backlight_device *device; 1079 struct backlight_device *device;
966 } backlight; 1080 } backlight;
967 1081
968 /* LVDS info */ 1082 /* LVDS info */
969 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1083 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
970 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1084 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
971
972 /* Feature bits from the VBIOS */
973 unsigned int int_tv_support:1;
974 unsigned int lvds_dither:1;
975 unsigned int lvds_vbt:1;
976 unsigned int int_crt_support:1;
977 unsigned int lvds_use_ssc:1;
978 unsigned int display_clock_mode:1;
979 unsigned int fdi_rx_polarity_inverted:1;
980 int lvds_ssc_freq;
981 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
982 struct {
983 int rate;
984 int lanes;
985 int preemphasis;
986 int vswing;
987
988 bool initialized;
989 bool support;
990 int bpp;
991 struct edp_power_seq pps;
992 } edp;
993 bool no_aux_handshake; 1085 bool no_aux_handshake;
994 1086
995 int crt_ddc_pin;
996 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1087 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
997 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 1088 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
998 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1089 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -1020,16 +1111,13 @@ typedef struct drm_i915_private {
1020 /* Kernel Modesetting */ 1111 /* Kernel Modesetting */
1021 1112
1022 struct sdvo_device_mapping sdvo_mappings[2]; 1113 struct sdvo_device_mapping sdvo_mappings[2];
1023 /* indicate whether the LVDS_BORDER should be enabled or not */
1024 unsigned int lvds_border_bits;
1025 /* Panel fitter placement and size for Ironlake+ */
1026 u32 pch_pf_pos, pch_pf_size;
1027 1114
1028 struct drm_crtc *plane_to_crtc_mapping[3]; 1115 struct drm_crtc *plane_to_crtc_mapping[3];
1029 struct drm_crtc *pipe_to_crtc_mapping[3]; 1116 struct drm_crtc *pipe_to_crtc_mapping[3];
1030 wait_queue_head_t pending_flip_queue; 1117 wait_queue_head_t pending_flip_queue;
1031 1118
1032 struct intel_pch_pll pch_plls[I915_NUM_PLLS]; 1119 int num_shared_dpll;
1120 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1033 struct intel_ddi_plls ddi_plls; 1121 struct intel_ddi_plls ddi_plls;
1034 1122
1035 /* Reclocking support */ 1123 /* Reclocking support */
@@ -1038,8 +1126,6 @@ typedef struct drm_i915_private {
1038 /* indicates the reduced downclock for LVDS*/ 1126 /* indicates the reduced downclock for LVDS*/
1039 int lvds_downclock; 1127 int lvds_downclock;
1040 u16 orig_clock; 1128 u16 orig_clock;
1041 int child_dev_num;
1042 struct child_device_config *child_dev;
1043 1129
1044 bool mchbar_need_disable; 1130 bool mchbar_need_disable;
1045 1131
@@ -1052,6 +1138,9 @@ typedef struct drm_i915_private {
1052 * mchdev_lock in intel_pm.c */ 1138 * mchdev_lock in intel_pm.c */
1053 struct intel_ilk_power_mgmt ips; 1139 struct intel_ilk_power_mgmt ips;
1054 1140
1141 /* Haswell power well */
1142 struct i915_power_well power_well;
1143
1055 enum no_fbc_reason no_fbc_reason; 1144 enum no_fbc_reason no_fbc_reason;
1056 1145
1057 struct drm_mm_node *compressed_fb; 1146 struct drm_mm_node *compressed_fb;
@@ -1059,6 +1148,8 @@ typedef struct drm_i915_private {
1059 1148
1060 struct i915_gpu_error gpu_error; 1149 struct i915_gpu_error gpu_error;
1061 1150
1151 struct drm_i915_gem_object *vlv_pctx;
1152
1062 /* list of fbdev register on this device */ 1153 /* list of fbdev register on this device */
1063 struct intel_fbdev *fbdev; 1154 struct intel_fbdev *fbdev;
1064 1155
@@ -1124,7 +1215,7 @@ struct drm_i915_gem_object {
1124 struct drm_mm_node *gtt_space; 1215 struct drm_mm_node *gtt_space;
1125 /** Stolen memory for this object, instead of being backed by shmem. */ 1216 /** Stolen memory for this object, instead of being backed by shmem. */
1126 struct drm_mm_node *stolen; 1217 struct drm_mm_node *stolen;
1127 struct list_head gtt_list; 1218 struct list_head global_list;
1128 1219
1129 /** This object's place on the active/inactive lists */ 1220 /** This object's place on the active/inactive lists */
1130 struct list_head ring_list; 1221 struct list_head ring_list;
@@ -1271,9 +1362,18 @@ struct drm_i915_gem_request {
1271 /** GEM sequence number associated with this request. */ 1362 /** GEM sequence number associated with this request. */
1272 uint32_t seqno; 1363 uint32_t seqno;
1273 1364
1274 /** Postion in the ringbuffer of the end of the request */ 1365 /** Position in the ringbuffer of the start of the request */
1366 u32 head;
1367
1368 /** Position in the ringbuffer of the end of the request */
1275 u32 tail; 1369 u32 tail;
1276 1370
1371 /** Context related to this request */
1372 struct i915_hw_context *ctx;
1373
1374 /** Batch buffer related to this request if any */
1375 struct drm_i915_gem_object *batch_obj;
1376
1277 /** Time at which this request was emitted, in jiffies. */ 1377 /** Time at which this request was emitted, in jiffies. */
1278 unsigned long emitted_jiffies; 1378 unsigned long emitted_jiffies;
1279 1379
@@ -1291,6 +1391,8 @@ struct drm_i915_file_private {
1291 struct list_head request_list; 1391 struct list_head request_list;
1292 } mm; 1392 } mm;
1293 struct idr context_idr; 1393 struct idr context_idr;
1394
1395 struct i915_ctx_hang_stats hang_stats;
1294}; 1396};
1295 1397
1296#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1398#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1341,6 +1443,7 @@ struct drm_i915_file_private {
1341 1443
1342#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 1444#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1343#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 1445#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1446#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
1344#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1447#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1345#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1448#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1346 1449
@@ -1371,10 +1474,13 @@ struct drm_i915_file_private {
1371#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1474#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1372#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1475#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1373 1476
1477#define HAS_IPS(dev) (IS_ULT(dev))
1478
1374#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1479#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1375 1480
1376#define HAS_DDI(dev) (IS_HASWELL(dev)) 1481#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1377#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1482#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
1483#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1378 1484
1379#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1485#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1380#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1486#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1435,6 +1541,7 @@ extern bool i915_enable_hangcheck __read_mostly;
1435extern int i915_enable_ppgtt __read_mostly; 1541extern int i915_enable_ppgtt __read_mostly;
1436extern unsigned int i915_preliminary_hw_support __read_mostly; 1542extern unsigned int i915_preliminary_hw_support __read_mostly;
1437extern int i915_disable_power_well __read_mostly; 1543extern int i915_disable_power_well __read_mostly;
1544extern int i915_enable_ips __read_mostly;
1438 1545
1439extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1546extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1440extern int i915_resume(struct drm_device *dev); 1547extern int i915_resume(struct drm_device *dev);
@@ -1486,8 +1593,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1486void 1593void
1487i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1594i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1488 1595
1489void intel_enable_asle(struct drm_device *dev);
1490
1491#ifdef CONFIG_DEBUG_FS 1596#ifdef CONFIG_DEBUG_FS
1492extern void i915_destroy_error_state(struct drm_device *dev); 1597extern void i915_destroy_error_state(struct drm_device *dev);
1493#else 1598#else
@@ -1626,6 +1731,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1626{ 1731{
1627 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1732 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1628 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1733 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1734 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
1629 dev_priv->fence_regs[obj->fence_reg].pin_count--; 1735 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1630 } 1736 }
1631} 1737}
@@ -1658,9 +1764,12 @@ void i915_gem_init_swizzling(struct drm_device *dev);
1658void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1764void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1659int __must_check i915_gpu_idle(struct drm_device *dev); 1765int __must_check i915_gpu_idle(struct drm_device *dev);
1660int __must_check i915_gem_idle(struct drm_device *dev); 1766int __must_check i915_gem_idle(struct drm_device *dev);
1661int i915_add_request(struct intel_ring_buffer *ring, 1767int __i915_add_request(struct intel_ring_buffer *ring,
1662 struct drm_file *file, 1768 struct drm_file *file,
1663 u32 *seqno); 1769 struct drm_i915_gem_object *batch_obj,
1770 u32 *seqno);
1771#define i915_add_request(ring, seqno) \
1772 __i915_add_request(ring, NULL, NULL, seqno)
1664int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1773int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1665 uint32_t seqno); 1774 uint32_t seqno);
1666int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1775int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
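i915_add_request() grows into __i915_add_request() with the batch object threaded through, while the old two-argument form survives as a macro that defaults the new parameters to NULL, so existing callers compile unchanged. The compatibility-shim shape in miniature:

#include <stdio.h>

struct ring { int id; };
struct obj { int id; };

/* Extended entry point: new callers pass the batch object. */
static int __add_request(struct ring *ring, struct obj *batch, int *seqno)
{
	*seqno = 100 + ring->id;
	printf("request on ring %d, batch %d\n",
	       ring->id, batch ? batch->id : -1);
	return 0;
}

/* Old two-argument form kept as a macro, defaulting the new arg. */
#define add_request(ring, seqno) __add_request(ring, NULL, seqno)

int main(void)
{
	struct ring rcs = { 0 };
	struct obj batch = { 7 };
	int seqno;

	add_request(&rcs, &seqno);		/* legacy caller */
	__add_request(&rcs, &batch, &seqno);	/* new caller */
	return 0;
}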
@@ -1703,6 +1812,21 @@ void i915_gem_context_fini(struct drm_device *dev);
1703void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 1812void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
1704int i915_switch_context(struct intel_ring_buffer *ring, 1813int i915_switch_context(struct intel_ring_buffer *ring,
1705 struct drm_file *file, int to_id); 1814 struct drm_file *file, int to_id);
1815void i915_gem_context_free(struct kref *ctx_ref);
1816static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
1817{
1818 kref_get(&ctx->ref);
1819}
1820
1821static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
1822{
1823 kref_put(&ctx->ref, i915_gem_context_free);
1824}
1825
1826struct i915_ctx_hang_stats * __must_check
1827i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
1828 struct drm_file *file,
1829 u32 id);
1706int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 1830int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1707 struct drm_file *file); 1831 struct drm_file *file);
1708int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 1832int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1784,6 +1908,8 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1784/* i915_debugfs.c */ 1908/* i915_debugfs.c */
1785int i915_debugfs_init(struct drm_minor *minor); 1909int i915_debugfs_init(struct drm_minor *minor);
1786void i915_debugfs_cleanup(struct drm_minor *minor); 1910void i915_debugfs_cleanup(struct drm_minor *minor);
1911__printf(2, 3)
1912void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
1787 1913
1788/* i915_suspend.c */ 1914/* i915_suspend.c */
1789extern int i915_save_state(struct drm_device *dev); 1915extern int i915_save_state(struct drm_device *dev);
@@ -1800,7 +1926,7 @@ void i915_teardown_sysfs(struct drm_device *dev_priv);
1800/* intel_i2c.c */ 1926/* intel_i2c.c */
1801extern int intel_setup_gmbus(struct drm_device *dev); 1927extern int intel_setup_gmbus(struct drm_device *dev);
1802extern void intel_teardown_gmbus(struct drm_device *dev); 1928extern void intel_teardown_gmbus(struct drm_device *dev);
1803extern inline bool intel_gmbus_is_port_valid(unsigned port) 1929static inline bool intel_gmbus_is_port_valid(unsigned port)
1804{ 1930{
1805 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); 1931 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
1806} 1932}
@@ -1809,7 +1935,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
1809 struct drm_i915_private *dev_priv, unsigned port); 1935 struct drm_i915_private *dev_priv, unsigned port);
1810extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 1936extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
1811extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 1937extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
1812extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 1938static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1813{ 1939{
1814 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 1940 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
1815} 1941}
@@ -1821,14 +1947,10 @@ extern int intel_opregion_setup(struct drm_device *dev);
1821extern void intel_opregion_init(struct drm_device *dev); 1947extern void intel_opregion_init(struct drm_device *dev);
1822extern void intel_opregion_fini(struct drm_device *dev); 1948extern void intel_opregion_fini(struct drm_device *dev);
1823extern void intel_opregion_asle_intr(struct drm_device *dev); 1949extern void intel_opregion_asle_intr(struct drm_device *dev);
1824extern void intel_opregion_gse_intr(struct drm_device *dev);
1825extern void intel_opregion_enable_asle(struct drm_device *dev);
1826#else 1950#else
1827static inline void intel_opregion_init(struct drm_device *dev) { return; } 1951static inline void intel_opregion_init(struct drm_device *dev) { return; }
1828static inline void intel_opregion_fini(struct drm_device *dev) { return; } 1952static inline void intel_opregion_fini(struct drm_device *dev) { return; }
1829static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 1953static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
1830static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
1831static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
1832#endif 1954#endif
1833 1955
1834/* intel_acpi.c */ 1956/* intel_acpi.c */
@@ -1842,6 +1964,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
1842 1964
1843/* modesetting */ 1965/* modesetting */
1844extern void intel_modeset_init_hw(struct drm_device *dev); 1966extern void intel_modeset_init_hw(struct drm_device *dev);
1967extern void intel_modeset_suspend_hw(struct drm_device *dev);
1845extern void intel_modeset_init(struct drm_device *dev); 1968extern void intel_modeset_init(struct drm_device *dev);
1846extern void intel_modeset_gem_init(struct drm_device *dev); 1969extern void intel_modeset_gem_init(struct drm_device *dev);
1847extern void intel_modeset_cleanup(struct drm_device *dev); 1970extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1854,6 +1977,9 @@ extern void intel_disable_fbc(struct drm_device *dev);
1854extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1977extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1855extern void intel_init_pch_refclk(struct drm_device *dev); 1978extern void intel_init_pch_refclk(struct drm_device *dev);
1856extern void gen6_set_rps(struct drm_device *dev, u8 val); 1979extern void gen6_set_rps(struct drm_device *dev, u8 val);
1980extern void valleyview_set_rps(struct drm_device *dev, u8 val);
1981extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
1982extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
1857extern void intel_detect_pch(struct drm_device *dev); 1983extern void intel_detect_pch(struct drm_device *dev);
1858extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1984extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1859extern int intel_enable_rc6(const struct drm_device *dev); 1985extern int intel_enable_rc6(const struct drm_device *dev);
@@ -1865,10 +1991,11 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1865/* overlay */ 1991/* overlay */
1866#ifdef CONFIG_DEBUG_FS 1992#ifdef CONFIG_DEBUG_FS
1867extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1993extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1868extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); 1994extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
1995 struct intel_overlay_error_state *error);
1869 1996
1870extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 1997extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
1871extern void intel_display_print_error_state(struct seq_file *m, 1998extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
1872 struct drm_device *dev, 1999 struct drm_device *dev,
1873 struct intel_display_error_state *error); 2000 struct intel_display_error_state *error);
1874#endif 2001#endif
@@ -1883,8 +2010,20 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1883 2010
1884int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2011int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
1885int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2012int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
1886int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val); 2013
1887int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); 2014/* intel_sideband.c */
2015u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2016void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
2017u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
2018u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
2019void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
2020u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
2021 enum intel_sbi_destination destination);
2022void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2023 enum intel_sbi_destination destination);
2024
2025int vlv_gpu_freq(int ddr_freq, int val);
2026int vlv_freq_opcode(int ddr_freq, int val);
1888 2027
1889#define __i915_read(x, y) \ 2028#define __i915_read(x, y) \
1890 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 2029 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 970ad17c99ab..769f75262feb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -176,7 +176,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
176 176
177 pinned = 0; 177 pinned = 0;
178 mutex_lock(&dev->struct_mutex); 178 mutex_lock(&dev->struct_mutex);
179 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 179 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
180 if (obj->pin_count) 180 if (obj->pin_count)
181 pinned += obj->gtt_space->size; 181 pinned += obj->gtt_space->size;
182 mutex_unlock(&dev->struct_mutex); 182 mutex_unlock(&dev->struct_mutex);
@@ -956,7 +956,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
956 956
957 ret = 0; 957 ret = 0;
958 if (seqno == ring->outstanding_lazy_request) 958 if (seqno == ring->outstanding_lazy_request)
959 ret = i915_add_request(ring, NULL, NULL); 959 ret = i915_add_request(ring, NULL);
960 960
961 return ret; 961 return ret;
962} 962}
@@ -1087,6 +1087,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1087 interruptible, NULL); 1087 interruptible, NULL);
1088} 1088}
1089 1089
1090static int
1091i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1092 struct intel_ring_buffer *ring)
1093{
1094 i915_gem_retire_requests_ring(ring);
1095
1096 /* Manually manage the write flush as we may have not yet
1097 * retired the buffer.
1098 *
1099 * Note that the last_write_seqno is always the earlier of
 1100 * the two (read/write) seqno, so if we have successfully waited,
1101 * we know we have passed the last write.
1102 */
1103 obj->last_write_seqno = 0;
1104 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1105
1106 return 0;
1107}
1108
1090/** 1109/**
1091 * Ensures that all rendering to the object has completed and the object is 1110 * Ensures that all rendering to the object has completed and the object is
1092 * safe to unbind from the GTT or access from the CPU. 1111 * safe to unbind from the GTT or access from the CPU.
@@ -1107,18 +1126,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1107 if (ret) 1126 if (ret)
1108 return ret; 1127 return ret;
1109 1128
1110 i915_gem_retire_requests_ring(ring); 1129 return i915_gem_object_wait_rendering__tail(obj, ring);
1111
1112 /* Manually manage the write flush as we may have not yet
1113 * retired the buffer.
1114 */
1115 if (obj->last_write_seqno &&
1116 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1117 obj->last_write_seqno = 0;
1118 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1119 }
1120
1121 return 0;
1122} 1130}
1123 1131
1124/* A nonblocking variant of the above wait. This is a highly dangerous routine 1132/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1154,19 +1162,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1154 mutex_unlock(&dev->struct_mutex); 1162 mutex_unlock(&dev->struct_mutex);
1155 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 1163 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1156 mutex_lock(&dev->struct_mutex); 1164 mutex_lock(&dev->struct_mutex);
1165 if (ret)
1166 return ret;
1157 1167
1158 i915_gem_retire_requests_ring(ring); 1168 return i915_gem_object_wait_rendering__tail(obj, ring);
1159
1160 /* Manually manage the write flush as we may have not yet
1161 * retired the buffer.
1162 */
1163 if (obj->last_write_seqno &&
1164 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1165 obj->last_write_seqno = 0;
1166 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1167 }
1168
1169 return ret;
1170} 1169}
1171 1170
1172/** 1171/**
@@ -1676,7 +1675,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1676 /* ->put_pages might need to allocate memory for the bit17 swizzle 1675 /* ->put_pages might need to allocate memory for the bit17 swizzle
1677 * array, hence protect them from being reaped by removing them from gtt 1676 * array, hence protect them from being reaped by removing them from gtt
1678 * lists early. */ 1677 * lists early. */
1679 list_del(&obj->gtt_list); 1678 list_del(&obj->global_list);
1680 1679
1681 ops->put_pages(obj); 1680 ops->put_pages(obj);
1682 obj->pages = NULL; 1681 obj->pages = NULL;
@@ -1696,7 +1695,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1696 1695
1697 list_for_each_entry_safe(obj, next, 1696 list_for_each_entry_safe(obj, next,
1698 &dev_priv->mm.unbound_list, 1697 &dev_priv->mm.unbound_list,
1699 gtt_list) { 1698 global_list) {
1700 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && 1699 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1701 i915_gem_object_put_pages(obj) == 0) { 1700 i915_gem_object_put_pages(obj) == 0) {
1702 count += obj->base.size >> PAGE_SHIFT; 1701 count += obj->base.size >> PAGE_SHIFT;
@@ -1733,7 +1732,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1733 1732
1734 i915_gem_evict_everything(dev_priv->dev); 1733 i915_gem_evict_everything(dev_priv->dev);
1735 1734
1736 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) 1735 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1736 global_list)
1737 i915_gem_object_put_pages(obj); 1737 i915_gem_object_put_pages(obj);
1738} 1738}
1739 1739
@@ -1801,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1801 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 1801 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1802 gfp &= ~(__GFP_IO | __GFP_WAIT); 1802 gfp &= ~(__GFP_IO | __GFP_WAIT);
1803 } 1803 }
1804 1804#ifdef CONFIG_SWIOTLB
1805 if (swiotlb_nr_tbl()) {
1806 st->nents++;
1807 sg_set_page(sg, page, PAGE_SIZE, 0);
1808 sg = sg_next(sg);
1809 continue;
1810 }
1811#endif
1805 if (!i || page_to_pfn(page) != last_pfn + 1) { 1812 if (!i || page_to_pfn(page) != last_pfn + 1) {
1806 if (i) 1813 if (i)
1807 sg = sg_next(sg); 1814 sg = sg_next(sg);
@@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1812 } 1819 }
1813 last_pfn = page_to_pfn(page); 1820 last_pfn = page_to_pfn(page);
1814 } 1821 }
1815 1822#ifdef CONFIG_SWIOTLB
1816 sg_mark_end(sg); 1823 if (!swiotlb_nr_tbl())
1824#endif
1825 sg_mark_end(sg);
1817 obj->pages = st; 1826 obj->pages = st;
1818 1827
1819 if (i915_gem_object_needs_bit17_swizzle(obj)) 1828 if (i915_gem_object_needs_bit17_swizzle(obj))
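[Editor's note: the hunk above changes how the scatterlist is built. Without SWIOTLB, physically contiguous pages are coalesced into a single scatterlist entry via the "page_to_pfn(page) != last_pfn + 1" test; under SWIOTLB every page gets its own entry, and the explicit sg_mark_end() is skipped since every entry allocated for the table gets used. A minimal standalone sketch of the coalescing idea, using a plain array of page-frame numbers instead of the kernel's sg_table API — the names here are illustrative, not the driver's:

#include <stdio.h>

struct seg { unsigned long first_pfn; unsigned long npages; };

/* Coalesce runs of contiguous pfns into segments, mirroring the
 * "page_to_pfn(page) != last_pfn + 1" test in the hunk above. */
static int build_segments(const unsigned long *pfns, int n,
                          struct seg *out, int coalesce)
{
    int nsegs = 0;
    for (int i = 0; i < n; i++) {
        if (!coalesce || nsegs == 0 ||
            pfns[i] != out[nsegs - 1].first_pfn + out[nsegs - 1].npages) {
            out[nsegs].first_pfn = pfns[i];
            out[nsegs].npages = 1;
            nsegs++;
        } else {
            out[nsegs - 1].npages++;
        }
    }
    return nsegs;
}

int main(void)
{
    unsigned long pfns[] = { 100, 101, 102, 200, 201 };
    struct seg segs[5];

    /* Coalesced (no swiotlb): 2 segments. One-per-page (swiotlb): 5. */
    printf("coalesced: %d segments\n", build_segments(pfns, 5, segs, 1));
    printf("swiotlb:   %d segments\n", build_segments(pfns, 5, segs, 0));
    return 0;
}
]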
@@ -1858,7 +1867,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1858 if (ret) 1867 if (ret)
1859 return ret; 1868 return ret;
1860 1869
1861 list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); 1870 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1862 return 0; 1871 return 0;
1863} 1872}
1864 1873
@@ -1996,17 +2005,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1996 return 0; 2005 return 0;
1997} 2006}
1998 2007
1999int 2008int __i915_add_request(struct intel_ring_buffer *ring,
2000i915_add_request(struct intel_ring_buffer *ring, 2009 struct drm_file *file,
2001 struct drm_file *file, 2010 struct drm_i915_gem_object *obj,
2002 u32 *out_seqno) 2011 u32 *out_seqno)
2003{ 2012{
2004 drm_i915_private_t *dev_priv = ring->dev->dev_private; 2013 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2005 struct drm_i915_gem_request *request; 2014 struct drm_i915_gem_request *request;
2006 u32 request_ring_position; 2015 u32 request_ring_position, request_start;
2007 int was_empty; 2016 int was_empty;
2008 int ret; 2017 int ret;
2009 2018
2019 request_start = intel_ring_get_tail(ring);
2010 /* 2020 /*
2011 * Emit any outstanding flushes - execbuf can fail to emit the flush 2021 * Emit any outstanding flushes - execbuf can fail to emit the flush
2012 * after having emitted the batchbuffer command. Hence we need to fix 2022 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2038,7 +2048,21 @@ i915_add_request(struct intel_ring_buffer *ring,
2038 2048
2039 request->seqno = intel_ring_get_seqno(ring); 2049 request->seqno = intel_ring_get_seqno(ring);
2040 request->ring = ring; 2050 request->ring = ring;
2051 request->head = request_start;
2041 request->tail = request_ring_position; 2052 request->tail = request_ring_position;
2053 request->ctx = ring->last_context;
2054 request->batch_obj = obj;
2055
2056 /* Whilst this request exists, batch_obj will be on the
2057 * active_list, and so will hold the active reference. Only when this
 2058 * request is retired will the batch_obj be moved onto the
2059 * inactive_list and lose its active reference. Hence we do not need
2060 * to explicitly hold another reference here.
2061 */
2062
2063 if (request->ctx)
2064 i915_gem_context_reference(request->ctx);
2065
2042 request->emitted_jiffies = jiffies; 2066 request->emitted_jiffies = jiffies;
2043 was_empty = list_empty(&ring->request_list); 2067 was_empty = list_empty(&ring->request_list);
2044 list_add_tail(&request->list, &ring->request_list); 2068 list_add_tail(&request->list, &ring->request_list);
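[Editor's note: with this change every request records where its commands start (head, sampled from the ring tail before emitting) and end (tail), plus the context and batch object it belongs to; the hang checker later in this patch uses exactly these fields to assign blame. A rough sketch of the record being captured — field names follow the hunk, the types are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the bookkeeping added to __i915_add_request(). */
struct request_record {
    uint32_t seqno;      /* breadcrumb written when the request completes */
    uint32_t head;       /* ring offset where the request's commands begin */
    uint32_t tail;       /* ring offset just past the request's commands */
    const void *ctx;     /* hardware context, referenced while queued */
    const void *batch;   /* batch buffer, kept alive via the active list */
};

int main(void)
{
    /* request_start is sampled from the ring tail *before* emitting,
     * request_ring_position after; together they bound the request. */
    struct request_record req = {
        .seqno = 42, .head = 0x100, .tail = 0x180,
        .ctx = "last_context", .batch = "batch_obj",
    };

    printf("request %u occupies [0x%x, 0x%x)\n", req.seqno, req.head, req.tail);
    return 0;
}
]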
@@ -2091,9 +2115,114 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2091 spin_unlock(&file_priv->mm.lock); 2115 spin_unlock(&file_priv->mm.lock);
2092} 2116}
2093 2117
2118static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
2119{
2120 if (acthd >= obj->gtt_offset &&
2121 acthd < obj->gtt_offset + obj->base.size)
2122 return true;
2123
2124 return false;
2125}
2126
2127static bool i915_head_inside_request(const u32 acthd_unmasked,
2128 const u32 request_start,
2129 const u32 request_end)
2130{
2131 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2132
2133 if (request_start < request_end) {
2134 if (acthd >= request_start && acthd < request_end)
2135 return true;
2136 } else if (request_start > request_end) {
2137 if (acthd >= request_start || acthd < request_end)
2138 return true;
2139 }
2140
2141 return false;
2142}
2143
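[Editor's note: the request_start > request_end branch above handles the ring buffer wrapping — a request's commands can straddle the end of the ring, in which case the occupied range is [start, ring size) plus [0, end). A self-contained sketch of the same containment test; the kernel masks ACTHD with HEAD_ADDR first, here plain integers stand in:

#include <assert.h>
#include <stdint.h>

/* Same logic as i915_head_inside_request(): treat [start, end) as a
 * range on a circular buffer, which may wrap past the end. */
static int head_inside(uint32_t head, uint32_t start, uint32_t end)
{
    if (start < end)
        return head >= start && head < end;
    if (start > end)                 /* request wrapped around */
        return head >= start || head < end;
    return 0;                        /* empty range */
}

int main(void)
{
    /* Linear case: request occupies [0x100, 0x200). */
    assert(head_inside(0x180, 0x100, 0x200));
    assert(!head_inside(0x200, 0x100, 0x200));

    /* Wrapped case: request occupies [0xf00, ring end) + [0, 0x080). */
    assert(head_inside(0xf80, 0xf00, 0x080));
    assert(head_inside(0x040, 0xf00, 0x080));
    assert(!head_inside(0x800, 0xf00, 0x080));
    return 0;
}
]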
2144static bool i915_request_guilty(struct drm_i915_gem_request *request,
2145 const u32 acthd, bool *inside)
2146{
 2147 /* There is a possibility that the unmasked head address,
 2148 * pointing inside the ring, matches the batch_obj address range.
 2149 * However, this is extremely unlikely.
2150 */
2151
2152 if (request->batch_obj) {
2153 if (i915_head_inside_object(acthd, request->batch_obj)) {
2154 *inside = true;
2155 return true;
2156 }
2157 }
2158
2159 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2160 *inside = false;
2161 return true;
2162 }
2163
2164 return false;
2165}
2166
2167static void i915_set_reset_status(struct intel_ring_buffer *ring,
2168 struct drm_i915_gem_request *request,
2169 u32 acthd)
2170{
2171 struct i915_ctx_hang_stats *hs = NULL;
2172 bool inside, guilty;
2173
2174 /* Innocent until proven guilty */
2175 guilty = false;
2176
2177 if (ring->hangcheck.action != wait &&
2178 i915_request_guilty(request, acthd, &inside)) {
2179 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
2180 ring->name,
2181 inside ? "inside" : "flushing",
2182 request->batch_obj ?
2183 request->batch_obj->gtt_offset : 0,
2184 request->ctx ? request->ctx->id : 0,
2185 acthd);
2186
2187 guilty = true;
2188 }
2189
2190 /* If contexts are disabled or this is the default context, use
 2191 * file_priv->hang_stats
2192 */
2193 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2194 hs = &request->ctx->hang_stats;
2195 else if (request->file_priv)
2196 hs = &request->file_priv->hang_stats;
2197
2198 if (hs) {
2199 if (guilty)
2200 hs->batch_active++;
2201 else
2202 hs->batch_pending++;
2203 }
2204}
2205
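[Editor's note: i915_set_reset_status() above boils down to a two-counter score per context (or per file, when contexts are unavailable): batch_active counts requests judged guilty of the hang, batch_pending counts innocent ones merely queued behind it. A toy model of that bookkeeping:

#include <stdio.h>

struct hang_stats {
    unsigned batch_active;   /* batches judged guilty of a GPU hang */
    unsigned batch_pending;  /* innocent batches caught behind a hang */
};

static void account_reset(struct hang_stats *hs, int guilty)
{
    if (guilty)
        hs->batch_active++;
    else
        hs->batch_pending++;
}

int main(void)
{
    struct hang_stats hs = { 0, 0 };

    account_reset(&hs, 1);  /* the hanging batch */
    account_reset(&hs, 0);  /* two requests queued behind it */
    account_reset(&hs, 0);
    printf("active=%u pending=%u\n", hs.batch_active, hs.batch_pending);
    return 0;
}
]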
2206static void i915_gem_free_request(struct drm_i915_gem_request *request)
2207{
2208 list_del(&request->list);
2209 i915_gem_request_remove_from_client(request);
2210
2211 if (request->ctx)
2212 i915_gem_context_unreference(request->ctx);
2213
2214 kfree(request);
2215}
2216
2094static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, 2217static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2095 struct intel_ring_buffer *ring) 2218 struct intel_ring_buffer *ring)
2096{ 2219{
2220 u32 completed_seqno;
2221 u32 acthd;
2222
2223 acthd = intel_ring_get_active_head(ring);
2224 completed_seqno = ring->get_seqno(ring, false);
2225
2097 while (!list_empty(&ring->request_list)) { 2226 while (!list_empty(&ring->request_list)) {
2098 struct drm_i915_gem_request *request; 2227 struct drm_i915_gem_request *request;
2099 2228
@@ -2101,9 +2230,10 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2101 struct drm_i915_gem_request, 2230 struct drm_i915_gem_request,
2102 list); 2231 list);
2103 2232
2104 list_del(&request->list); 2233 if (request->seqno > completed_seqno)
2105 i915_gem_request_remove_from_client(request); 2234 i915_set_reset_status(ring, request, acthd);
2106 kfree(request); 2235
2236 i915_gem_free_request(request);
2107 } 2237 }
2108 2238
2109 while (!list_empty(&ring->active_list)) { 2239 while (!list_empty(&ring->active_list)) {
@@ -2195,9 +2325,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2195 */ 2325 */
2196 ring->last_retired_head = request->tail; 2326 ring->last_retired_head = request->tail;
2197 2327
2198 list_del(&request->list); 2328 i915_gem_free_request(request);
2199 i915_gem_request_remove_from_client(request);
2200 kfree(request);
2201 } 2329 }
2202 2330
2203 /* Move any buffers on the active list that are no longer referenced 2331 /* Move any buffers on the active list that are no longer referenced
@@ -2264,7 +2392,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
2264 idle = true; 2392 idle = true;
2265 for_each_ring(ring, dev_priv, i) { 2393 for_each_ring(ring, dev_priv, i) {
2266 if (ring->gpu_caches_dirty) 2394 if (ring->gpu_caches_dirty)
2267 i915_add_request(ring, NULL, NULL); 2395 i915_add_request(ring, NULL);
2268 2396
2269 idle &= list_empty(&ring->request_list); 2397 idle &= list_empty(&ring->request_list);
2270 } 2398 }
@@ -2496,9 +2624,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2496 obj->has_aliasing_ppgtt_mapping = 0; 2624 obj->has_aliasing_ppgtt_mapping = 0;
2497 } 2625 }
2498 i915_gem_gtt_finish_object(obj); 2626 i915_gem_gtt_finish_object(obj);
2627 i915_gem_object_unpin_pages(obj);
2499 2628
2500 list_del(&obj->mm_list); 2629 list_del(&obj->mm_list);
2501 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); 2630 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2502 /* Avoid an unnecessary call to unbind on rebind. */ 2631 /* Avoid an unnecessary call to unbind on rebind. */
2503 obj->map_and_fenceable = true; 2632 obj->map_and_fenceable = true;
2504 2633
@@ -2678,18 +2807,33 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
2678 return fence - dev_priv->fence_regs; 2807 return fence - dev_priv->fence_regs;
2679} 2808}
2680 2809
2810struct write_fence {
2811 struct drm_device *dev;
2812 struct drm_i915_gem_object *obj;
2813 int fence;
2814};
2815
2681static void i915_gem_write_fence__ipi(void *data) 2816static void i915_gem_write_fence__ipi(void *data)
2682{ 2817{
2818 struct write_fence *args = data;
2819
2820 /* Required for SNB+ with LLC */
2683 wbinvd(); 2821 wbinvd();
2822
2823 /* Required for VLV */
2824 i915_gem_write_fence(args->dev, args->fence, args->obj);
2684} 2825}
2685 2826
2686static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 2827static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2687 struct drm_i915_fence_reg *fence, 2828 struct drm_i915_fence_reg *fence,
2688 bool enable) 2829 bool enable)
2689{ 2830{
2690 struct drm_device *dev = obj->base.dev; 2831 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2691 struct drm_i915_private *dev_priv = dev->dev_private; 2832 struct write_fence args = {
2692 int fence_reg = fence_number(dev_priv, fence); 2833 .dev = obj->base.dev,
2834 .fence = fence_number(dev_priv, fence),
2835 .obj = enable ? obj : NULL,
2836 };
2693 2837
2694 /* In order to fully serialize access to the fenced region and 2838 /* In order to fully serialize access to the fenced region and
2695 * the update to the fence register we need to take extreme 2839 * the update to the fence register we need to take extreme
@@ -2700,13 +2844,19 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2700 * SNB+ we need to take a step further and emit an explicit wbinvd() 2844 * SNB+ we need to take a step further and emit an explicit wbinvd()
2701 * on each processor in order to manually flush all memory 2845 * on each processor in order to manually flush all memory
2702 * transactions before updating the fence register. 2846 * transactions before updating the fence register.
2847 *
 2848 * However, Valleyview complicates matters. There the wbinvd is
2849 * insufficient and unlike SNB/IVB requires the serialising
2850 * register write. (Note that that register write by itself is
2851 * conversely not sufficient for SNB+.) To compromise, we do both.
2703 */ 2852 */
2704 if (HAS_LLC(obj->base.dev)) 2853 if (INTEL_INFO(args.dev)->gen >= 6)
2705 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); 2854 on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
2706 i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL); 2855 else
2856 i915_gem_write_fence(args.dev, args.fence, args.obj);
2707 2857
2708 if (enable) { 2858 if (enable) {
2709 obj->fence_reg = fence_reg; 2859 obj->fence_reg = args.fence;
2710 fence->obj = obj; 2860 fence->obj = obj;
2711 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); 2861 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2712 } else { 2862 } else {
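[Editor's note: because on_each_cpu() passes a single void * to its callback, the fence update above bundles everything the IPI handler needs — device, fence number, object — into a stack-allocated write_fence struct, so the handler can do both the wbinvd() and the serialising fence write. The pattern is generic C: pack arguments into a struct, pass its address, unpack in the callback. A hedged standalone sketch, with a plain loop standing in for on_each_cpu():

#include <stdio.h>

struct write_fence_args {
    const char *dev;  /* stands in for struct drm_device * */
    int fence;        /* fence register number */
    int enable;       /* whether the fence is being set or cleared */
};

/* Callback with the void * calling convention used by on_each_cpu(). */
static void write_fence_cb(void *data)
{
    struct write_fence_args *args = data;
    printf("dev=%s fence=%d enable=%d\n", args->dev, args->fence, args->enable);
}

static void run_on_each_cpu(void (*fn)(void *), void *data, int ncpus)
{
    for (int cpu = 0; cpu < ncpus; cpu++)  /* the kernel does this via IPIs */
        fn(data);
}

int main(void)
{
    struct write_fence_args args = { .dev = "card0", .fence = 3, .enable = 1 };
    run_on_each_cpu(write_fence_cb, &args, 4);
    return 0;
}
]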
@@ -2885,7 +3035,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
2885 struct drm_i915_gem_object *obj; 3035 struct drm_i915_gem_object *obj;
2886 int err = 0; 3036 int err = 0;
2887 3037
2888 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 3038 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
2889 if (obj->gtt_space == NULL) { 3039 if (obj->gtt_space == NULL) {
2890 printk(KERN_ERR "object found on GTT list with no space reserved\n"); 3040 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2891 err++; 3041 err++;
@@ -2932,6 +3082,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2932 struct drm_mm_node *node; 3082 struct drm_mm_node *node;
2933 u32 size, fence_size, fence_alignment, unfenced_alignment; 3083 u32 size, fence_size, fence_alignment, unfenced_alignment;
2934 bool mappable, fenceable; 3084 bool mappable, fenceable;
3085 size_t gtt_max = map_and_fenceable ?
3086 dev_priv->gtt.mappable_end : dev_priv->gtt.total;
2935 int ret; 3087 int ret;
2936 3088
2937 fence_size = i915_gem_get_gtt_size(dev, 3089 fence_size = i915_gem_get_gtt_size(dev,
@@ -2958,9 +3110,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2958 /* If the object is bigger than the entire aperture, reject it early 3110 /* If the object is bigger than the entire aperture, reject it early
2959 * before evicting everything in a vain attempt to find space. 3111 * before evicting everything in a vain attempt to find space.
2960 */ 3112 */
2961 if (obj->base.size > 3113 if (obj->base.size > gtt_max) {
2962 (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) { 3114 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
2963 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 3115 obj->base.size,
3116 map_and_fenceable ? "mappable" : "total",
3117 gtt_max);
2964 return -E2BIG; 3118 return -E2BIG;
2965 } 3119 }
2966 3120
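[Editor's note: the rewritten check computes the relevant upper bound once — the mappable aperture when a CPU-visible, fenceable mapping was requested, the full GTT otherwise. The same gtt_max value is then reused as the end of the allocation range in the following hunk, collapsing the two drm_mm insert call sites into one. A small sketch of the bound selection and early reject; the sizes are made up:

#include <stdio.h>
#include <stddef.h>

static int check_fits(size_t obj_size, int map_and_fenceable,
                      size_t mappable_end, size_t total)
{
    size_t gtt_max = map_and_fenceable ? mappable_end : total;

    if (obj_size > gtt_max) {
        fprintf(stderr, "object=%zu > %s aperture=%zu\n", obj_size,
                map_and_fenceable ? "mappable" : "total", gtt_max);
        return -1;  /* the driver returns -E2BIG here */
    }
    return 0;
}

int main(void)
{
    /* e.g. a 256 MiB mappable aperture inside a 2 GiB GTT */
    size_t mappable = 256ul << 20, total = 2ul << 30;

    printf("%d\n", check_fits(512ul << 20, 1, mappable, total)); /* -1 */
    printf("%d\n", check_fits(512ul << 20, 0, mappable, total)); /*  0 */
    return 0;
}
]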
@@ -2976,14 +3130,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2976 return -ENOMEM; 3130 return -ENOMEM;
2977 } 3131 }
2978 3132
2979 search_free: 3133search_free:
2980 if (map_and_fenceable) 3134 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2981 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, 3135 size, alignment,
2982 size, alignment, obj->cache_level, 3136 obj->cache_level, 0, gtt_max);
2983 0, dev_priv->gtt.mappable_end);
2984 else
2985 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2986 size, alignment, obj->cache_level);
2987 if (ret) { 3137 if (ret) {
2988 ret = i915_gem_evict_something(dev, size, alignment, 3138 ret = i915_gem_evict_something(dev, size, alignment,
2989 obj->cache_level, 3139 obj->cache_level,
@@ -3009,7 +3159,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3009 return ret; 3159 return ret;
3010 } 3160 }
3011 3161
3012 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); 3162 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3013 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 3163 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3014 3164
3015 obj->gtt_space = node; 3165 obj->gtt_space = node;
@@ -3024,7 +3174,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3024 3174
3025 obj->map_and_fenceable = mappable && fenceable; 3175 obj->map_and_fenceable = mappable && fenceable;
3026 3176
3027 i915_gem_object_unpin_pages(obj);
3028 trace_i915_gem_object_bind(obj, map_and_fenceable); 3177 trace_i915_gem_object_bind(obj, map_and_fenceable);
3029 i915_gem_verify_gtt(dev); 3178 i915_gem_verify_gtt(dev);
3030 return 0; 3179 return 0;
@@ -3724,7 +3873,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
3724 const struct drm_i915_gem_object_ops *ops) 3873 const struct drm_i915_gem_object_ops *ops)
3725{ 3874{
3726 INIT_LIST_HEAD(&obj->mm_list); 3875 INIT_LIST_HEAD(&obj->mm_list);
3727 INIT_LIST_HEAD(&obj->gtt_list); 3876 INIT_LIST_HEAD(&obj->global_list);
3728 INIT_LIST_HEAD(&obj->ring_list); 3877 INIT_LIST_HEAD(&obj->ring_list);
3729 INIT_LIST_HEAD(&obj->exec_list); 3878 INIT_LIST_HEAD(&obj->exec_list);
3730 3879
@@ -3824,7 +3973,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3824 dev_priv->mm.interruptible = was_interruptible; 3973 dev_priv->mm.interruptible = was_interruptible;
3825 } 3974 }
3826 3975
3827 obj->pages_pin_count = 0; 3976 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
3977 * before progressing. */
3978 if (obj->stolen)
3979 i915_gem_object_unpin_pages(obj);
3980
3981 if (WARN_ON(obj->pages_pin_count))
3982 obj->pages_pin_count = 0;
3828 i915_gem_object_put_pages(obj); 3983 i915_gem_object_put_pages(obj);
3829 i915_gem_object_free_mmap_offset(obj); 3984 i915_gem_object_free_mmap_offset(obj);
3830 i915_gem_object_release_stolen(obj); 3985 i915_gem_object_release_stolen(obj);
@@ -3977,12 +4132,21 @@ static int i915_gem_init_rings(struct drm_device *dev)
3977 goto cleanup_bsd_ring; 4132 goto cleanup_bsd_ring;
3978 } 4133 }
3979 4134
4135 if (HAS_VEBOX(dev)) {
4136 ret = intel_init_vebox_ring_buffer(dev);
4137 if (ret)
4138 goto cleanup_blt_ring;
4139 }
4140
4141
3980 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); 4142 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
3981 if (ret) 4143 if (ret)
3982 goto cleanup_blt_ring; 4144 goto cleanup_vebox_ring;
3983 4145
3984 return 0; 4146 return 0;
3985 4147
4148cleanup_vebox_ring:
4149 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
3986cleanup_blt_ring: 4150cleanup_blt_ring:
3987 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); 4151 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
3988cleanup_bsd_ring: 4152cleanup_bsd_ring:
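[Editor's note: adding the VEBOX ring also extends the error-unwind ladder above — each successfully initialised ring gains a cleanup label, and a later failure jumps to the label that tears down everything initialised so far, in reverse order. A generic, runnable sketch of that goto idiom:

#include <stdio.h>

static int init_step(const char *name, int fail)
{
    if (fail) { fprintf(stderr, "%s failed\n", name); return -1; }
    printf("init %s\n", name);
    return 0;
}

static void cleanup_step(const char *name) { printf("cleanup %s\n", name); }

/* Mirrors the i915_gem_init_rings() unwind order: a failure at step N
 * falls through the cleanup labels for steps N-1 .. 1. */
static int init_rings(int fail_at)
{
    int ret;

    if ((ret = init_step("render", fail_at == 1)))
        return ret;
    if ((ret = init_step("bsd", fail_at == 2)))
        goto cleanup_render;
    if ((ret = init_step("blt", fail_at == 3)))
        goto cleanup_bsd;
    if ((ret = init_step("vebox", fail_at == 4)))
        goto cleanup_blt;
    return 0;

cleanup_blt:
    cleanup_step("blt");
cleanup_bsd:
    cleanup_step("bsd");
cleanup_render:
    cleanup_step("render");
    return ret;
}

int main(void) { return init_rings(4) ? 1 : 0; }
]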
@@ -4456,10 +4620,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4456 } 4620 }
4457 4621
4458 cnt = 0; 4622 cnt = 0;
4459 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) 4623 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4460 if (obj->pages_pin_count == 0) 4624 if (obj->pages_pin_count == 0)
4461 cnt += obj->base.size >> PAGE_SHIFT; 4625 cnt += obj->base.size >> PAGE_SHIFT;
4462 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list) 4626 list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
4463 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4627 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4464 cnt += obj->base.size >> PAGE_SHIFT; 4628 cnt += obj->base.size >> PAGE_SHIFT;
4465 4629
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a1e8ecb6adf6..51b7a2171cae 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -113,7 +113,7 @@ static int get_context_size(struct drm_device *dev)
113 case 7: 113 case 7:
114 reg = I915_READ(GEN7_CXT_SIZE); 114 reg = I915_READ(GEN7_CXT_SIZE);
115 if (IS_HASWELL(dev)) 115 if (IS_HASWELL(dev))
116 ret = HSW_CXT_TOTAL_SIZE(reg) * 64; 116 ret = HSW_CXT_TOTAL_SIZE;
117 else 117 else
118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
119 break; 119 break;
@@ -124,10 +124,10 @@ static int get_context_size(struct drm_device *dev)
124 return ret; 124 return ret;
125} 125}
126 126
127static void do_destroy(struct i915_hw_context *ctx) 127void i915_gem_context_free(struct kref *ctx_ref)
128{ 128{
129 if (ctx->file_priv) 129 struct i915_hw_context *ctx = container_of(ctx_ref,
130 idr_remove(&ctx->file_priv->context_idr, ctx->id); 130 typeof(*ctx), ref);
131 131
132 drm_gem_object_unreference(&ctx->obj->base); 132 drm_gem_object_unreference(&ctx->obj->base);
133 kfree(ctx); 133 kfree(ctx);
@@ -145,6 +145,7 @@ create_hw_context(struct drm_device *dev,
145 if (ctx == NULL) 145 if (ctx == NULL)
146 return ERR_PTR(-ENOMEM); 146 return ERR_PTR(-ENOMEM);
147 147
148 kref_init(&ctx->ref);
148 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); 149 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
149 if (ctx->obj == NULL) { 150 if (ctx->obj == NULL) {
150 kfree(ctx); 151 kfree(ctx);
@@ -155,7 +156,8 @@ create_hw_context(struct drm_device *dev,
155 if (INTEL_INFO(dev)->gen >= 7) { 156 if (INTEL_INFO(dev)->gen >= 7) {
156 ret = i915_gem_object_set_cache_level(ctx->obj, 157 ret = i915_gem_object_set_cache_level(ctx->obj,
157 I915_CACHE_LLC_MLC); 158 I915_CACHE_LLC_MLC);
158 if (ret) 159 /* Failure shouldn't ever happen this early */
160 if (WARN_ON(ret))
159 goto err_out; 161 goto err_out;
160 } 162 }
161 163
@@ -169,18 +171,18 @@ create_hw_context(struct drm_device *dev,
169 if (file_priv == NULL) 171 if (file_priv == NULL)
170 return ctx; 172 return ctx;
171 173
172 ctx->file_priv = file_priv;
173
174 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0, 174 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
175 GFP_KERNEL); 175 GFP_KERNEL);
176 if (ret < 0) 176 if (ret < 0)
177 goto err_out; 177 goto err_out;
178
179 ctx->file_priv = file_priv;
178 ctx->id = ret; 180 ctx->id = ret;
179 181
180 return ctx; 182 return ctx;
181 183
182err_out: 184err_out:
183 do_destroy(ctx); 185 i915_gem_context_unreference(ctx);
184 return ERR_PTR(ret); 186 return ERR_PTR(ret);
185} 187}
186 188
@@ -213,12 +215,16 @@ static int create_default_context(struct drm_i915_private *dev_priv)
213 */ 215 */
214 dev_priv->ring[RCS].default_context = ctx; 216 dev_priv->ring[RCS].default_context = ctx;
215 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); 217 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
216 if (ret) 218 if (ret) {
219 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
217 goto err_destroy; 220 goto err_destroy;
221 }
218 222
219 ret = do_switch(ctx); 223 ret = do_switch(ctx);
220 if (ret) 224 if (ret) {
225 DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
221 goto err_unpin; 226 goto err_unpin;
227 }
222 228
223 DRM_DEBUG_DRIVER("Default HW context loaded\n"); 229 DRM_DEBUG_DRIVER("Default HW context loaded\n");
224 return 0; 230 return 0;
@@ -226,7 +232,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
226err_unpin: 232err_unpin:
227 i915_gem_object_unpin(ctx->obj); 233 i915_gem_object_unpin(ctx->obj);
228err_destroy: 234err_destroy:
229 do_destroy(ctx); 235 i915_gem_context_unreference(ctx);
230 return ret; 236 return ret;
231} 237}
232 238
@@ -236,6 +242,7 @@ void i915_gem_context_init(struct drm_device *dev)
236 242
237 if (!HAS_HW_CONTEXTS(dev)) { 243 if (!HAS_HW_CONTEXTS(dev)) {
238 dev_priv->hw_contexts_disabled = true; 244 dev_priv->hw_contexts_disabled = true;
245 DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
239 return; 246 return;
240 } 247 }
241 248
@@ -248,11 +255,13 @@ void i915_gem_context_init(struct drm_device *dev)
248 255
249 if (dev_priv->hw_context_size > (1<<20)) { 256 if (dev_priv->hw_context_size > (1<<20)) {
250 dev_priv->hw_contexts_disabled = true; 257 dev_priv->hw_contexts_disabled = true;
258 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
251 return; 259 return;
252 } 260 }
253 261
254 if (create_default_context(dev_priv)) { 262 if (create_default_context(dev_priv)) {
255 dev_priv->hw_contexts_disabled = true; 263 dev_priv->hw_contexts_disabled = true;
264 DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
256 return; 265 return;
257 } 266 }
258 267
@@ -262,6 +271,7 @@ void i915_gem_context_init(struct drm_device *dev)
262void i915_gem_context_fini(struct drm_device *dev) 271void i915_gem_context_fini(struct drm_device *dev)
263{ 272{
264 struct drm_i915_private *dev_priv = dev->dev_private; 273 struct drm_i915_private *dev_priv = dev->dev_private;
274 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
265 275
266 if (dev_priv->hw_contexts_disabled) 276 if (dev_priv->hw_contexts_disabled)
267 return; 277 return;
@@ -271,9 +281,16 @@ void i915_gem_context_fini(struct drm_device *dev)
271 * other code, leading to spurious errors. */ 281 * other code, leading to spurious errors. */
272 intel_gpu_reset(dev); 282 intel_gpu_reset(dev);
273 283
274 i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj); 284 i915_gem_object_unpin(dctx->obj);
275 285
276 do_destroy(dev_priv->ring[RCS].default_context); 286 /* When default context is created and switched to, base object refcount
287 * will be 2 (+1 from object creation and +1 from do_switch()).
288 * i915_gem_context_fini() will be called after gpu_idle() has switched
289 * to default context. So we need to unreference the base object once
290 * to offset the do_switch part, so that i915_gem_context_unreference()
291 * can then free the base object correctly. */
292 drm_gem_object_unreference(&dctx->obj->base);
293 i915_gem_context_unreference(dctx);
277} 294}
278 295
279static int context_idr_cleanup(int id, void *p, void *data) 296static int context_idr_cleanup(int id, void *p, void *data)
@@ -282,11 +299,38 @@ static int context_idr_cleanup(int id, void *p, void *data)
282 299
283 BUG_ON(id == DEFAULT_CONTEXT_ID); 300 BUG_ON(id == DEFAULT_CONTEXT_ID);
284 301
285 do_destroy(ctx); 302 i915_gem_context_unreference(ctx);
286
287 return 0; 303 return 0;
288} 304}
289 305
306struct i915_ctx_hang_stats *
307i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
308 struct drm_file *file,
309 u32 id)
310{
311 struct drm_i915_private *dev_priv = ring->dev->dev_private;
312 struct drm_i915_file_private *file_priv = file->driver_priv;
313 struct i915_hw_context *to;
314
315 if (dev_priv->hw_contexts_disabled)
316 return ERR_PTR(-ENOENT);
317
318 if (ring->id != RCS)
319 return ERR_PTR(-EINVAL);
320
321 if (file == NULL)
322 return ERR_PTR(-EINVAL);
323
324 if (id == DEFAULT_CONTEXT_ID)
325 return &file_priv->hang_stats;
326
327 to = i915_gem_context_get(file->driver_priv, id);
328 if (to == NULL)
329 return ERR_PTR(-ENOENT);
330
331 return &to->hang_stats;
332}
333
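[Editor's note: i915_gem_context_get_hang_stats() above returns either a valid pointer or an encoded errno — ERR_PTR(-ENOENT), ERR_PTR(-EINVAL) — the kernel's standard way of multiplexing an error code onto a pointer return. A standalone imitation of that convention; the real macros live in linux/err.h, and the errno value below is a stand-in:

#include <stdio.h>
#include <stdint.h>

/* Userspace imitation of the kernel's ERR_PTR()/IS_ERR() helpers:
 * small negative errnos are folded into the top of the address space. */
#define MAX_ERRNO 4095
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long ptr_err(const void *p) { return (long)(intptr_t)p; }

struct hang_stats { unsigned batch_active, batch_pending; };

static struct hang_stats *lookup_hang_stats(struct hang_stats *table,
                                            int nctx, int id)
{
    if (id < 0 || id >= nctx)
        return err_ptr(-2);  /* stands in for ERR_PTR(-ENOENT) */
    return &table[id];
}

int main(void)
{
    struct hang_stats table[2] = { { 0, 0 } };
    struct hang_stats *hs = lookup_hang_stats(table, 2, 5);

    if (is_err(hs))
        printf("lookup failed: %ld\n", ptr_err(hs));
    return 0;
}
]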
290void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) 334void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
291{ 335{
292 struct drm_i915_file_private *file_priv = file->driver_priv; 336 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -325,6 +369,7 @@ mi_set_context(struct intel_ring_buffer *ring,
325 if (ret) 369 if (ret)
326 return ret; 370 return ret;
327 371
372 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
328 if (IS_GEN7(ring->dev)) 373 if (IS_GEN7(ring->dev))
329 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); 374 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
330 else 375 else
@@ -353,13 +398,13 @@ mi_set_context(struct intel_ring_buffer *ring,
353static int do_switch(struct i915_hw_context *to) 398static int do_switch(struct i915_hw_context *to)
354{ 399{
355 struct intel_ring_buffer *ring = to->ring; 400 struct intel_ring_buffer *ring = to->ring;
356 struct drm_i915_gem_object *from_obj = ring->last_context_obj; 401 struct i915_hw_context *from = ring->last_context;
357 u32 hw_flags = 0; 402 u32 hw_flags = 0;
358 int ret; 403 int ret;
359 404
360 BUG_ON(from_obj != NULL && from_obj->pin_count == 0); 405 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
361 406
362 if (from_obj == to->obj) 407 if (from == to)
363 return 0; 408 return 0;
364 409
365 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); 410 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -382,7 +427,7 @@ static int do_switch(struct i915_hw_context *to)
382 427
383 if (!to->is_initialized || is_default_context(to)) 428 if (!to->is_initialized || is_default_context(to))
384 hw_flags |= MI_RESTORE_INHIBIT; 429 hw_flags |= MI_RESTORE_INHIBIT;
385 else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ 430 else if (WARN_ON_ONCE(from == to)) /* not yet expected */
386 hw_flags |= MI_FORCE_RESTORE; 431 hw_flags |= MI_FORCE_RESTORE;
387 432
388 ret = mi_set_context(ring, to, hw_flags); 433 ret = mi_set_context(ring, to, hw_flags);
@@ -397,9 +442,9 @@ static int do_switch(struct i915_hw_context *to)
397 * is a bit suboptimal because the retiring can occur simply after the 442 * is a bit suboptimal because the retiring can occur simply after the
398 * MI_SET_CONTEXT instead of when the next seqno has completed. 443 * MI_SET_CONTEXT instead of when the next seqno has completed.
399 */ 444 */
400 if (from_obj != NULL) { 445 if (from != NULL) {
401 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 446 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
402 i915_gem_object_move_to_active(from_obj, ring); 447 i915_gem_object_move_to_active(from->obj, ring);
403 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 448 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
404 * whole damn pipeline, we don't need to explicitly mark the 449 * whole damn pipeline, we don't need to explicitly mark the
405 * object dirty. The only exception is that the context must be 450 * object dirty. The only exception is that the context must be
@@ -407,15 +452,26 @@ static int do_switch(struct i915_hw_context *to)
407 * able to defer doing this until we know the object would be 452 * able to defer doing this until we know the object would be
408 * swapped, but there is no way to do that yet. 453 * swapped, but there is no way to do that yet.
409 */ 454 */
410 from_obj->dirty = 1; 455 from->obj->dirty = 1;
411 BUG_ON(from_obj->ring != ring); 456 BUG_ON(from->obj->ring != ring);
412 i915_gem_object_unpin(from_obj); 457
458 ret = i915_add_request(ring, NULL);
459 if (ret) {
460 /* Too late, we've already scheduled a context switch.
461 * Try to undo the change so that the hw state is
 462 * consistent with our tracking. In case of emergency,
463 * scream.
464 */
465 WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
466 return ret;
467 }
413 468
414 drm_gem_object_unreference(&from_obj->base); 469 i915_gem_object_unpin(from->obj);
470 i915_gem_context_unreference(from);
415 } 471 }
416 472
417 drm_gem_object_reference(&to->obj->base); 473 i915_gem_context_reference(to);
418 ring->last_context_obj = to->obj; 474 ring->last_context = to;
419 to->is_initialized = true; 475 to->is_initialized = true;
420 476
421 return 0; 477 return 0;
@@ -444,6 +500,8 @@ int i915_switch_context(struct intel_ring_buffer *ring,
444 if (dev_priv->hw_contexts_disabled) 500 if (dev_priv->hw_contexts_disabled)
445 return 0; 501 return 0;
446 502
503 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
504
447 if (ring != &dev_priv->ring[RCS]) 505 if (ring != &dev_priv->ring[RCS])
448 return 0; 506 return 0;
449 507
@@ -512,8 +570,8 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
512 return -ENOENT; 570 return -ENOENT;
513 } 571 }
514 572
515 do_destroy(ctx); 573 idr_remove(&ctx->file_priv->context_idr, ctx->id);
516 574 i915_gem_context_unreference(ctx);
517 mutex_unlock(&dev->struct_mutex); 575 mutex_unlock(&dev->struct_mutex);
518 576
519 DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); 577 DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 117ce3813681..87a3227e5179 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -786,7 +786,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
786 obj->dirty = 1; 786 obj->dirty = 1;
787 obj->last_write_seqno = intel_ring_get_seqno(ring); 787 obj->last_write_seqno = intel_ring_get_seqno(ring);
788 if (obj->pin_count) /* check for potential scanout */ 788 if (obj->pin_count) /* check for potential scanout */
789 intel_mark_fb_busy(obj); 789 intel_mark_fb_busy(obj, ring);
790 } 790 }
791 791
792 trace_i915_gem_object_change_domain(obj, old_read, old_write); 792 trace_i915_gem_object_change_domain(obj, old_read, old_write);
@@ -796,13 +796,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
796static void 796static void
797i915_gem_execbuffer_retire_commands(struct drm_device *dev, 797i915_gem_execbuffer_retire_commands(struct drm_device *dev,
798 struct drm_file *file, 798 struct drm_file *file,
799 struct intel_ring_buffer *ring) 799 struct intel_ring_buffer *ring,
800 struct drm_i915_gem_object *obj)
800{ 801{
801 /* Unconditionally force add_request to emit a full flush. */ 802 /* Unconditionally force add_request to emit a full flush. */
802 ring->gpu_caches_dirty = true; 803 ring->gpu_caches_dirty = true;
803 804
804 /* Add a breadcrumb for the completion of the batch buffer */ 805 /* Add a breadcrumb for the completion of the batch buffer */
805 (void)i915_add_request(ring, file, NULL); 806 (void)__i915_add_request(ring, file, obj, NULL);
806} 807}
807 808
808static int 809static int
@@ -885,6 +886,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
885 return -EPERM; 886 return -EPERM;
886 } 887 }
887 break; 888 break;
889 case I915_EXEC_VEBOX:
890 ring = &dev_priv->ring[VECS];
891 if (ctx_id != 0) {
892 DRM_DEBUG("Ring %s doesn't support contexts\n",
893 ring->name);
894 return -EPERM;
895 }
896 break;
897
888 default: 898 default:
889 DRM_DEBUG("execbuf with unknown ring: %d\n", 899 DRM_DEBUG("execbuf with unknown ring: %d\n",
890 (int)(args->flags & I915_EXEC_RING_MASK)); 900 (int)(args->flags & I915_EXEC_RING_MASK));
@@ -1074,7 +1084,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1074 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1084 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1075 1085
1076 i915_gem_execbuffer_move_to_active(&eb->objects, ring); 1086 i915_gem_execbuffer_move_to_active(&eb->objects, ring);
1077 i915_gem_execbuffer_retire_commands(dev, file, ring); 1087 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1078 1088
1079err: 1089err:
1080 eb_destroy(eb); 1090 eb_destroy(eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bdb0d7717bc7..5101ab6869b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,6 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31typedef uint32_t gen6_gtt_pte_t;
32
33/* PPGTT stuff */ 31/* PPGTT stuff */
34#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 32#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
35 33
@@ -44,29 +42,22 @@ typedef uint32_t gen6_gtt_pte_t;
44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 42#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 43#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
46 44
47static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, 45static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
48 dma_addr_t addr, 46 dma_addr_t addr,
49 enum i915_cache_level level) 47 enum i915_cache_level level)
50{ 48{
51 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 49 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
52 pte |= GEN6_PTE_ADDR_ENCODE(addr); 50 pte |= GEN6_PTE_ADDR_ENCODE(addr);
53 51
54 switch (level) { 52 switch (level) {
55 case I915_CACHE_LLC_MLC: 53 case I915_CACHE_LLC_MLC:
56 /* Haswell doesn't set L3 this way */ 54 pte |= GEN6_PTE_CACHE_LLC_MLC;
57 if (IS_HASWELL(dev))
58 pte |= GEN6_PTE_CACHE_LLC;
59 else
60 pte |= GEN6_PTE_CACHE_LLC_MLC;
61 break; 55 break;
62 case I915_CACHE_LLC: 56 case I915_CACHE_LLC:
63 pte |= GEN6_PTE_CACHE_LLC; 57 pte |= GEN6_PTE_CACHE_LLC;
64 break; 58 break;
65 case I915_CACHE_NONE: 59 case I915_CACHE_NONE:
66 if (IS_HASWELL(dev)) 60 pte |= GEN6_PTE_UNCACHED;
67 pte |= HSW_PTE_UNCACHED;
68 else
69 pte |= GEN6_PTE_UNCACHED;
70 break; 61 break;
71 default: 62 default:
72 BUG(); 63 BUG();
@@ -75,16 +66,48 @@ static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
75 return pte; 66 return pte;
76} 67}
77 68
78static int gen6_ppgtt_enable(struct drm_device *dev) 69#define BYT_PTE_WRITEABLE (1 << 1)
70#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
71
72static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
73 dma_addr_t addr,
74 enum i915_cache_level level)
79{ 75{
80 drm_i915_private_t *dev_priv = dev->dev_private; 76 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
81 uint32_t pd_offset; 77 pte |= GEN6_PTE_ADDR_ENCODE(addr);
82 struct intel_ring_buffer *ring; 78
83 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 79 /* Mark the page as writeable. Other platforms don't have a
80 * setting for read-only/writable, so this matches that behavior.
81 */
82 pte |= BYT_PTE_WRITEABLE;
83
84 if (level != I915_CACHE_NONE)
85 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
86
87 return pte;
88}
89
90static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
91 dma_addr_t addr,
92 enum i915_cache_level level)
93{
94 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
95 pte |= GEN6_PTE_ADDR_ENCODE(addr);
96
97 if (level != I915_CACHE_NONE)
98 pte |= GEN6_PTE_CACHE_LLC;
99
100 return pte;
101}
102
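[Editor's note: gen6_pte_encode() and its new Haswell/Valleyview siblings all share the GEN6 address layout — bits 31:12 of the physical address go into the PTE directly, and on systems with more than 4 GiB, address bits 39:32 are folded into PTE bits 11:4 by GEN6_GTT_ADDR_ENCODE(). A quick standalone check of that encoding, with the valid and cache bits as in the hunk; this is a sketch, not the full driver logic:

#include <stdint.h>
#include <stdio.h>

#define GEN6_PTE_VALID        (1 << 0)
#define GEN6_PTE_CACHE_LLC    (2 << 1)
/* addr bits 39:32 land in PTE bits 11:4 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

static uint32_t pte_encode(uint64_t addr, int cached)
{
    uint32_t pte = GEN6_PTE_VALID;
    pte |= (uint32_t)GEN6_GTT_ADDR_ENCODE(addr);
    if (cached)
        pte |= GEN6_PTE_CACHE_LLC;
    return pte;
}

int main(void)
{
    /* A page at physical 0x1_2345_6000: address bit 32 -> PTE bit 4. */
    printf("pte = 0x%08x\n", pte_encode(0x123456000ull, 1));
    return 0;
}
]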
103static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
104{
105 struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
84 gen6_gtt_pte_t __iomem *pd_addr; 106 gen6_gtt_pte_t __iomem *pd_addr;
85 uint32_t pd_entry; 107 uint32_t pd_entry;
86 int i; 108 int i;
87 109
110 WARN_ON(ppgtt->pd_offset & 0x3f);
88 pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + 111 pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
89 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); 112 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
90 for (i = 0; i < ppgtt->num_pd_entries; i++) { 113 for (i = 0; i < ppgtt->num_pd_entries; i++) {
@@ -97,6 +120,19 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
97 writel(pd_entry, pd_addr + i); 120 writel(pd_entry, pd_addr + i);
98 } 121 }
99 readl(pd_addr); 122 readl(pd_addr);
123}
124
125static int gen6_ppgtt_enable(struct drm_device *dev)
126{
127 drm_i915_private_t *dev_priv = dev->dev_private;
128 uint32_t pd_offset;
129 struct intel_ring_buffer *ring;
130 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
131 int i;
132
133 BUG_ON(ppgtt->pd_offset & 0x3f);
134
135 gen6_write_pdes(ppgtt);
100 136
101 pd_offset = ppgtt->pd_offset; 137 pd_offset = ppgtt->pd_offset;
102 pd_offset /= 64; /* in cachelines, */ 138 pd_offset /= 64; /* in cachelines, */
@@ -154,9 +190,9 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
154 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 190 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
155 unsigned last_pte, i; 191 unsigned last_pte, i;
156 192
157 scratch_pte = gen6_pte_encode(ppgtt->dev, 193 scratch_pte = ppgtt->pte_encode(ppgtt->dev,
158 ppgtt->scratch_page_dma_addr, 194 ppgtt->scratch_page_dma_addr,
159 I915_CACHE_LLC); 195 I915_CACHE_LLC);
160 196
161 while (num_entries) { 197 while (num_entries) {
162 last_pte = first_pte + num_entries; 198 last_pte = first_pte + num_entries;
@@ -191,8 +227,8 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
191 dma_addr_t page_addr; 227 dma_addr_t page_addr;
192 228
193 page_addr = sg_page_iter_dma_address(&sg_iter); 229 page_addr = sg_page_iter_dma_address(&sg_iter);
194 pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr, 230 pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
195 cache_level); 231 cache_level);
196 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 232 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
197 kunmap_atomic(pt_vaddr); 233 kunmap_atomic(pt_vaddr);
198 act_pt++; 234 act_pt++;
@@ -233,8 +269,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
233 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 269 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
234 * entries. For aliasing ppgtt support we just steal them at the end for 270 * entries. For aliasing ppgtt support we just steal them at the end for
235 * now. */ 271 * now. */
236 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); 272 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
237 273
274 if (IS_HASWELL(dev)) {
275 ppgtt->pte_encode = hsw_pte_encode;
276 } else if (IS_VALLEYVIEW(dev)) {
277 ppgtt->pte_encode = byt_pte_encode;
278 } else {
279 ppgtt->pte_encode = gen6_pte_encode;
280 }
238 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 281 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
239 ppgtt->enable = gen6_ppgtt_enable; 282 ppgtt->enable = gen6_ppgtt_enable;
240 ppgtt->clear_range = gen6_ppgtt_clear_range; 283 ppgtt->clear_range = gen6_ppgtt_clear_range;
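[Editor's note: rather than sprinkling IS_HASWELL()/IS_VALLEYVIEW() tests through the hot paths, the patch stores the right encoder in a function pointer (ppgtt->pte_encode, and dev_priv->gtt.pte_encode below) chosen once at init. A compact sketch of that dispatch style — the bit values here are simplified, see the encoders in the hunk for the real ones:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t (*pte_encode_fn)(uint64_t addr, int cached);

static uint32_t gen6_encode(uint64_t a, int c) { return (uint32_t)a | 1 | (c ? 4 : 0); }
static uint32_t hsw_encode(uint64_t a, int c)  { return (uint32_t)a | 1 | (c ? 2 : 0); }
static uint32_t byt_encode(uint64_t a, int c)  { return (uint32_t)a | 1 | 2 | (c ? 4 : 0); }

enum platform { GEN6, HSW, BYT };

/* Pick the encoder once, as gen6_ppgtt_init() / i915_gem_gtt_init() do,
 * then call through the pointer everywhere else. */
static pte_encode_fn select_encoder(enum platform p)
{
    switch (p) {
    case HSW: return hsw_encode;
    case BYT: return byt_encode;
    default:  return gen6_encode;
    }
}

int main(void)
{
    pte_encode_fn enc = select_encoder(BYT);
    printf("pte = 0x%08x\n", enc(0x1000, 1));
    return 0;
}
]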
@@ -396,7 +439,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
396 dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, 439 dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
397 dev_priv->gtt.total / PAGE_SIZE); 440 dev_priv->gtt.total / PAGE_SIZE);
398 441
399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 442 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
400 i915_gem_clflush_object(obj); 443 i915_gem_clflush_object(obj);
401 i915_gem_gtt_bind_object(obj, obj->cache_level); 444 i915_gem_gtt_bind_object(obj, obj->cache_level);
402 } 445 }
@@ -437,7 +480,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
437 480
438 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 481 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
439 addr = sg_page_iter_dma_address(&sg_iter); 482 addr = sg_page_iter_dma_address(&sg_iter);
440 iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]); 483 iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
484 &gtt_entries[i]);
441 i++; 485 i++;
442 } 486 }
443 487
@@ -449,7 +493,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
449 */ 493 */
450 if (i != 0) 494 if (i != 0)
451 WARN_ON(readl(&gtt_entries[i-1]) 495 WARN_ON(readl(&gtt_entries[i-1])
452 != gen6_pte_encode(dev, addr, level)); 496 != dev_priv->gtt.pte_encode(dev, addr, level));
453 497
454 /* This next bit makes the above posting read even more important. We 498 /* This next bit makes the above posting read even more important. We
455 * want to flush the TLBs only after we're certain all the PTE updates 499 * want to flush the TLBs only after we're certain all the PTE updates
@@ -474,8 +518,9 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
474 first_entry, num_entries, max_entries)) 518 first_entry, num_entries, max_entries))
475 num_entries = max_entries; 519 num_entries = max_entries;
476 520
477 scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma, 521 scratch_pte = dev_priv->gtt.pte_encode(dev,
478 I915_CACHE_LLC); 522 dev_priv->gtt.scratch_page_dma,
523 I915_CACHE_LLC);
479 for (i = 0; i < num_entries; i++) 524 for (i = 0; i < num_entries; i++)
480 iowrite32(scratch_pte, &gtt_base[i]); 525 iowrite32(scratch_pte, &gtt_base[i]);
481 readl(gtt_base); 526 readl(gtt_base);
@@ -586,7 +631,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
586 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; 631 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
587 632
588 /* Mark any preallocated objects as occupied */ 633 /* Mark any preallocated objects as occupied */
589 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 634 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
590 DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", 635 DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
591 obj->gtt_offset, obj->base.size); 636 obj->gtt_offset, obj->base.size);
592 637
@@ -809,6 +854,13 @@ int i915_gem_gtt_init(struct drm_device *dev)
809 } else { 854 } else {
810 dev_priv->gtt.gtt_probe = gen6_gmch_probe; 855 dev_priv->gtt.gtt_probe = gen6_gmch_probe;
811 dev_priv->gtt.gtt_remove = gen6_gmch_remove; 856 dev_priv->gtt.gtt_remove = gen6_gmch_remove;
857 if (IS_HASWELL(dev)) {
858 dev_priv->gtt.pte_encode = hsw_pte_encode;
859 } else if (IS_VALLEYVIEW(dev)) {
860 dev_priv->gtt.pte_encode = byt_pte_encode;
861 } else {
862 dev_priv->gtt.pte_encode = gen6_pte_encode;
863 }
812 } 864 }
813 865
814 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, 866 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 130d1db27e28..982d4732cecf 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -62,7 +62,10 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
62 * its value of TOLUD. 62 * its value of TOLUD.
63 */ 63 */
64 base = 0; 64 base = 0;
65 if (INTEL_INFO(dev)->gen >= 6) { 65 if (IS_VALLEYVIEW(dev)) {
66 pci_read_config_dword(dev->pdev, 0x5c, &base);
67 base &= ~((1<<20) - 1);
68 } else if (INTEL_INFO(dev)->gen >= 6) {
66 /* Read Base Data of Stolen Memory Register (BDSM) directly. 69 /* Read Base Data of Stolen Memory Register (BDSM) directly.
 67 * Note that there is also an MCHBAR mirror at 0x1080c0 or 70
68 * we could use device 2:0x5c instead. 71 * we could use device 2:0x5c instead.
@@ -136,6 +139,7 @@ static int i915_setup_compression(struct drm_device *dev, int size)
136err_fb: 139err_fb:
137 drm_mm_put_block(compressed_fb); 140 drm_mm_put_block(compressed_fb);
138err: 141err:
142 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
139 return -ENOSPC; 143 return -ENOSPC;
140} 144}
141 145
@@ -143,7 +147,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
143{ 147{
144 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
145 149
146 if (dev_priv->mm.stolen_base == 0) 150 if (!drm_mm_initialized(&dev_priv->mm.stolen))
147 return -ENODEV; 151 return -ENODEV;
148 152
149 if (size < dev_priv->cfb_size) 153 if (size < dev_priv->cfb_size)
@@ -175,6 +179,9 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
175{ 179{
176 struct drm_i915_private *dev_priv = dev->dev_private; 180 struct drm_i915_private *dev_priv = dev->dev_private;
177 181
182 if (!drm_mm_initialized(&dev_priv->mm.stolen))
183 return;
184
178 i915_gem_stolen_cleanup_compression(dev); 185 i915_gem_stolen_cleanup_compression(dev);
179 drm_mm_takedown(&dev_priv->mm.stolen); 186 drm_mm_takedown(&dev_priv->mm.stolen);
180} 187}
@@ -182,6 +189,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
182int i915_gem_init_stolen(struct drm_device *dev) 189int i915_gem_init_stolen(struct drm_device *dev)
183{ 190{
184 struct drm_i915_private *dev_priv = dev->dev_private; 191 struct drm_i915_private *dev_priv = dev->dev_private;
192 int bios_reserved = 0;
185 193
186 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 194 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
187 if (dev_priv->mm.stolen_base == 0) 195 if (dev_priv->mm.stolen_base == 0)
@@ -190,8 +198,12 @@ int i915_gem_init_stolen(struct drm_device *dev)
190 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", 198 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
191 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); 199 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
192 200
201 if (IS_VALLEYVIEW(dev))
202 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
203
193 /* Basic memrange allocator for stolen space */ 204 /* Basic memrange allocator for stolen space */
194 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size); 205 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
206 bios_reserved);
195 207
196 return 0; 208 return 0;
197} 209}
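[Editor's note: on Valleyview the top 1 MiB of stolen memory belongs to the BIOS, so the allocator above is initialised over stolen_size - bios_reserved. The arithmetic is simple but worth being explicit about; the sizes below are an example configuration, not read from hardware:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t stolen_size   = 64ul << 20;   /* e.g. 64 MiB of stolen memory */
    size_t bios_reserved = 1024 * 1024;  /* top 1 MiB on VLV/BYT */
    size_t usable        = stolen_size - bios_reserved;

    /* drm_mm_init(&stolen, 0, usable) then manages [0, usable) */
    printf("usable stolen: %zu bytes (%zu MiB)\n", usable, usable >> 20);
    return 0;
}
]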
@@ -270,7 +282,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
270 goto cleanup; 282 goto cleanup;
271 283
272 obj->has_dma_mapping = true; 284 obj->has_dma_mapping = true;
273 obj->pages_pin_count = 1; 285 i915_gem_object_pin_pages(obj);
274 obj->stolen = stolen; 286 obj->stolen = stolen;
275 287
276 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 288 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
@@ -291,7 +303,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
291 struct drm_i915_gem_object *obj; 303 struct drm_i915_gem_object *obj;
292 struct drm_mm_node *stolen; 304 struct drm_mm_node *stolen;
293 305
294 if (dev_priv->mm.stolen_base == 0) 306 if (!drm_mm_initialized(&dev_priv->mm.stolen))
295 return NULL; 307 return NULL;
296 308
297 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); 309 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
@@ -322,7 +334,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
322 struct drm_i915_gem_object *obj; 334 struct drm_i915_gem_object *obj;
323 struct drm_mm_node *stolen; 335 struct drm_mm_node *stolen;
324 336
325 if (dev_priv->mm.stolen_base == 0) 337 if (!drm_mm_initialized(&dev_priv->mm.stolen))
326 return NULL; 338 return NULL;
327 339
328 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", 340 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
@@ -330,7 +342,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
330 342
331 /* KISS and expect everything to be page-aligned */ 343 /* KISS and expect everything to be page-aligned */
332 BUG_ON(stolen_offset & 4095); 344 BUG_ON(stolen_offset & 4095);
333 BUG_ON(gtt_offset & 4095);
334 BUG_ON(size & 4095); 345 BUG_ON(size & 4095);
335 346
336 if (WARN_ON(size == 0)) 347 if (WARN_ON(size == 0))
@@ -351,6 +362,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
351 return NULL; 362 return NULL;
352 } 363 }
353 364
365 /* Some objects just need physical mem from stolen space */
366 if (gtt_offset == -1)
367 return obj;
368
354 /* To simplify the initialisation sequence between KMS and GTT, 369 /* To simplify the initialisation sequence between KMS and GTT,
355 * we allow construction of the stolen object prior to 370 * we allow construction of the stolen object prior to
356 * setting up the GTT space. The actual reservation will occur 371 * setting up the GTT space. The actual reservation will occur
@@ -371,7 +386,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
371 obj->gtt_offset = gtt_offset; 386 obj->gtt_offset = gtt_offset;
372 obj->has_global_gtt_mapping = 1; 387 obj->has_global_gtt_mapping = 1;
373 388
374 list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list); 389 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
375 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 390 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
376 391
377 return obj; 392 return obj;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0aa2ef0d2ae0..3d92a7cef154 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71}; 71};
72 72
73static const u32 hpd_status_i965[] = {
74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80};
81
82static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 73static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -88,13 +79,12 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89}; 80};
90 81
91static void ibx_hpd_irq_setup(struct drm_device *dev);
92static void i915_hpd_irq_setup(struct drm_device *dev);
93
94/* For display hotplug interrupt */ 82/* For display hotplug interrupt */
95static void 83static void
96ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 84ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97{ 85{
86 assert_spin_locked(&dev_priv->irq_lock);
87
98 if ((dev_priv->irq_mask & mask) != 0) { 88 if ((dev_priv->irq_mask & mask) != 0) {
99 dev_priv->irq_mask &= ~mask; 89 dev_priv->irq_mask &= ~mask;
100 I915_WRITE(DEIMR, dev_priv->irq_mask); 90 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -105,6 +95,8 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
105static void 95static void
106ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 96ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
107{ 97{
98 assert_spin_locked(&dev_priv->irq_lock);
99
108 if ((dev_priv->irq_mask & mask) != mask) { 100 if ((dev_priv->irq_mask & mask) != mask) {
109 dev_priv->irq_mask |= mask; 101 dev_priv->irq_mask |= mask;
110 I915_WRITE(DEIMR, dev_priv->irq_mask); 102 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -112,6 +104,215 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
112 } 104 }
113} 105}
114 106
107static bool ivb_can_enable_err_int(struct drm_device *dev)
108{
109 struct drm_i915_private *dev_priv = dev->dev_private;
110 struct intel_crtc *crtc;
111 enum pipe pipe;
112
113 assert_spin_locked(&dev_priv->irq_lock);
114
115 for_each_pipe(pipe) {
116 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
117
118 if (crtc->cpu_fifo_underrun_disabled)
119 return false;
120 }
121
122 return true;
123}
124
125static bool cpt_can_enable_serr_int(struct drm_device *dev)
126{
127 struct drm_i915_private *dev_priv = dev->dev_private;
128 enum pipe pipe;
129 struct intel_crtc *crtc;
130
131 for_each_pipe(pipe) {
132 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
133
134 if (crtc->pch_fifo_underrun_disabled)
135 return false;
136 }
137
138 return true;
139}
140
141static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
142 enum pipe pipe, bool enable)
143{
144 struct drm_i915_private *dev_priv = dev->dev_private;
145 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
146 DE_PIPEB_FIFO_UNDERRUN;
147
148 if (enable)
149 ironlake_enable_display_irq(dev_priv, bit);
150 else
151 ironlake_disable_display_irq(dev_priv, bit);
152}
153
154static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
155 bool enable)
156{
157 struct drm_i915_private *dev_priv = dev->dev_private;
158
159 if (enable) {
160 if (!ivb_can_enable_err_int(dev))
161 return;
162
163 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
164 ERR_INT_FIFO_UNDERRUN_B |
165 ERR_INT_FIFO_UNDERRUN_C);
166
167 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
168 } else {
169 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
170 }
171}
172
173static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
174 bool enable)
175{
176 struct drm_device *dev = crtc->base.dev;
177 struct drm_i915_private *dev_priv = dev->dev_private;
178 uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
179 SDE_TRANSB_FIFO_UNDER;
180
181 if (enable)
182 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
183 else
184 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
185
186 POSTING_READ(SDEIMR);
187}
188
189static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
190 enum transcoder pch_transcoder,
191 bool enable)
192{
193 struct drm_i915_private *dev_priv = dev->dev_private;
194
195 if (enable) {
196 if (!cpt_can_enable_serr_int(dev))
197 return;
198
199 I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
200 SERR_INT_TRANS_B_FIFO_UNDERRUN |
201 SERR_INT_TRANS_C_FIFO_UNDERRUN);
202
203 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
204 } else {
205 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
206 }
207
208 POSTING_READ(SDEIMR);
209}
210
211/**
212 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
213 * @dev: drm device
214 * @pipe: pipe
215 * @enable: true if we want to report FIFO underrun errors, false otherwise
216 *
217 * This function makes us disable or enable CPU fifo underruns for a specific
218 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 219 * reporting for one pipe may also disable all the other CPU error interrupts for
220 * the other pipes, due to the fact that there's just one interrupt mask/enable
221 * bit for all the pipes.
222 *
223 * Returns the previous state of underrun reporting.
224 */
225bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
226 enum pipe pipe, bool enable)
227{
228 struct drm_i915_private *dev_priv = dev->dev_private;
229 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
230 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
231 unsigned long flags;
232 bool ret;
233
234 spin_lock_irqsave(&dev_priv->irq_lock, flags);
235
236 ret = !intel_crtc->cpu_fifo_underrun_disabled;
237
238 if (enable == ret)
239 goto done;
240
241 intel_crtc->cpu_fifo_underrun_disabled = !enable;
242
243 if (IS_GEN5(dev) || IS_GEN6(dev))
244 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
245 else if (IS_GEN7(dev))
246 ivybridge_set_fifo_underrun_reporting(dev, enable);
247
248done:
249 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
250 return ret;
251}
252
253/**
254 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
255 * @dev: drm device
256 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
257 * @enable: true if we want to report FIFO underrun errors, false otherwise
258 *
259 * This function enables or disables PCH FIFO underrun reporting for a specific
260 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
261 * underrun reporting for one transcoder may also disable all the other PCH
262 * error interrupts for the other transcoders, because there is just
263 * one interrupt mask/enable bit for all the transcoders.
264 *
265 * Returns the previous state of underrun reporting.
266 */
267bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
268 enum transcoder pch_transcoder,
269 bool enable)
270{
271 struct drm_i915_private *dev_priv = dev->dev_private;
272 enum pipe p;
273 struct drm_crtc *crtc;
274 struct intel_crtc *intel_crtc;
275 unsigned long flags;
276 bool ret;
277
278 if (HAS_PCH_LPT(dev)) {
279 crtc = NULL;
280 for_each_pipe(p) {
281 struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
282 if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
283 crtc = c;
284 break;
285 }
286 }
287 if (!crtc) {
288 DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
289 return false;
290 }
291 } else {
292 crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
293 }
294 intel_crtc = to_intel_crtc(crtc);
295
296 spin_lock_irqsave(&dev_priv->irq_lock, flags);
297
298 ret = !intel_crtc->pch_fifo_underrun_disabled;
299
300 if (enable == ret)
301 goto done;
302
303 intel_crtc->pch_fifo_underrun_disabled = !enable;
304
305 if (HAS_PCH_IBX(dev))
306 ibx_set_fifo_underrun_reporting(intel_crtc, enable);
307 else
308 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
309
310done:
311 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
312 return ret;
313}
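A minimal caller sketch (not part of this patch; the wrapper name and the modeset step are hypothetical). Because both setters return the previous reporting state, a caller can suppress underrun reporting around an operation that is expected to underrun and afterwards restore whatever state was in effect, instead of unconditionally re-enabling it:

static void example_quiet_pipe_reconfig(struct drm_device *dev)
{
	bool was_enabled;

	/* Suppress reporting for pipe A and remember the old state. */
	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false);

	/* ... reconfigure the pipe; transient underruns are expected ... */

	/* Restore the previous state rather than forcing reporting back on. */
	intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, was_enabled);
}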
314
315
115void 316void
116i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 317i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
117{ 318{
@@ -142,28 +343,21 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
142} 343}
143 344
144/** 345/**
145 * intel_enable_asle - enable ASLE interrupt for OpRegion 346 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
146 */ 347 */
147void intel_enable_asle(struct drm_device *dev) 348static void i915_enable_asle_pipestat(struct drm_device *dev)
148{ 349{
149 drm_i915_private_t *dev_priv = dev->dev_private; 350 drm_i915_private_t *dev_priv = dev->dev_private;
150 unsigned long irqflags; 351 unsigned long irqflags;
151 352
152 /* FIXME: opregion/asle for VLV */ 353 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
153 if (IS_VALLEYVIEW(dev))
154 return; 354 return;
155 355
156 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 356 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
157 357
158 if (HAS_PCH_SPLIT(dev)) 358 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
159 ironlake_enable_display_irq(dev_priv, DE_GSE); 359 if (INTEL_INFO(dev)->gen >= 4)
160 else { 360 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
161 i915_enable_pipestat(dev_priv, 1,
162 PIPE_LEGACY_BLC_EVENT_ENABLE);
163 if (INTEL_INFO(dev)->gen >= 4)
164 i915_enable_pipestat(dev_priv, 0,
165 PIPE_LEGACY_BLC_EVENT_ENABLE);
166 }
167 361
168 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 362 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
169} 363}
@@ -181,10 +375,16 @@ static int
181i915_pipe_enabled(struct drm_device *dev, int pipe) 375i915_pipe_enabled(struct drm_device *dev, int pipe)
182{ 376{
183 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 377 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
184 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
185 pipe);
186 378
187 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; 379 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
380 /* Locking is horribly broken here, but whatever. */
381 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
382 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
383
384 return intel_crtc->active;
385 } else {
386 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
387 }
188} 388}
189 389
190/* Called from drm generic code, passed a 'crtc', which 390/* Called from drm generic code, passed a 'crtc', which
@@ -334,6 +534,21 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
334 crtc); 534 crtc);
335} 535}
336 536
537static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
538{
539 enum drm_connector_status old_status;
540
541 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
542 old_status = connector->status;
543
544 connector->status = connector->funcs->detect(connector, false);
545 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
546 connector->base.id,
547 drm_get_connector_name(connector),
548 old_status, connector->status);
549 return (old_status != connector->status);
550}
551
337/* 552/*
338 * Handle hotplug events outside the interrupt handler proper. 553 * Handle hotplug events outside the interrupt handler proper.
339 */ 554 */
@@ -350,6 +565,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
350 struct drm_connector *connector; 565 struct drm_connector *connector;
351 unsigned long irqflags; 566 unsigned long irqflags;
352 bool hpd_disabled = false; 567 bool hpd_disabled = false;
568 bool changed = false;
569 u32 hpd_event_bits;
353 570
354 /* HPD irq before everything is fully set up. */ 571 /* HPD irq before everything is fully set up. */
355 if (!dev_priv->enable_hotplug_processing) 572 if (!dev_priv->enable_hotplug_processing)
@@ -359,6 +576,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
359 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 576 DRM_DEBUG_KMS("running encoder hotplug functions\n");
360 577
361 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 578 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
579
580 hpd_event_bits = dev_priv->hpd_event_bits;
581 dev_priv->hpd_event_bits = 0;
362 list_for_each_entry(connector, &mode_config->connector_list, head) { 582 list_for_each_entry(connector, &mode_config->connector_list, head) {
363 intel_connector = to_intel_connector(connector); 583 intel_connector = to_intel_connector(connector);
364 intel_encoder = intel_connector->encoder; 584 intel_encoder = intel_connector->encoder;
@@ -373,6 +593,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
373 | DRM_CONNECTOR_POLL_DISCONNECT; 593 | DRM_CONNECTOR_POLL_DISCONNECT;
374 hpd_disabled = true; 594 hpd_disabled = true;
375 } 595 }
596 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
597 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
598 drm_get_connector_name(connector), intel_encoder->hpd_pin);
599 }
376 } 600 }
377 /* if there were no outputs to poll, poll was disabled, 601 /* if there were no outputs to poll, poll was disabled,
378 * therefore make sure it's enabled when disabling HPD on 602 * therefore make sure it's enabled when disabling HPD on
@@ -385,14 +609,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
385 609
386 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 610 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
387 611
388 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 612 list_for_each_entry(connector, &mode_config->connector_list, head) {
389 if (intel_encoder->hot_plug) 613 intel_connector = to_intel_connector(connector);
390 intel_encoder->hot_plug(intel_encoder); 614 intel_encoder = intel_connector->encoder;
391 615 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
616 if (intel_encoder->hot_plug)
617 intel_encoder->hot_plug(intel_encoder);
618 if (intel_hpd_irq_event(dev, connector))
619 changed = true;
620 }
621 }
392 mutex_unlock(&mode_config->mutex); 622 mutex_unlock(&mode_config->mutex);
393 623
394 /* Just fire off a uevent and let userspace tell us what to do */ 624 if (changed)
395 drm_helper_hpd_irq_event(dev); 625 drm_kms_helper_hotplug_event(dev);
396} 626}
397 627
398static void ironlake_handle_rps_change(struct drm_device *dev) 628static void ironlake_handle_rps_change(struct drm_device *dev)
@@ -447,7 +677,6 @@ static void notify_ring(struct drm_device *dev,
447 677
448 wake_up_all(&ring->irq_queue); 678 wake_up_all(&ring->irq_queue);
449 if (i915_enable_hangcheck) { 679 if (i915_enable_hangcheck) {
450 dev_priv->gpu_error.hangcheck_count = 0;
451 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 680 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
452 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 681 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
453 } 682 }
@@ -464,25 +693,48 @@ static void gen6_pm_rps_work(struct work_struct *work)
464 pm_iir = dev_priv->rps.pm_iir; 693 pm_iir = dev_priv->rps.pm_iir;
465 dev_priv->rps.pm_iir = 0; 694 dev_priv->rps.pm_iir = 0;
466 pm_imr = I915_READ(GEN6_PMIMR); 695 pm_imr = I915_READ(GEN6_PMIMR);
467 I915_WRITE(GEN6_PMIMR, 0); 696 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
697 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
468 spin_unlock_irq(&dev_priv->rps.lock); 698 spin_unlock_irq(&dev_priv->rps.lock);
469 699
470 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 700 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
471 return; 701 return;
472 702
473 mutex_lock(&dev_priv->rps.hw_lock); 703 mutex_lock(&dev_priv->rps.hw_lock);
474 704
475 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 705 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
476 new_delay = dev_priv->rps.cur_delay + 1; 706 new_delay = dev_priv->rps.cur_delay + 1;
477 else 707
708 /*
709 * For better performance, jump directly
710 * to RPe if we're below it.
711 */
712 if (IS_VALLEYVIEW(dev_priv->dev) &&
713 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
714 new_delay = dev_priv->rps.rpe_delay;
715 } else
478 new_delay = dev_priv->rps.cur_delay - 1; 716 new_delay = dev_priv->rps.cur_delay - 1;
479 717
480 /* sysfs frequency interfaces may have snuck in while servicing the 718 /* sysfs frequency interfaces may have snuck in while servicing the
481 * interrupt 719 * interrupt
482 */ 720 */
483 if (!(new_delay > dev_priv->rps.max_delay || 721 if (new_delay >= dev_priv->rps.min_delay &&
484 new_delay < dev_priv->rps.min_delay)) { 722 new_delay <= dev_priv->rps.max_delay) {
485 gen6_set_rps(dev_priv->dev, new_delay); 723 if (IS_VALLEYVIEW(dev_priv->dev))
724 valleyview_set_rps(dev_priv->dev, new_delay);
725 else
726 gen6_set_rps(dev_priv->dev, new_delay);
727 }
728
729 if (IS_VALLEYVIEW(dev_priv->dev)) {
730 /*
731 * On VLV, when we enter RC6 we may not be at the minimum
732 * voltage level, so arm a timer to check. It should only
733 * fire when there's activity or once after we've entered
734 * RC6, and then won't be re-armed until the next RPS interrupt.
735 */
736 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
737 msecs_to_jiffies(100));
486 } 738 }
487 739
488 mutex_unlock(&dev_priv->rps.hw_lock); 740 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -529,7 +781,7 @@ static void ivybridge_parity_work(struct work_struct *work)
529 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 781 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
530 782
531 spin_lock_irqsave(&dev_priv->irq_lock, flags); 783 spin_lock_irqsave(&dev_priv->irq_lock, flags);
532 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 784 dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
533 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 785 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
534 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 786 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
535 787
@@ -561,7 +813,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
561 return; 813 return;
562 814
563 spin_lock_irqsave(&dev_priv->irq_lock, flags); 815 spin_lock_irqsave(&dev_priv->irq_lock, flags);
564 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 816 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
565 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 817 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
566 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 818 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
567 819
@@ -573,25 +825,26 @@ static void snb_gt_irq_handler(struct drm_device *dev,
573 u32 gt_iir) 825 u32 gt_iir)
574{ 826{
575 827
576 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | 828 if (gt_iir &
577 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) 829 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
578 notify_ring(dev, &dev_priv->ring[RCS]); 830 notify_ring(dev, &dev_priv->ring[RCS]);
579 if (gt_iir & GEN6_BSD_USER_INTERRUPT) 831 if (gt_iir & GT_BSD_USER_INTERRUPT)
580 notify_ring(dev, &dev_priv->ring[VCS]); 832 notify_ring(dev, &dev_priv->ring[VCS]);
581 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) 833 if (gt_iir & GT_BLT_USER_INTERRUPT)
582 notify_ring(dev, &dev_priv->ring[BCS]); 834 notify_ring(dev, &dev_priv->ring[BCS]);
583 835
584 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | 836 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
585 GT_GEN6_BSD_CS_ERROR_INTERRUPT | 837 GT_BSD_CS_ERROR_INTERRUPT |
586 GT_RENDER_CS_ERROR_INTERRUPT)) { 838 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
587 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 839 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
588 i915_handle_error(dev, false); 840 i915_handle_error(dev, false);
589 } 841 }
590 842
591 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) 843 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
592 ivybridge_handle_parity_error(dev); 844 ivybridge_handle_parity_error(dev);
593} 845}
594 846
847/* Legacy way of handling PM interrupts */
595static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 848static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
596 u32 pm_iir) 849 u32 pm_iir)
597{ 850{
@@ -619,23 +872,25 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
619#define HPD_STORM_DETECT_PERIOD 1000 872#define HPD_STORM_DETECT_PERIOD 1000
620#define HPD_STORM_THRESHOLD 5 873#define HPD_STORM_THRESHOLD 5
621 874
622static inline bool hotplug_irq_storm_detect(struct drm_device *dev, 875static inline void intel_hpd_irq_handler(struct drm_device *dev,
623 u32 hotplug_trigger, 876 u32 hotplug_trigger,
624 const u32 *hpd) 877 const u32 *hpd)
625{ 878{
626 drm_i915_private_t *dev_priv = dev->dev_private; 879 drm_i915_private_t *dev_priv = dev->dev_private;
627 unsigned long irqflags;
628 int i; 880 int i;
629 bool ret = false; 881 bool storm_detected = false;
630 882
631 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 883 if (!hotplug_trigger)
884 return;
632 885
886 spin_lock(&dev_priv->irq_lock);
633 for (i = 1; i < HPD_NUM_PINS; i++) { 887 for (i = 1; i < HPD_NUM_PINS; i++) {
634 888
635 if (!(hpd[i] & hotplug_trigger) || 889 if (!(hpd[i] & hotplug_trigger) ||
636 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 890 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
637 continue; 891 continue;
638 892
893 dev_priv->hpd_event_bits |= (1 << i);
639 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 894 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
640 dev_priv->hpd_stats[i].hpd_last_jiffies 895 dev_priv->hpd_stats[i].hpd_last_jiffies
641 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 896 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -643,16 +898,20 @@ static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
643 dev_priv->hpd_stats[i].hpd_cnt = 0; 898 dev_priv->hpd_stats[i].hpd_cnt = 0;
644 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 899 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
645 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 900 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
901 dev_priv->hpd_event_bits &= ~(1 << i);
646 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 902 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
647 ret = true; 903 storm_detected = true;
648 } else { 904 } else {
649 dev_priv->hpd_stats[i].hpd_cnt++; 905 dev_priv->hpd_stats[i].hpd_cnt++;
650 } 906 }
651 } 907 }
652 908
653 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 909 if (storm_detected)
910 dev_priv->display.hpd_irq_setup(dev);
911 spin_unlock(&dev_priv->irq_lock);
654 912
655 return ret; 913 queue_work(dev_priv->wq,
914 &dev_priv->hotplug_work);
656} 915}
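A standalone model of the storm accounting above (illustrative only; the struct and function names are assumptions). More than HPD_STORM_THRESHOLD (5) interrupts inside one HPD_STORM_DETECT_PERIOD (1000 ms) window flags a storm, and any quiet window restarts the count:

struct hpd_pin_model {
	unsigned long window_start_ms;	/* start of the current window */
	int cnt;			/* interrupts seen in this window */
};

static bool hpd_model_interrupt(struct hpd_pin_model *pin, unsigned long now_ms)
{
	if (now_ms - pin->window_start_ms > 1000) {
		/* Window expired: restart counting from this event. */
		pin->window_start_ms = now_ms;
		pin->cnt = 0;
		return false;
	}

	/* Too many interrupts inside a single window: call it a storm. */
	return ++pin->cnt > 5;
}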
657 916
658static void gmbus_irq_handler(struct drm_device *dev) 917static void gmbus_irq_handler(struct drm_device *dev)
@@ -669,6 +928,38 @@ static void dp_aux_irq_handler(struct drm_device *dev)
669 wake_up_all(&dev_priv->gmbus_wait_queue); 928 wake_up_all(&dev_priv->gmbus_wait_queue);
670} 929}
671 930
931/* Unlike gen6_queue_rps_work() from which this function is originally derived,
932 * we must be able to deal with other PM interrupts. This is complicated because
 933 * of the way in which we use the masks to defer the RPS work (which, for
 934 * the record, is necessary because of forcewake).
935 */
936static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
937 u32 pm_iir)
938{
939 unsigned long flags;
940
941 spin_lock_irqsave(&dev_priv->rps.lock, flags);
942 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
943 if (dev_priv->rps.pm_iir) {
944 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 945 /* never want to mask useful interrupts (this check doubles as a posting read) */
946 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
947 /* TODO: if queue_work is slow, move it out of the spinlock */
948 queue_work(dev_priv->wq, &dev_priv->rps.work);
949 }
950 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
951
952 if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
953 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
954 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
955
956 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
957 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
958 i915_handle_error(dev_priv->dev, false);
959 }
960 }
961}
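/*
 * Note on the split above: the GEN6_PM_RPS_EVENTS bits are latched into
 * rps.pm_iir and deferred to the RPS work item (their handling may need
 * forcewake), while the VEBOX user/error bits are cheap enough to be
 * serviced directly in interrupt context.
 */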
962
672static irqreturn_t valleyview_irq_handler(int irq, void *arg) 963static irqreturn_t valleyview_irq_handler(int irq, void *arg)
673{ 964{
674 struct drm_device *dev = (struct drm_device *) arg; 965 struct drm_device *dev = (struct drm_device *) arg;
@@ -727,12 +1018,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
727 1018
728 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1019 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
729 hotplug_status); 1020 hotplug_status);
730 if (hotplug_trigger) { 1021
731 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 1022 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
732 i915_hpd_irq_setup(dev); 1023
733 queue_work(dev_priv->wq,
734 &dev_priv->hotplug_work);
735 }
736 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1024 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
737 I915_READ(PORT_HOTPLUG_STAT); 1025 I915_READ(PORT_HOTPLUG_STAT);
738 } 1026 }
@@ -740,7 +1028,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
740 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1028 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
741 gmbus_irq_handler(dev); 1029 gmbus_irq_handler(dev);
742 1030
743 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 1031 if (pm_iir & GEN6_PM_RPS_EVENTS)
744 gen6_queue_rps_work(dev_priv, pm_iir); 1032 gen6_queue_rps_work(dev_priv, pm_iir);
745 1033
746 I915_WRITE(GTIIR, gt_iir); 1034 I915_WRITE(GTIIR, gt_iir);
@@ -758,15 +1046,14 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
758 int pipe; 1046 int pipe;
759 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1047 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
760 1048
761 if (hotplug_trigger) { 1049 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
762 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx)) 1050
763 ibx_hpd_irq_setup(dev); 1051 if (pch_iir & SDE_AUDIO_POWER_MASK) {
764 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 1052 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
765 } 1053 SDE_AUDIO_POWER_SHIFT);
766 if (pch_iir & SDE_AUDIO_POWER_MASK)
767 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1054 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
768 (pch_iir & SDE_AUDIO_POWER_MASK) >> 1055 port_name(port));
769 SDE_AUDIO_POWER_SHIFT); 1056 }
770 1057
771 if (pch_iir & SDE_AUX_MASK) 1058 if (pch_iir & SDE_AUX_MASK)
772 dp_aux_irq_handler(dev); 1059 dp_aux_irq_handler(dev);
@@ -795,10 +1082,64 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
795 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1082 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
796 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1083 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
797 1084
798 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
799 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
800 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1085 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
801 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 1086 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1087 false))
1088 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1089
1090 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1091 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1092 false))
1093 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1094}
1095
1096static void ivb_err_int_handler(struct drm_device *dev)
1097{
1098 struct drm_i915_private *dev_priv = dev->dev_private;
1099 u32 err_int = I915_READ(GEN7_ERR_INT);
1100
1101 if (err_int & ERR_INT_POISON)
1102 DRM_ERROR("Poison interrupt\n");
1103
1104 if (err_int & ERR_INT_FIFO_UNDERRUN_A)
1105 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1106 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1107
1108 if (err_int & ERR_INT_FIFO_UNDERRUN_B)
1109 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1110 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1111
1112 if (err_int & ERR_INT_FIFO_UNDERRUN_C)
1113 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
1114 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
1115
1116 I915_WRITE(GEN7_ERR_INT, err_int);
1117}
1118
1119static void cpt_serr_int_handler(struct drm_device *dev)
1120{
1121 struct drm_i915_private *dev_priv = dev->dev_private;
1122 u32 serr_int = I915_READ(SERR_INT);
1123
1124 if (serr_int & SERR_INT_POISON)
1125 DRM_ERROR("PCH poison interrupt\n");
1126
1127 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1128 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1129 false))
1130 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1131
1132 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1133 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1134 false))
1135 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1136
1137 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1138 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1139 false))
1140 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1141
1142 I915_WRITE(SERR_INT, serr_int);
802} 1143}
803 1144
804static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1145static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
@@ -807,15 +1148,14 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
807 int pipe; 1148 int pipe;
808 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1149 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
809 1150
810 if (hotplug_trigger) { 1151 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
811 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) 1152
812 ibx_hpd_irq_setup(dev); 1153 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
813 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 1154 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1155 SDE_AUDIO_POWER_SHIFT_CPT);
1156 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1157 port_name(port));
814 } 1158 }
815 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
816 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
817 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
818 SDE_AUDIO_POWER_SHIFT_CPT);
819 1159
820 if (pch_iir & SDE_AUX_MASK_CPT) 1160 if (pch_iir & SDE_AUX_MASK_CPT)
821 dp_aux_irq_handler(dev); 1161 dp_aux_irq_handler(dev);
@@ -834,6 +1174,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
834 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1174 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
835 pipe_name(pipe), 1175 pipe_name(pipe),
836 I915_READ(FDI_RX_IIR(pipe))); 1176 I915_READ(FDI_RX_IIR(pipe)));
1177
1178 if (pch_iir & SDE_ERROR_CPT)
1179 cpt_serr_int_handler(dev);
837} 1180}
838 1181
839static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 1182static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
@@ -846,6 +1189,14 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
846 1189
847 atomic_inc(&dev_priv->irq_received); 1190 atomic_inc(&dev_priv->irq_received);
848 1191
1192 /* We get interrupts on unclaimed registers, so check for this before we
1193 * do any I915_{READ,WRITE}. */
1194 if (IS_HASWELL(dev) &&
1195 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1196 DRM_ERROR("Unclaimed register before interrupt\n");
1197 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1198 }
1199
849 /* disable master interrupt before clearing iir */ 1200 /* disable master interrupt before clearing iir */
850 de_ier = I915_READ(DEIER); 1201 de_ier = I915_READ(DEIER);
851 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1202 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -861,6 +1212,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
861 POSTING_READ(SDEIER); 1212 POSTING_READ(SDEIER);
862 } 1213 }
863 1214
1215 /* On Haswell, also mask ERR_INT because we don't want to risk
1216 * generating "unclaimed register" interrupts from inside the interrupt
1217 * handler. */
1218 if (IS_HASWELL(dev)) {
1219 spin_lock(&dev_priv->irq_lock);
1220 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1221 spin_unlock(&dev_priv->irq_lock);
1222 }
1223
864 gt_iir = I915_READ(GTIIR); 1224 gt_iir = I915_READ(GTIIR);
865 if (gt_iir) { 1225 if (gt_iir) {
866 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1226 snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -870,11 +1230,14 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
870 1230
871 de_iir = I915_READ(DEIIR); 1231 de_iir = I915_READ(DEIIR);
872 if (de_iir) { 1232 if (de_iir) {
1233 if (de_iir & DE_ERR_INT_IVB)
1234 ivb_err_int_handler(dev);
1235
873 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1236 if (de_iir & DE_AUX_CHANNEL_A_IVB)
874 dp_aux_irq_handler(dev); 1237 dp_aux_irq_handler(dev);
875 1238
876 if (de_iir & DE_GSE_IVB) 1239 if (de_iir & DE_GSE_IVB)
877 intel_opregion_gse_intr(dev); 1240 intel_opregion_asle_intr(dev);
878 1241
879 for (i = 0; i < 3; i++) { 1242 for (i = 0; i < 3; i++) {
880 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 1243 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
@@ -901,12 +1264,21 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
901 1264
902 pm_iir = I915_READ(GEN6_PMIIR); 1265 pm_iir = I915_READ(GEN6_PMIIR);
903 if (pm_iir) { 1266 if (pm_iir) {
904 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 1267 if (IS_HASWELL(dev))
1268 hsw_pm_irq_handler(dev_priv, pm_iir);
1269 else if (pm_iir & GEN6_PM_RPS_EVENTS)
905 gen6_queue_rps_work(dev_priv, pm_iir); 1270 gen6_queue_rps_work(dev_priv, pm_iir);
906 I915_WRITE(GEN6_PMIIR, pm_iir); 1271 I915_WRITE(GEN6_PMIIR, pm_iir);
907 ret = IRQ_HANDLED; 1272 ret = IRQ_HANDLED;
908 } 1273 }
909 1274
1275 if (IS_HASWELL(dev)) {
1276 spin_lock(&dev_priv->irq_lock);
1277 if (ivb_can_enable_err_int(dev))
1278 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1279 spin_unlock(&dev_priv->irq_lock);
1280 }
1281
910 I915_WRITE(DEIER, de_ier); 1282 I915_WRITE(DEIER, de_ier);
911 POSTING_READ(DEIER); 1283 POSTING_READ(DEIER);
912 if (!HAS_PCH_NOP(dev)) { 1284 if (!HAS_PCH_NOP(dev)) {
@@ -921,9 +1293,10 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
921 struct drm_i915_private *dev_priv, 1293 struct drm_i915_private *dev_priv,
922 u32 gt_iir) 1294 u32 gt_iir)
923{ 1295{
924 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 1296 if (gt_iir &
1297 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
925 notify_ring(dev, &dev_priv->ring[RCS]); 1298 notify_ring(dev, &dev_priv->ring[RCS]);
926 if (gt_iir & GT_BSD_USER_INTERRUPT) 1299 if (gt_iir & ILK_BSD_USER_INTERRUPT)
927 notify_ring(dev, &dev_priv->ring[VCS]); 1300 notify_ring(dev, &dev_priv->ring[VCS]);
928} 1301}
929 1302
@@ -968,7 +1341,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
968 dp_aux_irq_handler(dev); 1341 dp_aux_irq_handler(dev);
969 1342
970 if (de_iir & DE_GSE) 1343 if (de_iir & DE_GSE)
971 intel_opregion_gse_intr(dev); 1344 intel_opregion_asle_intr(dev);
972 1345
973 if (de_iir & DE_PIPEA_VBLANK) 1346 if (de_iir & DE_PIPEA_VBLANK)
974 drm_handle_vblank(dev, 0); 1347 drm_handle_vblank(dev, 0);
@@ -976,6 +1349,17 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
976 if (de_iir & DE_PIPEB_VBLANK) 1349 if (de_iir & DE_PIPEB_VBLANK)
977 drm_handle_vblank(dev, 1); 1350 drm_handle_vblank(dev, 1);
978 1351
1352 if (de_iir & DE_POISON)
1353 DRM_ERROR("Poison interrupt\n");
1354
1355 if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1356 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1357 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1358
1359 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1360 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1361 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1362
979 if (de_iir & DE_PLANEA_FLIP_DONE) { 1363 if (de_iir & DE_PLANEA_FLIP_DONE) {
980 intel_prepare_page_flip(dev, 0); 1364 intel_prepare_page_flip(dev, 0);
981 intel_finish_page_flip_plane(dev, 0); 1365 intel_finish_page_flip_plane(dev, 0);
@@ -1002,7 +1386,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1002 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1386 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1003 ironlake_handle_rps_change(dev); 1387 ironlake_handle_rps_change(dev);
1004 1388
1005 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 1389 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1006 gen6_queue_rps_work(dev_priv, pm_iir); 1390 gen6_queue_rps_work(dev_priv, pm_iir);
1007 1391
1008 I915_WRITE(GTIIR, gt_iir); 1392 I915_WRITE(GTIIR, gt_iir);
@@ -1222,11 +1606,13 @@ i915_error_state_free(struct kref *error_ref)
1222 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 1606 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1223 i915_error_object_free(error->ring[i].batchbuffer); 1607 i915_error_object_free(error->ring[i].batchbuffer);
1224 i915_error_object_free(error->ring[i].ringbuffer); 1608 i915_error_object_free(error->ring[i].ringbuffer);
1609 i915_error_object_free(error->ring[i].ctx);
1225 kfree(error->ring[i].requests); 1610 kfree(error->ring[i].requests);
1226 } 1611 }
1227 1612
1228 kfree(error->active_bo); 1613 kfree(error->active_bo);
1229 kfree(error->overlay); 1614 kfree(error->overlay);
1615 kfree(error->display);
1230 kfree(error); 1616 kfree(error);
1231} 1617}
1232static void capture_bo(struct drm_i915_error_buffer *err, 1618static void capture_bo(struct drm_i915_error_buffer *err,
@@ -1273,7 +1659,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1273 struct drm_i915_gem_object *obj; 1659 struct drm_i915_gem_object *obj;
1274 int i = 0; 1660 int i = 0;
1275 1661
1276 list_for_each_entry(obj, head, gtt_list) { 1662 list_for_each_entry(obj, head, global_list) {
1277 if (obj->pin_count == 0) 1663 if (obj->pin_count == 0)
1278 continue; 1664 continue;
1279 1665
@@ -1415,7 +1801,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1415 if (ring->id != RCS || !error->ccid) 1801 if (ring->id != RCS || !error->ccid)
1416 return; 1802 return;
1417 1803
1418 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 1804 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1419 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { 1805 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1420 ering->ctx = i915_error_object_create_sized(dev_priv, 1806 ering->ctx = i915_error_object_create_sized(dev_priv,
1421 obj, 1); 1807 obj, 1);
@@ -1552,7 +1938,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1552 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1938 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1553 i++; 1939 i++;
1554 error->active_bo_count = i; 1940 error->active_bo_count = i;
1555 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 1941 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1556 if (obj->pin_count) 1942 if (obj->pin_count)
1557 i++; 1943 i++;
1558 error->pinned_bo_count = i - error->active_bo_count; 1944 error->pinned_bo_count = i - error->active_bo_count;
@@ -1932,38 +2318,28 @@ ring_last_seqno(struct intel_ring_buffer *ring)
1932 struct drm_i915_gem_request, list)->seqno; 2318 struct drm_i915_gem_request, list)->seqno;
1933} 2319}
1934 2320
1935static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 2321static bool
2322ring_idle(struct intel_ring_buffer *ring, u32 seqno)
1936{ 2323{
1937 if (list_empty(&ring->request_list) || 2324 return (list_empty(&ring->request_list) ||
1938 i915_seqno_passed(ring->get_seqno(ring, false), 2325 i915_seqno_passed(seqno, ring_last_seqno(ring)));
1939 ring_last_seqno(ring))) {
1940 /* Issue a wake-up to catch stuck h/w. */
1941 if (waitqueue_active(&ring->irq_queue)) {
1942 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1943 ring->name);
1944 wake_up_all(&ring->irq_queue);
1945 *err = true;
1946 }
1947 return true;
1948 }
1949 return false;
1950} 2326}
1951 2327
1952static bool semaphore_passed(struct intel_ring_buffer *ring) 2328static struct intel_ring_buffer *
2329semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
1953{ 2330{
1954 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2331 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1955 u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2332 u32 cmd, ipehr, acthd, acthd_min;
1956 struct intel_ring_buffer *signaller;
1957 u32 cmd, ipehr, acthd_min;
1958 2333
1959 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2334 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1960 if ((ipehr & ~(0x3 << 16)) != 2335 if ((ipehr & ~(0x3 << 16)) !=
1961 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2336 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1962 return false; 2337 return NULL;
1963 2338
1964 /* ACTHD is likely pointing to the dword after the actual command, 2339 /* ACTHD is likely pointing to the dword after the actual command,
1965 * so scan backwards until we find the MBOX. 2340 * so scan backwards until we find the MBOX.
1966 */ 2341 */
2342 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1967 acthd_min = max((int)acthd - 3 * 4, 0); 2343 acthd_min = max((int)acthd - 3 * 4, 0);
1968 do { 2344 do {
1969 cmd = ioread32(ring->virtual_start + acthd); 2345 cmd = ioread32(ring->virtual_start + acthd);
@@ -1972,124 +2348,216 @@ static bool semaphore_passed(struct intel_ring_buffer *ring)
1972 2348
1973 acthd -= 4; 2349 acthd -= 4;
1974 if (acthd < acthd_min) 2350 if (acthd < acthd_min)
1975 return false; 2351 return NULL;
1976 } while (1); 2352 } while (1);
1977 2353
1978 signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2354 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
1979 return i915_seqno_passed(signaller->get_seqno(signaller, false), 2355 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1980 ioread32(ring->virtual_start+acthd+4)+1);
1981} 2356}
1982 2357
1983static bool kick_ring(struct intel_ring_buffer *ring) 2358static int semaphore_passed(struct intel_ring_buffer *ring)
1984{ 2359{
1985 struct drm_device *dev = ring->dev; 2360 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1986 struct drm_i915_private *dev_priv = dev->dev_private; 2361 struct intel_ring_buffer *signaller;
1987 u32 tmp = I915_READ_CTL(ring); 2362 u32 seqno, ctl;
1988 if (tmp & RING_WAIT) {
1989 DRM_ERROR("Kicking stuck wait on %s\n",
1990 ring->name);
1991 I915_WRITE_CTL(ring, tmp);
1992 return true;
1993 }
1994 2363
1995 if (INTEL_INFO(dev)->gen >= 6 && 2364 ring->hangcheck.deadlock = true;
1996 tmp & RING_WAIT_SEMAPHORE && 2365
1997 semaphore_passed(ring)) { 2366 signaller = semaphore_waits_for(ring, &seqno);
1998 DRM_ERROR("Kicking stuck semaphore on %s\n", 2367 if (signaller == NULL || signaller->hangcheck.deadlock)
1999 ring->name); 2368 return -1;
2000 I915_WRITE_CTL(ring, tmp); 2369
2001 return true; 2370 /* cursory check for an unkickable deadlock */
2002 } 2371 ctl = I915_READ_CTL(signaller);
2003 return false; 2372 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2373 return -1;
2374
2375 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2004} 2376}
2005 2377
2006static bool i915_hangcheck_hung(struct drm_device *dev) 2378static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2007{ 2379{
2008 drm_i915_private_t *dev_priv = dev->dev_private; 2380 struct intel_ring_buffer *ring;
2009 2381 int i;
2010 if (dev_priv->gpu_error.hangcheck_count++ > 1) {
2011 bool hung = true;
2012 2382
2013 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 2383 for_each_ring(ring, dev_priv, i)
2014 i915_handle_error(dev, true); 2384 ring->hangcheck.deadlock = false;
2385}
2015 2386
2016 if (!IS_GEN2(dev)) { 2387static enum intel_ring_hangcheck_action
2017 struct intel_ring_buffer *ring; 2388ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2018 int i; 2389{
2390 struct drm_device *dev = ring->dev;
2391 struct drm_i915_private *dev_priv = dev->dev_private;
2392 u32 tmp;
2019 2393
2020 /* Is the chip hanging on a WAIT_FOR_EVENT? 2394 if (ring->hangcheck.acthd != acthd)
2021 * If so we can simply poke the RB_WAIT bit 2395 return active;
2022 * and break the hang. This should work on
2023 * all but the second generation chipsets.
2024 */
2025 for_each_ring(ring, dev_priv, i)
2026 hung &= !kick_ring(ring);
2027 }
2028 2396
2397 if (IS_GEN2(dev))
2029 return hung; 2398 return hung;
2399
2400 /* Is the chip hanging on a WAIT_FOR_EVENT?
2401 * If so we can simply poke the RB_WAIT bit
2402 * and break the hang. This should work on
2403 * all but the second generation chipsets.
2404 */
2405 tmp = I915_READ_CTL(ring);
2406 if (tmp & RING_WAIT) {
2407 DRM_ERROR("Kicking stuck wait on %s\n",
2408 ring->name);
2409 I915_WRITE_CTL(ring, tmp);
2410 return kick;
2030 } 2411 }
2031 2412
2032 return false; 2413 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2414 switch (semaphore_passed(ring)) {
2415 default:
2416 return hung;
2417 case 1:
2418 DRM_ERROR("Kicking stuck semaphore on %s\n",
2419 ring->name);
2420 I915_WRITE_CTL(ring, tmp);
2421 return kick;
2422 case 0:
2423 return wait;
2424 }
2425 }
2426
2427 return hung;
2033} 2428}
2034 2429
2035/** 2430/**
2036 * This is called when the chip hasn't reported back with completed 2431 * This is called when the chip hasn't reported back with completed
2037 * batchbuffers in a long time. The first time this is called we simply record 2432 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2038 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 2433 * if there is no progress, the hangcheck score for that ring is increased.
2039 * again, we assume the chip is wedged and try to fix it. 2434 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
 2435 * we kick the ring. If we see no progress across three subsequent calls
 2436 * we assume the chip is wedged and try to fix it by resetting the chip.
2040 */ 2437 */
2041void i915_hangcheck_elapsed(unsigned long data) 2438void i915_hangcheck_elapsed(unsigned long data)
2042{ 2439{
2043 struct drm_device *dev = (struct drm_device *)data; 2440 struct drm_device *dev = (struct drm_device *)data;
2044 drm_i915_private_t *dev_priv = dev->dev_private; 2441 drm_i915_private_t *dev_priv = dev->dev_private;
2045 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
2046 struct intel_ring_buffer *ring; 2442 struct intel_ring_buffer *ring;
2047 bool err = false, idle;
2048 int i; 2443 int i;
2444 int busy_count = 0, rings_hung = 0;
2445 bool stuck[I915_NUM_RINGS] = { 0 };
2446#define BUSY 1
2447#define KICK 5
2448#define HUNG 20
2449#define FIRE 30
2049 2450
2050 if (!i915_enable_hangcheck) 2451 if (!i915_enable_hangcheck)
2051 return; 2452 return;
2052 2453
2053 memset(acthd, 0, sizeof(acthd));
2054 idle = true;
2055 for_each_ring(ring, dev_priv, i) { 2454 for_each_ring(ring, dev_priv, i) {
2056 idle &= i915_hangcheck_ring_idle(ring, &err); 2455 u32 seqno, acthd;
2057 acthd[i] = intel_ring_get_active_head(ring); 2456 bool busy = true;
2058 } 2457
2458 semaphore_clear_deadlocks(dev_priv);
2459
2460 seqno = ring->get_seqno(ring, false);
2461 acthd = intel_ring_get_active_head(ring);
2462
2463 if (ring->hangcheck.seqno == seqno) {
2464 if (ring_idle(ring, seqno)) {
2465 if (waitqueue_active(&ring->irq_queue)) {
2466 /* Issue a wake-up to catch stuck h/w. */
2467 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2468 ring->name);
2469 wake_up_all(&ring->irq_queue);
2470 ring->hangcheck.score += HUNG;
2471 } else
2472 busy = false;
2473 } else {
2474 int score;
2475
2476 /* We always increment the hangcheck score
2477 * if the ring is busy and still processing
2478 * the same request, so that no single request
2479 * can run indefinitely (such as a chain of
2480 * batches). The only time we do not increment
 2481 * the hangcheck score on this ring is when it
 2482 * is in a legitimate wait for another
2483 * ring. In that case the waiting ring is a
2484 * victim and we want to be sure we catch the
 2485 * right culprit. Then every time we kick
 2486 * the ring, we add a small increment to the
2487 * score so that we can catch a batch that is
2488 * being repeatedly kicked and so responsible
2489 * for stalling the machine.
2490 */
2491 ring->hangcheck.action = ring_stuck(ring,
2492 acthd);
2493
2494 switch (ring->hangcheck.action) {
2495 case wait:
2496 score = 0;
2497 break;
2498 case active:
2499 score = BUSY;
2500 break;
2501 case kick:
2502 score = KICK;
2503 break;
2504 case hung:
2505 score = HUNG;
2506 stuck[i] = true;
2507 break;
2508 }
2509 ring->hangcheck.score += score;
2510 }
2511 } else {
2512 /* Gradually reduce the count so that we catch DoS
2513 * attempts across multiple batches.
2514 */
2515 if (ring->hangcheck.score > 0)
2516 ring->hangcheck.score--;
2517 }
2059 2518
2060 /* If all work is done then ACTHD clearly hasn't advanced. */ 2519 ring->hangcheck.seqno = seqno;
2061 if (idle) { 2520 ring->hangcheck.acthd = acthd;
2062 if (err) { 2521 busy_count += busy;
2063 if (i915_hangcheck_hung(dev)) 2522 }
2064 return;
2065 2523
2066 goto repeat; 2524 for_each_ring(ring, dev_priv, i) {
2525 if (ring->hangcheck.score > FIRE) {
2526 DRM_ERROR("%s on %s\n",
2527 stuck[i] ? "stuck" : "no progress",
2528 ring->name);
2529 rings_hung++;
2067 } 2530 }
2068
2069 dev_priv->gpu_error.hangcheck_count = 0;
2070 return;
2071 } 2531 }
2072 2532
2073 i915_get_extra_instdone(dev, instdone); 2533 if (rings_hung)
2074 if (memcmp(dev_priv->gpu_error.last_acthd, acthd, 2534 return i915_handle_error(dev, true);
2075 sizeof(acthd)) == 0 &&
2076 memcmp(dev_priv->gpu_error.prev_instdone, instdone,
2077 sizeof(instdone)) == 0) {
2078 if (i915_hangcheck_hung(dev))
2079 return;
2080 } else {
2081 dev_priv->gpu_error.hangcheck_count = 0;
2082 2535
2083 memcpy(dev_priv->gpu_error.last_acthd, acthd, 2536 if (busy_count)
2084 sizeof(acthd)); 2537 /* Reset timer in case the chip hangs without another request
2085 memcpy(dev_priv->gpu_error.prev_instdone, instdone, 2538 * being added */
2086 sizeof(instdone)); 2539 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2087 } 2540 round_jiffies_up(jiffies +
2541 DRM_I915_HANGCHECK_JIFFIES));
2542}
2543
2544static void ibx_irq_preinstall(struct drm_device *dev)
2545{
2546 struct drm_i915_private *dev_priv = dev->dev_private;
2547
2548 if (HAS_PCH_NOP(dev))
2549 return;
2088 2550
2089repeat: 2551 /* south display irq */
2090 /* Reset timer case chip hangs without another request being added */ 2552 I915_WRITE(SDEIMR, 0xffffffff);
2091 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2553 /*
2092 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2554 * SDEIER is also touched by the interrupt handler to work around missed
2555 * PCH interrupts. Hence we can't update it after the interrupt handler
2556 * is enabled - instead we unconditionally enable all PCH interrupt
2557 * sources here, but then only unmask them as needed with SDEIMR.
2558 */
2559 I915_WRITE(SDEIER, 0xffffffff);
2560 POSTING_READ(SDEIER);
2093} 2561}
2094 2562
2095/* drm_dma.h hooks 2563/* drm_dma.h hooks
@@ -2113,19 +2581,34 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
2113 I915_WRITE(GTIER, 0x0); 2581 I915_WRITE(GTIER, 0x0);
2114 POSTING_READ(GTIER); 2582 POSTING_READ(GTIER);
2115 2583
2116 if (HAS_PCH_NOP(dev)) 2584 ibx_irq_preinstall(dev);
2117 return; 2585}
2118 2586
2119 /* south display irq */ 2587static void ivybridge_irq_preinstall(struct drm_device *dev)
2120 I915_WRITE(SDEIMR, 0xffffffff); 2588{
2121 /* 2589 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2122 * SDEIER is also touched by the interrupt handler to work around missed 2590
2123 * PCH interrupts. Hence we can't update it after the interrupt handler 2591 atomic_set(&dev_priv->irq_received, 0);
2124 * is enabled - instead we unconditionally enable all PCH interrupt 2592
2125 * sources here, but then only unmask them as needed with SDEIMR. 2593 I915_WRITE(HWSTAM, 0xeffe);
2126 */ 2594
2127 I915_WRITE(SDEIER, 0xffffffff); 2595 /* XXX hotplug from PCH */
2128 POSTING_READ(SDEIER); 2596
2597 I915_WRITE(DEIMR, 0xffffffff);
2598 I915_WRITE(DEIER, 0x0);
2599 POSTING_READ(DEIER);
2600
2601 /* and GT */
2602 I915_WRITE(GTIMR, 0xffffffff);
2603 I915_WRITE(GTIER, 0x0);
2604 POSTING_READ(GTIER);
2605
2606 /* Power management */
2607 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2608 I915_WRITE(GEN6_PMIER, 0x0);
2609 POSTING_READ(GEN6_PMIER);
2610
2611 ibx_irq_preinstall(dev);
2129} 2612}
2130 2613
2131static void valleyview_irq_preinstall(struct drm_device *dev) 2614static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2201,33 +2684,41 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2201 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2684 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2202 u32 mask; 2685 u32 mask;
2203 2686
2204 if (HAS_PCH_IBX(dev))
2205 mask = SDE_GMBUS | SDE_AUX_MASK;
2206 else
2207 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2208
2209 if (HAS_PCH_NOP(dev)) 2687 if (HAS_PCH_NOP(dev))
2210 return; 2688 return;
2211 2689
2690 if (HAS_PCH_IBX(dev)) {
2691 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2692 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2693 } else {
2694 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2695
2696 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2697 }
2698
2212 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2699 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2213 I915_WRITE(SDEIMR, ~mask); 2700 I915_WRITE(SDEIMR, ~mask);
2214} 2701}
2215 2702
2216static int ironlake_irq_postinstall(struct drm_device *dev) 2703static int ironlake_irq_postinstall(struct drm_device *dev)
2217{ 2704{
2705 unsigned long irqflags;
2706
2218 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2219 /* enable the kinds of interrupts that are always enabled */ 2708 /* enable the kinds of interrupts that are always enabled */
2220 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2709 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2221 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2710 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2222 DE_AUX_CHANNEL_A; 2711 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2223 u32 render_irqs; 2712 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2713 u32 gt_irqs;
2224 2714
2225 dev_priv->irq_mask = ~display_mask; 2715 dev_priv->irq_mask = ~display_mask;
2226 2716
2227 /* should always be able to generate an irq */ 2717 /* should always be able to generate an irq */
2228 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2718 I915_WRITE(DEIIR, I915_READ(DEIIR));
2229 I915_WRITE(DEIMR, dev_priv->irq_mask); 2719 I915_WRITE(DEIMR, dev_priv->irq_mask);
2230 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 2720 I915_WRITE(DEIER, display_mask |
2721 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2231 POSTING_READ(DEIER); 2722 POSTING_READ(DEIER);
2232 2723
2233 dev_priv->gt_irq_mask = ~0; 2724 dev_priv->gt_irq_mask = ~0;
@@ -2235,26 +2726,28 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2235 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2726 I915_WRITE(GTIIR, I915_READ(GTIIR));
2236 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2727 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2237 2728
2729 gt_irqs = GT_RENDER_USER_INTERRUPT;
2730
2238 if (IS_GEN6(dev)) 2731 if (IS_GEN6(dev))
2239 render_irqs = 2732 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2240 GT_USER_INTERRUPT |
2241 GEN6_BSD_USER_INTERRUPT |
2242 GEN6_BLITTER_USER_INTERRUPT;
2243 else 2733 else
2244 render_irqs = 2734 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2245 GT_USER_INTERRUPT | 2735 ILK_BSD_USER_INTERRUPT;
2246 GT_PIPE_NOTIFY | 2736
2247 GT_BSD_USER_INTERRUPT; 2737 I915_WRITE(GTIER, gt_irqs);
2248 I915_WRITE(GTIER, render_irqs);
2249 POSTING_READ(GTIER); 2738 POSTING_READ(GTIER);
2250 2739
2251 ibx_irq_postinstall(dev); 2740 ibx_irq_postinstall(dev);
2252 2741
2253 if (IS_IRONLAKE_M(dev)) { 2742 if (IS_IRONLAKE_M(dev)) {
2254 /* Clear & enable PCU event interrupts */ 2743 /* Enable PCU event interrupts
2255 I915_WRITE(DEIIR, DE_PCU_EVENT); 2744 *
2256 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 2745 * spinlocking not required here for correctness since interrupt
2746 * setup is guaranteed to run in single-threaded context. But we
2747 * need it to make the assert_spin_locked happy. */
2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2257 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2749 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2258 } 2751 }
2259 2752
2260 return 0; 2753 return 0;
@@ -2269,12 +2762,15 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2269 DE_PLANEC_FLIP_DONE_IVB | 2762 DE_PLANEC_FLIP_DONE_IVB |
2270 DE_PLANEB_FLIP_DONE_IVB | 2763 DE_PLANEB_FLIP_DONE_IVB |
2271 DE_PLANEA_FLIP_DONE_IVB | 2764 DE_PLANEA_FLIP_DONE_IVB |
2272 DE_AUX_CHANNEL_A_IVB; 2765 DE_AUX_CHANNEL_A_IVB |
2273 u32 render_irqs; 2766 DE_ERR_INT_IVB;
2767 u32 pm_irqs = GEN6_PM_RPS_EVENTS;
2768 u32 gt_irqs;
2274 2769
2275 dev_priv->irq_mask = ~display_mask; 2770 dev_priv->irq_mask = ~display_mask;
2276 2771
2277 /* should always be able to generate an irq */ 2772 /* should always be able to generate an irq */
2773 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2278 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2774 I915_WRITE(DEIIR, I915_READ(DEIIR));
2279 I915_WRITE(DEIMR, dev_priv->irq_mask); 2775 I915_WRITE(DEIMR, dev_priv->irq_mask);
2280 I915_WRITE(DEIER, 2776 I915_WRITE(DEIER,
@@ -2284,16 +2780,32 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2284 DE_PIPEA_VBLANK_IVB); 2780 DE_PIPEA_VBLANK_IVB);
2285 POSTING_READ(DEIER); 2781 POSTING_READ(DEIER);
2286 2782
2287 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2783 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2288 2784
2289 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2785 I915_WRITE(GTIIR, I915_READ(GTIIR));
2290 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2786 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2291 2787
2292 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2788 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2293 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2789 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2294 I915_WRITE(GTIER, render_irqs); 2790 I915_WRITE(GTIER, gt_irqs);
2295 POSTING_READ(GTIER); 2791 POSTING_READ(GTIER);
2296 2792
2793 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2794 if (HAS_VEBOX(dev))
2795 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2796 PM_VEBOX_CS_ERROR_INTERRUPT;
2797
2798 /* Our enable/disable rps functions may touch these registers so
2799 * make sure to set a known state for only the non-RPS bits.
2800 * The RMW is extra paranoia since this should be called after being set
2801 * to a known state in preinstall.
2802 */
2803 I915_WRITE(GEN6_PMIMR,
2804 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2805 I915_WRITE(GEN6_PMIER,
2806 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2807 POSTING_READ(GEN6_PMIER);
2808
2297 ibx_irq_postinstall(dev); 2809 ibx_irq_postinstall(dev);
2298 2810
2299 return 0; 2811 return 0;
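A bit-math sketch of the PMIMR read-modify-write above (illustrative helper, not driver code). Every source outside GEN6_PM_RPS_EVENTS is forced to masked unless it is one of the pm_irqs being enabled, while the RPS mask bits keep whatever state the RPS code last gave them:

static u32 new_pmimr(u32 old_imr, u32 rps_events, u32 pm_irqs)
{
	/* Mask all non-RPS sources, keep RPS mask bits, unmask pm_irqs. */
	return (old_imr | ~rps_events) & ~pm_irqs;
}

/*
 * Example with hypothetical values rps_events = 0x00f0, pm_irqs = 0x1000:
 * the result is 0xffffef0f | (old_imr & 0x00f0), i.e. every non-RPS
 * source masked except bit 12, with the RPS mask bits carried over.
 */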
@@ -2302,10 +2814,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2302static int valleyview_irq_postinstall(struct drm_device *dev) 2814static int valleyview_irq_postinstall(struct drm_device *dev)
2303{ 2815{
2304 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2816 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2817 u32 gt_irqs;
2305 u32 enable_mask; 2818 u32 enable_mask;
2306 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2819 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2307 u32 render_irqs;
2308 u16 msid;
2309 2820
2310 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2821 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2311 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2822 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2321,13 +2832,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2321 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2832 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2322 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2833 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2323 2834
2324 /* Hack for broken MSIs on VLV */
2325 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2326 pci_read_config_word(dev->pdev, 0x98, &msid);
2327 msid &= 0xff; /* mask out delivery bits */
2328 msid |= (1<<14);
2329 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2330
2331 I915_WRITE(PORT_HOTPLUG_EN, 0); 2835 I915_WRITE(PORT_HOTPLUG_EN, 0);
2332 POSTING_READ(PORT_HOTPLUG_EN); 2836 POSTING_READ(PORT_HOTPLUG_EN);
2333 2837
@@ -2348,9 +2852,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2348 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2852 I915_WRITE(GTIIR, I915_READ(GTIIR));
2349 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2853 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2350 2854
2351 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2855 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2352 GEN6_BLITTER_USER_INTERRUPT; 2856 GT_BLT_USER_INTERRUPT;
2353 I915_WRITE(GTIER, render_irqs); 2857 I915_WRITE(GTIER, gt_irqs);
2354 POSTING_READ(GTIER); 2858 POSTING_READ(GTIER);
2355 2859
2356 /* ack & enable invalid PTE error interrupts */ 2860 /* ack & enable invalid PTE error interrupts */
@@ -2402,6 +2906,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
2402 I915_WRITE(DEIMR, 0xffffffff); 2906 I915_WRITE(DEIMR, 0xffffffff);
2403 I915_WRITE(DEIER, 0x0); 2907 I915_WRITE(DEIER, 0x0);
2404 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2908 I915_WRITE(DEIIR, I915_READ(DEIIR));
2909 if (IS_GEN7(dev))
2910 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2405 2911
2406 I915_WRITE(GTIMR, 0xffffffff); 2912 I915_WRITE(GTIMR, 0xffffffff);
2407 I915_WRITE(GTIER, 0x0); 2913 I915_WRITE(GTIER, 0x0);
@@ -2413,6 +2919,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
2413 I915_WRITE(SDEIMR, 0xffffffff); 2919 I915_WRITE(SDEIMR, 0xffffffff);
2414 I915_WRITE(SDEIER, 0x0); 2920 I915_WRITE(SDEIER, 0x0);
2415 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2921 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2922 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2923 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2416} 2924}
2417 2925
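The uninstall path above leans on the fact that the IIR-style registers (DEIIR, SDEIIR, GEN7_ERR_INT, SERR_INT) are write-one-to-clear, so writing back the value just read acks every pending bit in one go. A small sketch of that idiom against a fake register, assuming plain W1C semantics:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iir = 0x00000042u; /* pretend pending interrupt bits */

static uint32_t reg_read(void) { return fake_iir; }
static void reg_write(uint32_t v) { fake_iir &= ~v; /* write-one-to-clear */ }

int main(void)
{
	reg_write(reg_read()); /* the I915_WRITE(REG, I915_READ(REG)) idiom */
	printf("IIR after ack: 0x%08x\n", fake_iir);
	return 0;
}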
2418static void i8xx_irq_preinstall(struct drm_device * dev) 2926static void i8xx_irq_preinstall(struct drm_device * dev)
@@ -2626,7 +3134,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2626 I915_WRITE(IER, enable_mask); 3134 I915_WRITE(IER, enable_mask);
2627 POSTING_READ(IER); 3135 POSTING_READ(IER);
2628 3136
2629 intel_opregion_enable_asle(dev); 3137 i915_enable_asle_pipestat(dev);
2630 3138
2631 return 0; 3139 return 0;
2632} 3140}
@@ -2715,12 +3223,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
2715 3223
2716 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3224 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2717 hotplug_status); 3225 hotplug_status);
2718 if (hotplug_trigger) { 3226
2719 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 3227 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
2720 i915_hpd_irq_setup(dev); 3228
2721 queue_work(dev_priv->wq,
2722 &dev_priv->hotplug_work);
2723 }
2724 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3229 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2725 POSTING_READ(PORT_HOTPLUG_STAT); 3230 POSTING_READ(PORT_HOTPLUG_STAT);
2726 } 3231 }
@@ -2860,7 +3365,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
2860 I915_WRITE(PORT_HOTPLUG_EN, 0); 3365 I915_WRITE(PORT_HOTPLUG_EN, 0);
2861 POSTING_READ(PORT_HOTPLUG_EN); 3366 POSTING_READ(PORT_HOTPLUG_EN);
2862 3367
2863 intel_opregion_enable_asle(dev); 3368 i915_enable_asle_pipestat(dev);
2864 3369
2865 return 0; 3370 return 0;
2866} 3371}
@@ -2872,6 +3377,8 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
2872 struct intel_encoder *intel_encoder; 3377 struct intel_encoder *intel_encoder;
2873 u32 hotplug_en; 3378 u32 hotplug_en;
2874 3379
3380 assert_spin_locked(&dev_priv->irq_lock);
3381
2875 if (I915_HAS_HOTPLUG(dev)) { 3382 if (I915_HAS_HOTPLUG(dev)) {
2876 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3383 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2877 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3384 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
@@ -2952,17 +3459,14 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
2952 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3459 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2953 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3460 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2954 HOTPLUG_INT_STATUS_G4X : 3461 HOTPLUG_INT_STATUS_G4X :
2955 HOTPLUG_INT_STATUS_I965); 3462 HOTPLUG_INT_STATUS_I915);
2956 3463
2957 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3464 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2958 hotplug_status); 3465 hotplug_status);
2959 if (hotplug_trigger) { 3466
2960 if (hotplug_irq_storm_detect(dev, hotplug_trigger, 3467 intel_hpd_irq_handler(dev, hotplug_trigger,
2961 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965)) 3468 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
2962 i915_hpd_irq_setup(dev); 3469
2963 queue_work(dev_priv->wq,
2964 &dev_priv->hotplug_work);
2965 }
2966 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3470 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2967 I915_READ(PORT_HOTPLUG_STAT); 3471 I915_READ(PORT_HOTPLUG_STAT);
2968 } 3472 }
@@ -3113,9 +3617,9 @@ void intel_irq_init(struct drm_device *dev)
3113 dev->driver->disable_vblank = valleyview_disable_vblank; 3617 dev->driver->disable_vblank = valleyview_disable_vblank;
3114 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3618 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3115 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 3619 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3116 /* Share pre & uninstall handlers with ILK/SNB */ 3620 /* Share uninstall handlers with ILK/SNB */
3117 dev->driver->irq_handler = ivybridge_irq_handler; 3621 dev->driver->irq_handler = ivybridge_irq_handler;
3118 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3622 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3119 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 3623 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3120 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3624 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3121 dev->driver->enable_vblank = ivybridge_enable_vblank; 3625 dev->driver->enable_vblank = ivybridge_enable_vblank;
@@ -3158,6 +3662,7 @@ void intel_hpd_init(struct drm_device *dev)
3158 struct drm_i915_private *dev_priv = dev->dev_private; 3662 struct drm_i915_private *dev_priv = dev->dev_private;
3159 struct drm_mode_config *mode_config = &dev->mode_config; 3663 struct drm_mode_config *mode_config = &dev->mode_config;
3160 struct drm_connector *connector; 3664 struct drm_connector *connector;
3665 unsigned long irqflags;
3161 int i; 3666 int i;
3162 3667
3163 for (i = 1; i < HPD_NUM_PINS; i++) { 3668 for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -3170,6 +3675,11 @@ void intel_hpd_init(struct drm_device *dev)
3170 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3675 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3171 connector->polled = DRM_CONNECTOR_POLL_HPD; 3676 connector->polled = DRM_CONNECTOR_POLL_HPD;
3172 } 3677 }
3678
 3679 /* Interrupt setup is already guaranteed to be single-threaded; this is
3680 * just to make the assert_spin_locked checks happy. */
3681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3173 if (dev_priv->display.hpd_irq_setup) 3682 if (dev_priv->display.hpd_irq_setup)
3174 dev_priv->display.hpd_irq_setup(dev); 3683 dev_priv->display.hpd_irq_setup(dev);
3684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3175} 3685}
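The comment in intel_hpd_init() spells out the pattern: even on a single-threaded init path, take the lock so that assert_spin_locked() in the callback holds on every call path. A userspace analogue of the pattern, with a pthread mutex standing in for the irq_lock spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for dev_priv->display.hpd_irq_setup; in the kernel it would
 * start with assert_spin_locked(&dev_priv->irq_lock). */
static void hpd_irq_setup(void)
{
	printf("programming hotplug registers under irq_lock\n");
}

int main(void)
{
	/* Init is single-threaded, but the lock is taken anyway so the
	 * callee's locking assertion is satisfied on every call path. */
	pthread_mutex_lock(&irq_lock);
	hpd_irq_setup();
	pthread_mutex_unlock(&irq_lock);
	return 0;
}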
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d6b62e42daf..f2326fc60ac9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -147,15 +147,9 @@
147#define VGA_MSR_MEM_EN (1<<1) 147#define VGA_MSR_MEM_EN (1<<1)
148#define VGA_MSR_CGA_MODE (1<<0) 148#define VGA_MSR_CGA_MODE (1<<0)
149 149
150/* 150#define VGA_SR_INDEX 0x3c4
151 * SR01 is the only VGA register touched on non-UMS setups.
152 * VLV doesn't do UMS, so the sequencer index/data registers
153 * are the only VGA registers which need to include
154 * display_mmio_offset.
155 */
156#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
157#define SR01 1 151#define SR01 1
158#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5) 152#define VGA_SR_DATA 0x3c5
159 153
160#define VGA_AR_INDEX 0x3c0 154#define VGA_AR_INDEX 0x3c0
161#define VGA_AR_VID_EN (1<<5) 155#define VGA_AR_VID_EN (1<<5)
@@ -265,13 +259,19 @@
265#define MI_SEMAPHORE_UPDATE (1<<21) 259#define MI_SEMAPHORE_UPDATE (1<<21)
266#define MI_SEMAPHORE_COMPARE (1<<20) 260#define MI_SEMAPHORE_COMPARE (1<<20)
267#define MI_SEMAPHORE_REGISTER (1<<18) 261#define MI_SEMAPHORE_REGISTER (1<<18)
268#define MI_SEMAPHORE_SYNC_RV (2<<16) 262#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
269#define MI_SEMAPHORE_SYNC_RB (0<<16) 263#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
270#define MI_SEMAPHORE_SYNC_VR (0<<16) 264#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
271#define MI_SEMAPHORE_SYNC_VB (2<<16) 265#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
272#define MI_SEMAPHORE_SYNC_BR (2<<16) 266#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
273#define MI_SEMAPHORE_SYNC_BV (0<<16) 267#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
274#define MI_SEMAPHORE_SYNC_INVALID (1<<0) 268#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
269#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
270#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
271#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
272#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
273#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
274#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
275/* 275/*
276 * 3D instructions used by the kernel 276 * 3D instructions used by the kernel
277 */ 277 */
@@ -342,33 +342,74 @@
342#define DEBUG_RESET_DISPLAY (1<<9) 342#define DEBUG_RESET_DISPLAY (1<<9)
343 343
344/* 344/*
345 * DPIO - a special bus for various display related registers to hide behind: 345 * IOSF sideband
346 * 0x800c: m1, m2, n, p1, p2, k dividers 346 */
347 * 0x8014: REF and SFR select 347#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
348 * 0x8014: N divider, VCO select 348#define IOSF_DEVFN_SHIFT 24
349 * 0x801c/3c: core clock bits 349#define IOSF_OPCODE_SHIFT 16
350 * 0x8048/68: low pass filter coefficients 350#define IOSF_PORT_SHIFT 8
351 * 0x8100: fast clock controls 351#define IOSF_BYTE_ENABLES_SHIFT 4
352#define IOSF_BAR_SHIFT 1
353#define IOSF_SB_BUSY (1<<0)
354#define IOSF_PORT_PUNIT 0x4
355#define IOSF_PORT_NC 0x11
356#define IOSF_PORT_DPIO 0x12
357#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
358#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
359
360#define PUNIT_OPCODE_REG_READ 6
361#define PUNIT_OPCODE_REG_WRITE 7
362
363#define PUNIT_REG_GPU_LFM 0xd3
364#define PUNIT_REG_GPU_FREQ_REQ 0xd4
365#define PUNIT_REG_GPU_FREQ_STS 0xd8
366#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
367
368#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
369#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
370
371#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
372#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
373#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
374#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
375#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
376#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
377#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
378#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
379#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
380#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
381
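A sketch of how the shift and port defines above compose into an IOSF doorbell request word; the devfn/bar values and the exact busy-bit handshake are assumptions here, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define IOSF_DEVFN_SHIFT        24
#define IOSF_OPCODE_SHIFT       16
#define IOSF_PORT_SHIFT          8
#define IOSF_BYTE_ENABLES_SHIFT  4
#define IOSF_BAR_SHIFT           1
#define IOSF_PORT_PUNIT         0x4
#define PUNIT_OPCODE_REG_READ   6

int main(void)
{
	/* "Read a punit register" request with all four byte enables set. */
	uint32_t cmd = (0u << IOSF_DEVFN_SHIFT) |
		       ((uint32_t)PUNIT_OPCODE_REG_READ << IOSF_OPCODE_SHIFT) |
		       ((uint32_t)IOSF_PORT_PUNIT << IOSF_PORT_SHIFT) |
		       (0xfu << IOSF_BYTE_ENABLES_SHIFT) |
		       (0u << IOSF_BAR_SHIFT);

	/* The driver would write the target address to VLV_IOSF_ADDR, this
	 * word to VLV_IOSF_DOORBELL_REQ, then poll IOSF_SB_BUSY. */
	printf("doorbell word: 0x%08x\n", cmd);
	return 0;
}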
382/*
383 * DPIO - a special bus for various display related registers to hide behind
352 * 384 *
353 * DPIO is VLV only. 385 * DPIO is VLV only.
386 *
 387 * Note: digital port B is DDI0, digital port C is DDI1
354 */ 388 */
355#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100) 389#define DPIO_DEVFN 0
356#define DPIO_RID (0<<24) 390#define DPIO_OPCODE_REG_WRITE 1
357#define DPIO_OP_WRITE (1<<16) 391#define DPIO_OPCODE_REG_READ 0
358#define DPIO_OP_READ (0<<16) 392
359#define DPIO_PORTID (0x12<<8)
360#define DPIO_BYTE (0xf<<4)
361#define DPIO_BUSY (1<<0) /* status only */
362#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
363#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
364#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) 393#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
365#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 394#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
366#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 395#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
367#define DPIO_SFR_BYPASS (1<<1) 396#define DPIO_SFR_BYPASS (1<<1)
368#define DPIO_RESET (1<<0) 397#define DPIO_RESET (1<<0)
369 398
399#define _DPIO_TX3_SWING_CTL4_A 0x690
400#define _DPIO_TX3_SWING_CTL4_B 0x2a90
 401#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
402 _DPIO_TX3_SWING_CTL4_B)
403
404/*
405 * Per pipe/PLL DPIO regs
406 */
370#define _DPIO_DIV_A 0x800c 407#define _DPIO_DIV_A 0x800c
371#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ 408#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
409#define DPIO_POST_DIV_DAC 0
410#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
411#define DPIO_POST_DIV_LVDS1 2
412#define DPIO_POST_DIV_LVDS2 3
372#define DPIO_K_SHIFT (24) /* 4 bits */ 413#define DPIO_K_SHIFT (24) /* 4 bits */
373#define DPIO_P1_SHIFT (21) /* 3 bits */ 414#define DPIO_P1_SHIFT (21) /* 3 bits */
374#define DPIO_P2_SHIFT (16) /* 5 bits */ 415#define DPIO_P2_SHIFT (16) /* 5 bits */
@@ -394,14 +435,111 @@
394#define _DPIO_CORE_CLK_B 0x803c 435#define _DPIO_CORE_CLK_B 0x803c
395#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B) 436#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
396 437
397#define _DPIO_LFP_COEFF_A 0x8048 438#define _DPIO_IREF_CTL_A 0x8040
398#define _DPIO_LFP_COEFF_B 0x8068 439#define _DPIO_IREF_CTL_B 0x8060
399#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B) 440#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
441
442#define DPIO_IREF_BCAST 0xc044
443#define _DPIO_IREF_A 0x8044
444#define _DPIO_IREF_B 0x8064
445#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
446
447#define _DPIO_PLL_CML_A 0x804c
448#define _DPIO_PLL_CML_B 0x806c
449#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
450
451#define _DPIO_LPF_COEFF_A 0x8048
452#define _DPIO_LPF_COEFF_B 0x8068
453#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
454
455#define DPIO_CALIBRATION 0x80ac
400 456
401#define DPIO_FASTCLK_DISABLE 0x8100 457#define DPIO_FASTCLK_DISABLE 0x8100
402 458
403#define DPIO_DATA_CHANNEL1 0x8220 459/*
404#define DPIO_DATA_CHANNEL2 0x8420 460 * Per DDI channel DPIO regs
461 */
462
463#define _DPIO_PCS_TX_0 0x8200
464#define _DPIO_PCS_TX_1 0x8400
465#define DPIO_PCS_TX_LANE2_RESET (1<<16)
466#define DPIO_PCS_TX_LANE1_RESET (1<<7)
467#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
468
469#define _DPIO_PCS_CLK_0 0x8204
470#define _DPIO_PCS_CLK_1 0x8404
471#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
472#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
473#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
474#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
475#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
476
477#define _DPIO_PCS_CTL_OVR1_A 0x8224
478#define _DPIO_PCS_CTL_OVR1_B 0x8424
479#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
480 _DPIO_PCS_CTL_OVR1_B)
481
482#define _DPIO_PCS_STAGGER0_A 0x822c
483#define _DPIO_PCS_STAGGER0_B 0x842c
484#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
485 _DPIO_PCS_STAGGER0_B)
486
487#define _DPIO_PCS_STAGGER1_A 0x8230
488#define _DPIO_PCS_STAGGER1_B 0x8430
489#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
490 _DPIO_PCS_STAGGER1_B)
491
492#define _DPIO_PCS_CLOCKBUF0_A 0x8238
493#define _DPIO_PCS_CLOCKBUF0_B 0x8438
494#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
495 _DPIO_PCS_CLOCKBUF0_B)
496
497#define _DPIO_PCS_CLOCKBUF8_A 0x825c
498#define _DPIO_PCS_CLOCKBUF8_B 0x845c
499#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
500 _DPIO_PCS_CLOCKBUF8_B)
501
502#define _DPIO_TX_SWING_CTL2_A 0x8288
503#define _DPIO_TX_SWING_CTL2_B 0x8488
504#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
505 _DPIO_TX_SWING_CTL2_B)
506
507#define _DPIO_TX_SWING_CTL3_A 0x828c
508#define _DPIO_TX_SWING_CTL3_B 0x848c
509#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
510 _DPIO_TX_SWING_CTL3_B)
511
512#define _DPIO_TX_SWING_CTL4_A 0x8290
513#define _DPIO_TX_SWING_CTL4_B 0x8490
514#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
515 _DPIO_TX_SWING_CTL4_B)
516
517#define _DPIO_TX_OCALINIT_0 0x8294
518#define _DPIO_TX_OCALINIT_1 0x8494
519#define DPIO_TX_OCALINIT_EN (1<<31)
520#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
521 _DPIO_TX_OCALINIT_1)
522
523#define _DPIO_TX_CTL_0 0x82ac
524#define _DPIO_TX_CTL_1 0x84ac
525#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
526
527#define _DPIO_TX_LANE_0 0x82b8
528#define _DPIO_TX_LANE_1 0x84b8
529#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
530
531#define _DPIO_DATA_CHANNEL1 0x8220
532#define _DPIO_DATA_CHANNEL2 0x8420
533#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
534
535#define _DPIO_PORT0_PCS0 0x0220
536#define _DPIO_PORT0_PCS1 0x0420
537#define _DPIO_PORT1_PCS2 0x2620
538#define _DPIO_PORT1_PCS3 0x2820
539#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
540#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
541#define DPIO_DATA_CHANNEL1 0x8220
542#define DPIO_DATA_CHANNEL2 0x8420
405 543
406/* 544/*
407 * Fence registers 545 * Fence registers
@@ -443,6 +581,7 @@
443#define RENDER_RING_BASE 0x02000 581#define RENDER_RING_BASE 0x02000
444#define BSD_RING_BASE 0x04000 582#define BSD_RING_BASE 0x04000
445#define GEN6_BSD_RING_BASE 0x12000 583#define GEN6_BSD_RING_BASE 0x12000
584#define VEBOX_RING_BASE 0x1a000
446#define BLT_RING_BASE 0x22000 585#define BLT_RING_BASE 0x22000
447#define RING_TAIL(base) ((base)+0x30) 586#define RING_TAIL(base) ((base)+0x30)
448#define RING_HEAD(base) ((base)+0x34) 587#define RING_HEAD(base) ((base)+0x34)
@@ -450,12 +589,20 @@
450#define RING_CTL(base) ((base)+0x3c) 589#define RING_CTL(base) ((base)+0x3c)
451#define RING_SYNC_0(base) ((base)+0x40) 590#define RING_SYNC_0(base) ((base)+0x40)
452#define RING_SYNC_1(base) ((base)+0x44) 591#define RING_SYNC_1(base) ((base)+0x44)
453#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) 592#define RING_SYNC_2(base) ((base)+0x48)
454#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) 593#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
455#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) 594#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
456#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) 595#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
457#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) 596#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
458#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) 597#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
598#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE))
599#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
600#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
601#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE))
602#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
603#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
604#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
605#define GEN6_NOSYNC 0
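With the VEBOX ring added, each ring now carries three semaphore sync registers at consecutive offsets, which is what the RING_SYNC_0/1/2 helpers encode. A quick check of the addresses they produce:

#include <stdio.h>

#define RENDER_RING_BASE   0x02000
#define GEN6_BSD_RING_BASE 0x12000
#define VEBOX_RING_BASE    0x1a000
#define BLT_RING_BASE      0x22000

#define RING_SYNC_0(base) ((base)+0x40)
#define RING_SYNC_1(base) ((base)+0x44)
#define RING_SYNC_2(base) ((base)+0x48)

int main(void)
{
	/* The render ring's RVSYNC/RBSYNC/RVESYNC sit at 0x2040/44/48. */
	printf("GEN6_RVSYNC=0x%05x GEN6_RBSYNC=0x%05x GEN6_RVESYNC=0x%05x\n",
	       RING_SYNC_0(RENDER_RING_BASE), RING_SYNC_1(RENDER_RING_BASE),
	       RING_SYNC_2(RENDER_RING_BASE));
	printf("GEN6_VEBSYNC=0x%05x\n", RING_SYNC_0(VEBOX_RING_BASE));
	return 0;
}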
459#define RING_MAX_IDLE(base) ((base)+0x54) 606#define RING_MAX_IDLE(base) ((base)+0x54)
460#define RING_HWS_PGA(base) ((base)+0x80) 607#define RING_HWS_PGA(base) ((base)+0x80)
461#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 608#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -467,6 +614,7 @@
467#define DONE_REG 0x40b0 614#define DONE_REG 0x40b0
468#define BSD_HWS_PGA_GEN7 (0x04180) 615#define BSD_HWS_PGA_GEN7 (0x04180)
469#define BLT_HWS_PGA_GEN7 (0x04280) 616#define BLT_HWS_PGA_GEN7 (0x04280)
617#define VEBOX_HWS_PGA_GEN7 (0x04380)
470#define RING_ACTHD(base) ((base)+0x74) 618#define RING_ACTHD(base) ((base)+0x74)
471#define RING_NOPID(base) ((base)+0x94) 619#define RING_NOPID(base) ((base)+0x94)
472#define RING_IMR(base) ((base)+0xa8) 620#define RING_IMR(base) ((base)+0xa8)
@@ -527,7 +675,11 @@
527 675
528#define ERROR_GEN6 0x040a0 676#define ERROR_GEN6 0x040a0
529#define GEN7_ERR_INT 0x44040 677#define GEN7_ERR_INT 0x44040
530#define ERR_INT_MMIO_UNCLAIMED (1<<13) 678#define ERR_INT_POISON (1<<31)
679#define ERR_INT_MMIO_UNCLAIMED (1<<13)
680#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
681#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
682#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
531 683
532#define FPGA_DBG 0x42300 684#define FPGA_DBG 0x42300
533#define FPGA_DBG_RM_NOCLAIM (1<<31) 685#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -583,24 +735,7 @@
583#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) 735#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
584#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) 736#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
585#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) 737#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
586#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 738#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
587#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
588#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
589#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
590#define I915_HWB_OOM_INTERRUPT (1<<13)
591#define I915_SYNC_STATUS_INTERRUPT (1<<12)
592#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
593#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
594#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
595#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
596#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
597#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
598#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
599#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
600#define I915_DEBUG_INTERRUPT (1<<2)
601#define I915_USER_INTERRUPT (1<<1)
602#define I915_ASLE_INTERRUPT (1<<0)
603#define I915_BSD_USER_INTERRUPT (1<<25)
604#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ 739#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
605#define EIR 0x020b0 740#define EIR 0x020b0
606#define EMR 0x020b4 741#define EMR 0x020b4
@@ -712,28 +847,6 @@
712#define CACHE_MODE_1 0x7004 /* IVB+ */ 847#define CACHE_MODE_1 0x7004 /* IVB+ */
713#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 848#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
714 849
715/* GEN6 interrupt control
716 * Note that the per-ring interrupt bits do alias with the global interrupt bits
717 * in GTIMR. */
718#define GEN6_RENDER_HWSTAM 0x2098
719#define GEN6_RENDER_IMR 0x20a8
720#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
721#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
722#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
723#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
724#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
725#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
726#define GEN6_RENDER_SYNC_STATUS (1 << 2)
727#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
728#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
729
730#define GEN6_BLITTER_HWSTAM 0x22098
731#define GEN6_BLITTER_IMR 0x220a8
732#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
733#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
734#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
735#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
736
737#define GEN6_BLITTER_ECOSKPD 0x221d0 850#define GEN6_BLITTER_ECOSKPD 0x221d0
738#define GEN6_BLITTER_LOCK_SHIFT 16 851#define GEN6_BLITTER_LOCK_SHIFT 16
739#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 852#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
@@ -744,9 +857,52 @@
744#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) 857#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
745#define GEN6_BSD_GO_INDICATOR (1 << 4) 858#define GEN6_BSD_GO_INDICATOR (1 << 4)
746 859
747#define GEN6_BSD_HWSTAM 0x12098 860/* On modern GEN architectures interrupt control consists of two sets
748#define GEN6_BSD_IMR 0x120a8 861 * of registers. The first set pertains to the ring generating the
 749#define GEN6_BSD_USER_INTERRUPT (1 << 12) 862 * interrupt. The second set is for the functional block generating the
863 * interrupt. These are PM, GT, DE, etc.
864 *
865 * Luckily *knocks on wood* all the ring interrupt bits match up with the
866 * GT interrupt bits, so we don't need to duplicate the defines.
867 *
 868 * These defines should cover us well from SNB->HSW; with minor exceptions
 869 * they can also work on ILK.
870 */
871#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
872#define GT_BLT_CS_ERROR_INTERRUPT (1 << 25)
873#define GT_BLT_USER_INTERRUPT (1 << 22)
874#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
875#define GT_BSD_USER_INTERRUPT (1 << 12)
876#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
877#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
878#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
879#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
880#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
881#define GT_RENDER_USER_INTERRUPT (1 << 0)
882
883#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
884#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
885
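Since the per-ring interrupt bits alias the GT bits, one GT_* mask value can be applied both at the ring's IMR and at the global GTIMR. A toy model of that aliasing (the fake MMIO variables are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define GT_RENDER_USER_INTERRUPT (1u << 0)
#define GT_BSD_USER_INTERRUPT    (1u << 12)
#define GT_BLT_USER_INTERRUPT    (1u << 22)

static uint32_t ring_imr = ~0u; /* stand-in for RING_IMR(base) */
static uint32_t gtimr = ~0u;    /* stand-in for GTIMR */

static void enable_irq(uint32_t mask)
{
	ring_imr &= ~mask; /* unmask at the ring... */
	gtimr &= ~mask;    /* ...and at the GT block, with the same bits */
}

int main(void)
{
	enable_irq(GT_RENDER_USER_INTERRUPT | GT_BLT_USER_INTERRUPT);
	printf("ring IMR=0x%08x GTIMR=0x%08x\n", ring_imr, gtimr);
	return 0;
}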
886/* These are all the "old" interrupts */
887#define ILK_BSD_USER_INTERRUPT (1<<5)
888#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
889#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
890#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
891#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
892#define I915_HWB_OOM_INTERRUPT (1<<13)
893#define I915_SYNC_STATUS_INTERRUPT (1<<12)
894#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
895#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
896#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
897#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
898#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
899#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
900#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
901#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
902#define I915_DEBUG_INTERRUPT (1<<2)
903#define I915_USER_INTERRUPT (1<<1)
904#define I915_ASLE_INTERRUPT (1<<0)
905#define I915_BSD_USER_INTERRUPT (1 << 25)
750 906
751#define GEN6_BSD_RNCID 0x12198 907#define GEN6_BSD_RNCID 0x12198
752 908
@@ -807,7 +963,9 @@
807#define DPFC_CTL_EN (1<<31) 963#define DPFC_CTL_EN (1<<31)
808#define DPFC_CTL_PLANEA (0<<30) 964#define DPFC_CTL_PLANEA (0<<30)
809#define DPFC_CTL_PLANEB (1<<30) 965#define DPFC_CTL_PLANEB (1<<30)
966#define IVB_DPFC_CTL_PLANE_SHIFT (29)
810#define DPFC_CTL_FENCE_EN (1<<29) 967#define DPFC_CTL_FENCE_EN (1<<29)
968#define IVB_DPFC_CTL_FENCE_EN (1<<28)
811#define DPFC_CTL_PERSISTENT_MODE (1<<25) 969#define DPFC_CTL_PERSISTENT_MODE (1<<25)
812#define DPFC_SR_EN (1<<10) 970#define DPFC_SR_EN (1<<10)
813#define DPFC_CTL_LIMIT_1X (0<<6) 971#define DPFC_CTL_LIMIT_1X (0<<6)
@@ -840,6 +998,7 @@
840#define ILK_DPFC_CHICKEN 0x43224 998#define ILK_DPFC_CHICKEN 0x43224
841#define ILK_FBC_RT_BASE 0x2128 999#define ILK_FBC_RT_BASE 0x2128
842#define ILK_FBC_RT_VALID (1<<0) 1000#define ILK_FBC_RT_VALID (1<<0)
1001#define SNB_FBC_FRONT_BUFFER (1<<1)
843 1002
844#define ILK_DISPLAY_CHICKEN1 0x42000 1003#define ILK_DISPLAY_CHICKEN1 0x42000
845#define ILK_FBCQ_DIS (1<<22) 1004#define ILK_FBCQ_DIS (1<<22)
@@ -855,6 +1014,25 @@
855#define SNB_CPU_FENCE_ENABLE (1<<29) 1014#define SNB_CPU_FENCE_ENABLE (1<<29)
856#define DPFC_CPU_FENCE_OFFSET 0x100104 1015#define DPFC_CPU_FENCE_OFFSET 0x100104
857 1016
1017/* Framebuffer compression for Ivybridge */
1018#define IVB_FBC_RT_BASE 0x7020
1019
1020#define IPS_CTL 0x43408
1021#define IPS_ENABLE (1 << 31)
1022
1023#define MSG_FBC_REND_STATE 0x50380
1024#define FBC_REND_NUKE (1<<2)
1025#define FBC_REND_CACHE_CLEAN (1<<1)
1026
1027#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
1028#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
1029#define HSW_BYPASS_FBC_QUEUE (1<<22)
1030#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, \
1031 _HSW_PIPE_SLICE_CHICKEN_1_A, \
1032 _HSW_PIPE_SLICE_CHICKEN_1_B)
1033
1034#define HSW_CLKGATE_DISABLE_PART_1 0x46500
1035#define HSW_DPFC_GATING_DISABLE (1<<23)
858 1036
859/* 1037/*
860 * GPIO regs 1038 * GPIO regs
@@ -963,7 +1141,10 @@
963#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 1141#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
964#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ 1142#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
965#define DPLL_LOCK_VLV (1<<15) 1143#define DPLL_LOCK_VLV (1<<15)
1144#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
966#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) 1145#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
1146#define DPLL_PORTC_READY_MASK (0xf << 4)
1147#define DPLL_PORTB_READY_MASK (0xf)
967 1148
968#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 1149#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
969/* 1150/*
@@ -1073,7 +1254,7 @@
1073#define DSTATE_PLL_D3_OFF (1<<3) 1254#define DSTATE_PLL_D3_OFF (1<<3)
1074#define DSTATE_GFX_CLOCK_GATING (1<<1) 1255#define DSTATE_GFX_CLOCK_GATING (1<<1)
1075#define DSTATE_DOT_CLOCK_GATING (1<<0) 1256#define DSTATE_DOT_CLOCK_GATING (1<<0)
1076#define DSPCLK_GATE_D 0x6200 1257#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200)
1077# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ 1258# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
1078# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ 1259# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
1079# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ 1260# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1186,6 +1367,8 @@
1186#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) 1367#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
1187#define FW_CSPWRDWNEN (1<<15) 1368#define FW_CSPWRDWNEN (1<<15)
1188 1369
1370#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
1371
1189/* 1372/*
1190 * Palette regs 1373 * Palette regs
1191 */ 1374 */
@@ -1535,14 +1718,13 @@
1535 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ 1718 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1536 GEN7_CXT_GT1_SIZE(ctx_reg) + \ 1719 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1537 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1720 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1538#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f) 1721/* Haswell does have the CXT_SIZE register, but it does not appear to be
1539#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7) 1722 * valid. The docs explain in dwords what is in the context object. The full
1540#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff) 1723 * size is 70720 bytes; however, the power context and execlist context will
1541#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \ 1724 * never be saved (power context is stored elsewhere, and execlists don't work
1542 HSW_CXT_RING_SIZE(ctx_reg) + \ 1725 * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
1543 HSW_CXT_RENDER_SIZE(ctx_reg) + \ 1726 */
1544 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1727#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1545
1546 1728
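The arithmetic in the comment checks out: 66944 bytes rounded up to 4 KiB pages gives 17, hence the fixed HSW_CXT_TOTAL_SIZE. A one-liner to verify it (PAGE_SIZE assumed to be 4096):

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	int full = 70720;  /* documented size of the whole context object */
	int saved = 66944; /* minus the power and execlist contexts */
	int pages = (saved + PAGE_SIZE - 1) / PAGE_SIZE; /* round up */

	printf("%d of %d bytes saved -> %d pages (%d bytes)\n",
	       saved, full, pages, pages * PAGE_SIZE);
	return 0;
}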
1547/* 1729/*
1548 * Overlay regs 1730 * Overlay regs
@@ -1691,6 +1873,12 @@
1691/* SDVO is different across gen3/4 */ 1873/* SDVO is different across gen3/4 */
1692#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) 1874#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
1693#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) 1875#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
1876/*
1877 * Bspec seems to be seriously misleading about the SDVO hpd bits on i965g/gm,
1878 * since reality corroborates that they're the same as on gen3. But keep these
1879 * bits here (and the comment!) to help any other lost wanderers back onto the
1880 * right tracks.
1881 */
1694#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) 1882#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
1695#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) 1883#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
1696#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) 1884#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
@@ -1702,13 +1890,6 @@
1702 PORTC_HOTPLUG_INT_STATUS | \ 1890 PORTC_HOTPLUG_INT_STATUS | \
1703 PORTD_HOTPLUG_INT_STATUS) 1891 PORTD_HOTPLUG_INT_STATUS)
1704 1892
1705#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
1706 SDVOB_HOTPLUG_INT_STATUS_I965 | \
1707 SDVOC_HOTPLUG_INT_STATUS_I965 | \
1708 PORTB_HOTPLUG_INT_STATUS | \
1709 PORTC_HOTPLUG_INT_STATUS | \
1710 PORTD_HOTPLUG_INT_STATUS)
1711
1712#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \ 1893#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
1713 SDVOB_HOTPLUG_INT_STATUS_I915 | \ 1894 SDVOB_HOTPLUG_INT_STATUS_I915 | \
1714 SDVOC_HOTPLUG_INT_STATUS_I915 | \ 1895 SDVOC_HOTPLUG_INT_STATUS_I915 | \
@@ -1967,6 +2148,10 @@
1967#define BLM_PIPE_A (0 << 29) 2148#define BLM_PIPE_A (0 << 29)
1968#define BLM_PIPE_B (1 << 29) 2149#define BLM_PIPE_B (1 << 29)
1969#define BLM_PIPE_C (2 << 29) /* ivb + */ 2150#define BLM_PIPE_C (2 << 29) /* ivb + */
2151#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
2152#define BLM_TRANSCODER_B BLM_PIPE_B
2153#define BLM_TRANSCODER_C BLM_PIPE_C
2154#define BLM_TRANSCODER_EDP (3 << 29)
1970#define BLM_PIPE(pipe) ((pipe) << 29) 2155#define BLM_PIPE(pipe) ((pipe) << 29)
1971#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ 2156#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
1972#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) 2157#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
@@ -2540,9 +2725,7 @@
2540#define DP_PRE_EMPHASIS_SHIFT 22 2725#define DP_PRE_EMPHASIS_SHIFT 22
2541 2726
2542/* How many wires to use. I guess 3 was too hard */ 2727/* How many wires to use. I guess 3 was too hard */
2543#define DP_PORT_WIDTH_1 (0 << 19) 2728#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
2544#define DP_PORT_WIDTH_2 (1 << 19)
2545#define DP_PORT_WIDTH_4 (3 << 19)
2546#define DP_PORT_WIDTH_MASK (7 << 19) 2729#define DP_PORT_WIDTH_MASK (7 << 19)
2547 2730
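The parameterized DP_PORT_WIDTH(width) reproduces the removed X1/X2/X4 constants (and the FDI and DDI port-width fields below get the same treatment). A quick check of the encoding:

#include <stdint.h>
#include <stdio.h>

#define DP_PORT_WIDTH(width) ((uint32_t)((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK   (7u << 19)

int main(void)
{
	/* 1 -> 0<<19, 2 -> 1<<19, 4 -> 3<<19, matching the old defines. */
	for (int lanes = 1; lanes <= 4; lanes++)
		printf("%d lane(s) -> 0x%08x\n", lanes,
		       DP_PORT_WIDTH(lanes) & DP_PORT_WIDTH_MASK);
	return 0;
}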
2548/* Mystic DPCD version 1.1 special mode */ 2731/* Mystic DPCD version 1.1 special mode */
@@ -2646,18 +2829,20 @@
2646 * which is after the LUTs, so we want the bytes for our color format. 2829 * which is after the LUTs, so we want the bytes for our color format.
2647 * For our current usage, this is always 3, one byte for R, G and B. 2830 * For our current usage, this is always 3, one byte for R, G and B.
2648 */ 2831 */
2649#define _PIPEA_GMCH_DATA_M 0x70050 2832#define _PIPEA_DATA_M_G4X 0x70050
2650#define _PIPEB_GMCH_DATA_M 0x71050 2833#define _PIPEB_DATA_M_G4X 0x71050
2651 2834
2652/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ 2835/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
2653#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ 2836#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
2837#define TU_SIZE_SHIFT 25
2654#define TU_SIZE_MASK (0x3f << 25) 2838#define TU_SIZE_MASK (0x3f << 25)
2655 2839
2656#define DATA_LINK_M_N_MASK (0xffffff) 2840#define DATA_LINK_M_N_MASK (0xffffff)
2657#define DATA_LINK_N_MAX (0x800000) 2841#define DATA_LINK_N_MAX (0x800000)
2658 2842
2659#define _PIPEA_GMCH_DATA_N 0x70054 2843#define _PIPEA_DATA_N_G4X 0x70054
2660#define _PIPEB_GMCH_DATA_N 0x71054 2844#define _PIPEB_DATA_N_G4X 0x71054
2845#define PIPE_GMCH_DATA_N_MASK (0xffffff)
2661 2846
2662/* 2847/*
2663 * Computing Link M and N values for the Display Port link 2848 * Computing Link M and N values for the Display Port link
@@ -2670,16 +2855,18 @@
2670 * Attributes and VB-ID. 2855 * Attributes and VB-ID.
2671 */ 2856 */
2672 2857
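As a worked example of the ratios behind these registers: data M/N is (pixel clock x bpp) against (link clock x lanes x 8), and link M/N is pixel clock against link clock. The reduction below is a simplified stand-in for the driver's fixed-point helper, just shifting until both values fit the 24-bit fields:

#include <stdint.h>
#include <stdio.h>

#define DATA_LINK_M_N_MASK 0xffffff

static void compute_m_n(uint64_t m, uint64_t n, uint32_t *ret_m, uint32_t *ret_n)
{
	while (m > DATA_LINK_M_N_MASK || n > DATA_LINK_M_N_MASK) {
		m >>= 1;
		n >>= 1;
	}
	*ret_m = (uint32_t)m;
	*ret_n = (uint32_t)n;
}

int main(void)
{
	/* 1080p60: 148500 kHz pixel clock, 24 bpp, 2 lanes at 270000 kHz. */
	uint32_t data_m, data_n, link_m, link_n;

	compute_m_n(148500ULL * 24, 270000ULL * 2 * 8, &data_m, &data_n);
	compute_m_n(148500ULL, 270000ULL, &link_m, &link_n);
	printf("data M/N = %u/%u, link M/N = %u/%u\n",
	       data_m, data_n, link_m, link_n);
	return 0;
}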
2673#define _PIPEA_DP_LINK_M 0x70060 2858#define _PIPEA_LINK_M_G4X 0x70060
2674#define _PIPEB_DP_LINK_M 0x71060 2859#define _PIPEB_LINK_M_G4X 0x71060
2860#define PIPEA_DP_LINK_M_MASK (0xffffff)
2675 2861
2676#define _PIPEA_DP_LINK_N 0x70064 2862#define _PIPEA_LINK_N_G4X 0x70064
2677#define _PIPEB_DP_LINK_N 0x71064 2863#define _PIPEB_LINK_N_G4X 0x71064
2864#define PIPEA_DP_LINK_N_MASK (0xffffff)
2678 2865
2679#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) 2866#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
2680#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) 2867#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
2681#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) 2868#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
2682#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) 2869#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
2683 2870
2684/* Display & cursor control */ 2871/* Display & cursor control */
2685 2872
@@ -2715,6 +2902,7 @@
2715#define PIPECONF_INTERLACED_ILK (3 << 21) 2902#define PIPECONF_INTERLACED_ILK (3 << 21)
2716#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ 2903#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
2717#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ 2904#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
2905#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
2718#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2906#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
2719#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) 2907#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
2720#define PIPECONF_BPC_MASK (0x7 << 5) 2908#define PIPECONF_BPC_MASK (0x7 << 5)
@@ -2915,6 +3103,10 @@
2915#define WM3S_LP_IVB 0x45128 3103#define WM3S_LP_IVB 0x45128
2916#define WM1S_LP_EN (1<<31) 3104#define WM1S_LP_EN (1<<31)
2917 3105
3106#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
3107 (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
3108 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
3109
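HSW_WM_LP_VAL packs the latency, FBC, primary and cursor watermarks into a single WM*_LP register value. A sketch with stand-in field positions (the *_SHIFT values below are illustrative assumptions, not copied from the patch):

#include <stdint.h>
#include <stdio.h>

#define WM3_LP_EN            (1u << 31) /* illustrative */
#define WM1_LP_LATENCY_SHIFT 24         /* illustrative */
#define WM1_LP_FBC_SHIFT     20         /* illustrative */
#define WM1_LP_SR_SHIFT      8          /* illustrative */

#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
	(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
	 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))

int main(void)
{
	uint32_t val = HSW_WM_LP_VAL(12u, 2u, 45u, 20u);
	printf("WM_LP value: 0x%08x\n", val);
	return 0;
}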
2918/* Memory latency timer register */ 3110/* Memory latency timer register */
2919#define MLTR_ILK 0x11222 3111#define MLTR_ILK 0x11222
2920#define MLTR_WM1_SHIFT 0 3112#define MLTR_WM1_SHIFT 0
@@ -3294,7 +3486,7 @@
3294#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) 3486#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
3295#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) 3487#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
3296 3488
3297#define _SPACNTR 0x72180 3489#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
3298#define SP_ENABLE (1<<31) 3490#define SP_ENABLE (1<<31)
3299#define SP_GEAMMA_ENABLE (1<<30) 3491#define SP_GEAMMA_ENABLE (1<<30)
3300#define SP_PIXFORMAT_MASK (0xf<<26) 3492#define SP_PIXFORMAT_MASK (0xf<<26)
@@ -3313,30 +3505,30 @@
3313#define SP_YUV_ORDER_YVYU (2<<16) 3505#define SP_YUV_ORDER_YVYU (2<<16)
3314#define SP_YUV_ORDER_VYUY (3<<16) 3506#define SP_YUV_ORDER_VYUY (3<<16)
3315#define SP_TILED (1<<10) 3507#define SP_TILED (1<<10)
3316#define _SPALINOFF 0x72184 3508#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
3317#define _SPASTRIDE 0x72188 3509#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
3318#define _SPAPOS 0x7218c 3510#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
3319#define _SPASIZE 0x72190 3511#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
3320#define _SPAKEYMINVAL 0x72194 3512#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
3321#define _SPAKEYMSK 0x72198 3513#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
3322#define _SPASURF 0x7219c 3514#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
3323#define _SPAKEYMAXVAL 0x721a0 3515#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
3324#define _SPATILEOFF 0x721a4 3516#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
3325#define _SPACONSTALPHA 0x721a8 3517#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
3326#define _SPAGAMC 0x721f4 3518#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
3327 3519
3328#define _SPBCNTR 0x72280 3520#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
3329#define _SPBLINOFF 0x72284 3521#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
3330#define _SPBSTRIDE 0x72288 3522#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
3331#define _SPBPOS 0x7228c 3523#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
3332#define _SPBSIZE 0x72290 3524#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
3333#define _SPBKEYMINVAL 0x72294 3525#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
3334#define _SPBKEYMSK 0x72298 3526#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
3335#define _SPBSURF 0x7229c 3527#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
3336#define _SPBKEYMAXVAL 0x722a0 3528#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
3337#define _SPBTILEOFF 0x722a4 3529#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
3338#define _SPBCONSTALPHA 0x722a8 3530#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
3339#define _SPBGAMC 0x722f4 3531#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
3340 3532
3341#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) 3533#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
3342#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) 3534#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
@@ -3474,6 +3666,15 @@
3474#define _LGC_PALETTE_B 0x4a800 3666#define _LGC_PALETTE_B 0x4a800
3475#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) 3667#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
3476 3668
3669#define _GAMMA_MODE_A 0x4a480
3670#define _GAMMA_MODE_B 0x4ac80
3671#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
3672#define GAMMA_MODE_MODE_MASK (3 << 0)
3673#define GAMMA_MODE_MODE_8BIT (0 << 0)
3674#define GAMMA_MODE_MODE_10BIT (1 << 0)
3675#define GAMMA_MODE_MODE_12BIT (2 << 0)
3676#define GAMMA_MODE_MODE_SPLIT (3 << 0)
3677
3477/* interrupts */ 3678/* interrupts */
3478#define DE_MASTER_IRQ_CONTROL (1 << 31) 3679#define DE_MASTER_IRQ_CONTROL (1 << 31)
3479#define DE_SPRITEB_FLIP_DONE (1 << 29) 3680#define DE_SPRITEB_FLIP_DONE (1 << 29)
@@ -3502,7 +3703,7 @@
3502#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 3703#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
3503 3704
3504/* More Ivybridge lolz */ 3705/* More Ivybridge lolz */
3505#define DE_ERR_DEBUG_IVB (1<<30) 3706#define DE_ERR_INT_IVB (1<<30)
3506#define DE_GSE_IVB (1<<29) 3707#define DE_GSE_IVB (1<<29)
3507#define DE_PCH_EVENT_IVB (1<<28) 3708#define DE_PCH_EVENT_IVB (1<<28)
3508#define DE_DP_A_HOTPLUG_IVB (1<<27) 3709#define DE_DP_A_HOTPLUG_IVB (1<<27)
@@ -3525,21 +3726,6 @@
3525#define DEIIR 0x44008 3726#define DEIIR 0x44008
3526#define DEIER 0x4400c 3727#define DEIER 0x4400c
3527 3728
3528/* GT interrupt.
3529 * Note that for gen6+ the ring-specific interrupt bits do alias with the
3530 * corresponding bits in the per-ring interrupt control registers. */
3531#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
3532#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
3533#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
3534#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
3535#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
3536#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
3537#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
3538#define GT_PIPE_NOTIFY (1 << 4)
3539#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
3540#define GT_SYNC_STATUS (1 << 2)
3541#define GT_USER_INTERRUPT (1 << 0)
3542
3543#define GTISR 0x44010 3729#define GTISR 0x44010
3544#define GTIMR 0x44014 3730#define GTIMR 0x44014
3545#define GTIIR 0x44018 3731#define GTIIR 0x44018
@@ -3569,6 +3755,9 @@
3569# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) 3755# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
3570# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) 3756# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
3571 3757
3758#define CHICKEN_PAR1_1 0x42080
3759#define FORCE_ARB_IDLE_PLANES (1 << 14)
3760
3572#define DISP_ARB_CTL 0x45000 3761#define DISP_ARB_CTL 0x45000
3573#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 3762#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
3574#define DISP_FBC_WM_DIS (1<<15) 3763#define DISP_FBC_WM_DIS (1<<15)
@@ -3661,6 +3850,7 @@
3661 SDE_PORTC_HOTPLUG_CPT | \ 3850 SDE_PORTC_HOTPLUG_CPT | \
3662 SDE_PORTB_HOTPLUG_CPT) 3851 SDE_PORTB_HOTPLUG_CPT)
3663#define SDE_GMBUS_CPT (1 << 17) 3852#define SDE_GMBUS_CPT (1 << 17)
3853#define SDE_ERROR_CPT (1 << 16)
3664#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) 3854#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
3665#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) 3855#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
3666#define SDE_FDI_RXC_CPT (1 << 8) 3856#define SDE_FDI_RXC_CPT (1 << 8)
@@ -3685,6 +3875,12 @@
3685#define SDEIIR 0xc4008 3875#define SDEIIR 0xc4008
3686#define SDEIER 0xc400c 3876#define SDEIER 0xc400c
3687 3877
3878#define SERR_INT 0xc4040
3879#define SERR_INT_POISON (1<<31)
3880#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
3881#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
3882#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
3883
3688/* digital port hotplug */ 3884/* digital port hotplug */
3689#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ 3885#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
3690#define PORTD_HOTPLUG_ENABLE (1 << 20) 3886#define PORTD_HOTPLUG_ENABLE (1 << 20)
@@ -3734,15 +3930,15 @@
3734 3930
3735#define _PCH_DPLL_A 0xc6014 3931#define _PCH_DPLL_A 0xc6014
3736#define _PCH_DPLL_B 0xc6018 3932#define _PCH_DPLL_B 0xc6018
3737#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) 3933#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
3738 3934
3739#define _PCH_FPA0 0xc6040 3935#define _PCH_FPA0 0xc6040
3740#define FP_CB_TUNE (0x3<<22) 3936#define FP_CB_TUNE (0x3<<22)
3741#define _PCH_FPA1 0xc6044 3937#define _PCH_FPA1 0xc6044
3742#define _PCH_FPB0 0xc6048 3938#define _PCH_FPB0 0xc6048
3743#define _PCH_FPB1 0xc604c 3939#define _PCH_FPB1 0xc604c
3744#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) 3940#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
3745#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) 3941#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
3746 3942
3747#define PCH_DPLL_TEST 0xc606c 3943#define PCH_DPLL_TEST 0xc606c
3748 3944
@@ -3782,46 +3978,40 @@
3782#define PCH_SSC4_AUX_PARMS 0xc6214 3978#define PCH_SSC4_AUX_PARMS 0xc6214
3783 3979
3784#define PCH_DPLL_SEL 0xc7000 3980#define PCH_DPLL_SEL 0xc7000
3785#define TRANSA_DPLL_ENABLE (1<<3) 3981#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
3786#define TRANSA_DPLLB_SEL (1<<0) 3982#define TRANS_DPLLA_SEL(pipe) 0
3787#define TRANSA_DPLLA_SEL 0 3983#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
3788#define TRANSB_DPLL_ENABLE (1<<7)
3789#define TRANSB_DPLLB_SEL (1<<4)
3790#define TRANSB_DPLLA_SEL (0)
3791#define TRANSC_DPLL_ENABLE (1<<11)
3792#define TRANSC_DPLLB_SEL (1<<8)
3793#define TRANSC_DPLLA_SEL (0)
3794 3984
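The per-transcoder TRANSA/B/C bit pairs collapse into two pipe-parameterized macros; plugging in pipe 0..2 reproduces the removed constants:

#include <stdio.h>

#define TRANS_DPLLB_SEL(pipe)   (1 << ((pipe) * 4))
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))

int main(void)
{
	/* DPLLB_SEL: 1<<0, 1<<4, 1<<8; DPLL_ENABLE: 1<<3, 1<<7, 1<<11 */
	for (int pipe = 0; pipe < 3; pipe++)
		printf("pipe %d: dpllb_sel=0x%03x dpll_enable=0x%03x\n",
		       pipe, TRANS_DPLLB_SEL(pipe), TRANS_DPLL_ENABLE(pipe));
	return 0;
}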
3795/* transcoder */ 3985/* transcoder */
3796 3986
3797#define _TRANS_HTOTAL_A 0xe0000 3987#define _PCH_TRANS_HTOTAL_A 0xe0000
3798#define TRANS_HTOTAL_SHIFT 16 3988#define TRANS_HTOTAL_SHIFT 16
3799#define TRANS_HACTIVE_SHIFT 0 3989#define TRANS_HACTIVE_SHIFT 0
3800#define _TRANS_HBLANK_A 0xe0004 3990#define _PCH_TRANS_HBLANK_A 0xe0004
3801#define TRANS_HBLANK_END_SHIFT 16 3991#define TRANS_HBLANK_END_SHIFT 16
3802#define TRANS_HBLANK_START_SHIFT 0 3992#define TRANS_HBLANK_START_SHIFT 0
3803#define _TRANS_HSYNC_A 0xe0008 3993#define _PCH_TRANS_HSYNC_A 0xe0008
3804#define TRANS_HSYNC_END_SHIFT 16 3994#define TRANS_HSYNC_END_SHIFT 16
3805#define TRANS_HSYNC_START_SHIFT 0 3995#define TRANS_HSYNC_START_SHIFT 0
3806#define _TRANS_VTOTAL_A 0xe000c 3996#define _PCH_TRANS_VTOTAL_A 0xe000c
3807#define TRANS_VTOTAL_SHIFT 16 3997#define TRANS_VTOTAL_SHIFT 16
3808#define TRANS_VACTIVE_SHIFT 0 3998#define TRANS_VACTIVE_SHIFT 0
3809#define _TRANS_VBLANK_A 0xe0010 3999#define _PCH_TRANS_VBLANK_A 0xe0010
3810#define TRANS_VBLANK_END_SHIFT 16 4000#define TRANS_VBLANK_END_SHIFT 16
3811#define TRANS_VBLANK_START_SHIFT 0 4001#define TRANS_VBLANK_START_SHIFT 0
3812#define _TRANS_VSYNC_A 0xe0014 4002#define _PCH_TRANS_VSYNC_A 0xe0014
3813#define TRANS_VSYNC_END_SHIFT 16 4003#define TRANS_VSYNC_END_SHIFT 16
3814#define TRANS_VSYNC_START_SHIFT 0 4004#define TRANS_VSYNC_START_SHIFT 0
3815#define _TRANS_VSYNCSHIFT_A 0xe0028 4005#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
3816 4006
3817#define _TRANSA_DATA_M1 0xe0030 4007#define _PCH_TRANSA_DATA_M1 0xe0030
3818#define _TRANSA_DATA_N1 0xe0034 4008#define _PCH_TRANSA_DATA_N1 0xe0034
3819#define _TRANSA_DATA_M2 0xe0038 4009#define _PCH_TRANSA_DATA_M2 0xe0038
3820#define _TRANSA_DATA_N2 0xe003c 4010#define _PCH_TRANSA_DATA_N2 0xe003c
3821#define _TRANSA_DP_LINK_M1 0xe0040 4011#define _PCH_TRANSA_LINK_M1 0xe0040
3822#define _TRANSA_DP_LINK_N1 0xe0044 4012#define _PCH_TRANSA_LINK_N1 0xe0044
3823#define _TRANSA_DP_LINK_M2 0xe0048 4013#define _PCH_TRANSA_LINK_M2 0xe0048
3824#define _TRANSA_DP_LINK_N2 0xe004c 4014#define _PCH_TRANSA_LINK_N2 0xe004c
3825 4015
3826/* Per-transcoder DIP controls */ 4016/* Per-transcoder DIP controls */
3827 4017
@@ -3890,44 +4080,45 @@
3890#define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 4080#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
3891 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) 4081 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
3892 4082
3893#define _TRANS_HTOTAL_B 0xe1000 4083#define _PCH_TRANS_HTOTAL_B 0xe1000
3894#define _TRANS_HBLANK_B 0xe1004 4084#define _PCH_TRANS_HBLANK_B 0xe1004
3895#define _TRANS_HSYNC_B 0xe1008 4085#define _PCH_TRANS_HSYNC_B 0xe1008
3896#define _TRANS_VTOTAL_B 0xe100c 4086#define _PCH_TRANS_VTOTAL_B 0xe100c
3897#define _TRANS_VBLANK_B 0xe1010 4087#define _PCH_TRANS_VBLANK_B 0xe1010
3898#define _TRANS_VSYNC_B 0xe1014 4088#define _PCH_TRANS_VSYNC_B 0xe1014
3899#define _TRANS_VSYNCSHIFT_B 0xe1028 4089#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
3900 4090
3901#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) 4091#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
3902#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) 4092#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
3903#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B) 4093#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
3904#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) 4094#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
3905#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) 4095#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
3906#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) 4096#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
3907#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \ 4097#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
3908 _TRANS_VSYNCSHIFT_B) 4098 _PCH_TRANS_VSYNCSHIFT_B)
3909 4099
3910#define _TRANSB_DATA_M1 0xe1030 4100#define _PCH_TRANSB_DATA_M1 0xe1030
3911#define _TRANSB_DATA_N1 0xe1034 4101#define _PCH_TRANSB_DATA_N1 0xe1034
3912#define _TRANSB_DATA_M2 0xe1038 4102#define _PCH_TRANSB_DATA_M2 0xe1038
3913#define _TRANSB_DATA_N2 0xe103c 4103#define _PCH_TRANSB_DATA_N2 0xe103c
3914#define _TRANSB_DP_LINK_M1 0xe1040 4104#define _PCH_TRANSB_LINK_M1 0xe1040
3915#define _TRANSB_DP_LINK_N1 0xe1044 4105#define _PCH_TRANSB_LINK_N1 0xe1044
3916#define _TRANSB_DP_LINK_M2 0xe1048 4106#define _PCH_TRANSB_LINK_M2 0xe1048
3917#define _TRANSB_DP_LINK_N2 0xe104c 4107#define _PCH_TRANSB_LINK_N2 0xe104c
3918 4108
3919#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1) 4109#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
3920#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1) 4110#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
3921#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2) 4111#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
3922#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2) 4112#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
3923#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1) 4113#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
3924#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1) 4114#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
3925#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2) 4115#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
3926#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2) 4116#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
3927 4117
3928#define _TRANSACONF 0xf0008 4118#define _PCH_TRANSACONF 0xf0008
3929#define _TRANSBCONF 0xf1008 4119#define _PCH_TRANSBCONF 0xf1008
3930#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF) 4120#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
4121#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
3931#define TRANS_DISABLE (0<<31) 4122#define TRANS_DISABLE (0<<31)
3932#define TRANS_ENABLE (1<<31) 4123#define TRANS_ENABLE (1<<31)
3933#define TRANS_STATE_MASK (1<<30) 4124#define TRANS_STATE_MASK (1<<30)
@@ -4011,10 +4202,9 @@
4011#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) 4202#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
4012#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) 4203#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
4013#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) 4204#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
4014#define FDI_DP_PORT_WIDTH_X1 (0<<19) 4205#define FDI_DP_PORT_WIDTH_SHIFT 19
4015#define FDI_DP_PORT_WIDTH_X2 (1<<19) 4206#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
4016#define FDI_DP_PORT_WIDTH_X3 (2<<19) 4207#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
4017#define FDI_DP_PORT_WIDTH_X4 (3<<19)
4018#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) 4208#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
4019/* Ironlake: hardwired to 1 */ 4209/* Ironlake: hardwired to 1 */
4020#define FDI_TX_PLL_ENABLE (1<<14) 4210#define FDI_TX_PLL_ENABLE (1<<14)
@@ -4039,7 +4229,6 @@
4039/* train, dp width same as FDI_TX */ 4229/* train, dp width same as FDI_TX */
4040#define FDI_FS_ERRC_ENABLE (1<<27) 4230#define FDI_FS_ERRC_ENABLE (1<<27)
4041#define FDI_FE_ERRC_ENABLE (1<<26) 4231#define FDI_FE_ERRC_ENABLE (1<<26)
4042#define FDI_DP_PORT_WIDTH_X8 (7<<19)
4043#define FDI_RX_POLARITY_REVERSED_LPT (1<<16) 4232#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
4044#define FDI_8BPC (0<<16) 4233#define FDI_8BPC (0<<16)
4045#define FDI_10BPC (1<<16) 4234#define FDI_10BPC (1<<16)
@@ -4061,9 +4250,6 @@
4061#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) 4250#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
4062#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) 4251#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
4063#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) 4252#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
4064/* LPT */
4065#define FDI_PORT_WIDTH_2X_LPT (1<<19)
4066#define FDI_PORT_WIDTH_1X_LPT (0<<19)
4067 4253
4068#define _FDI_RXA_MISC 0xf0010 4254#define _FDI_RXA_MISC 0xf0010
4069#define _FDI_RXB_MISC 0xf1010 4255#define _FDI_RXB_MISC 0xf1010
@@ -4309,6 +4495,7 @@
4309#define GEN6_RC_CTL_RC6_ENABLE (1<<18) 4495#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
4310#define GEN6_RC_CTL_RC1e_ENABLE (1<<20) 4496#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
4311#define GEN6_RC_CTL_RC7_ENABLE (1<<22) 4497#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
4498#define GEN7_RC_CTL_TO_MODE (1<<28)
4312#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) 4499#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
4313#define GEN6_RC_CTL_HW_ENABLE (1<<31) 4500#define GEN6_RC_CTL_HW_ENABLE (1<<31)
4314#define GEN6_RP_DOWN_TIMEOUT 0xA010 4501#define GEN6_RP_DOWN_TIMEOUT 0xA010
@@ -4370,7 +4557,7 @@
4370#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) 4557#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
4371#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) 4558#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
4372#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) 4559#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
4373#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ 4560#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
4374 GEN6_PM_RP_DOWN_THRESHOLD | \ 4561 GEN6_PM_RP_DOWN_THRESHOLD | \
4375 GEN6_PM_RP_DOWN_TIMEOUT) 4562 GEN6_PM_RP_DOWN_TIMEOUT)
4376 4563
@@ -4392,20 +4579,6 @@
4392#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4579#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4393#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 4580#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
4394 4581
4395#define VLV_IOSF_DOORBELL_REQ 0x182100
4396#define IOSF_DEVFN_SHIFT 24
4397#define IOSF_OPCODE_SHIFT 16
4398#define IOSF_PORT_SHIFT 8
4399#define IOSF_BYTE_ENABLES_SHIFT 4
4400#define IOSF_BAR_SHIFT 1
4401#define IOSF_SB_BUSY (1<<0)
4402#define IOSF_PORT_PUNIT 0x4
4403#define VLV_IOSF_DATA 0x182104
4404#define VLV_IOSF_ADDR 0x182108
4405
4406#define PUNIT_OPCODE_REG_READ 6
4407#define PUNIT_OPCODE_REG_WRITE 7
4408
4409#define GEN6_GT_CORE_STATUS 0x138060 4582#define GEN6_GT_CORE_STATUS 0x138060
4410#define GEN6_CORE_CPD_STATE_MASK (7<<4) 4583#define GEN6_CORE_CPD_STATE_MASK (7<<4)
4411#define GEN6_RCn_MASK 7 4584#define GEN6_RCn_MASK 7
@@ -4602,9 +4775,6 @@
4602#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) 4775#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
4603#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) 4776#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
4604#define TRANS_DDI_BFI_ENABLE (1<<4) 4777#define TRANS_DDI_BFI_ENABLE (1<<4)
4605#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
4606#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
4607#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
4608 4778
4609/* DisplayPort Transport Control */ 4779/* DisplayPort Transport Control */
4610#define DP_TP_CTL_A 0x64040 4780#define DP_TP_CTL_A 0x64040
@@ -4648,9 +4818,7 @@
4648#define DDI_BUF_PORT_REVERSAL (1<<16) 4818#define DDI_BUF_PORT_REVERSAL (1<<16)
4649#define DDI_BUF_IS_IDLE (1<<7) 4819#define DDI_BUF_IS_IDLE (1<<7)
4650#define DDI_A_4_LANES (1<<4) 4820#define DDI_A_4_LANES (1<<4)
4651#define DDI_PORT_WIDTH_X1 (0<<1) 4821#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
4652#define DDI_PORT_WIDTH_X2 (1<<1)
4653#define DDI_PORT_WIDTH_X4 (3<<1)
4654#define DDI_INIT_DISPLAY_DETECTED (1<<0) 4822#define DDI_INIT_DISPLAY_DETECTED (1<<0)
4655 4823
4656/* DDI Buffer Translations */ 4824/* DDI Buffer Translations */
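Note on the hunk above: the parameterized DDI_PORT_WIDTH(width) macro folds the three removed per-width constants into one expression. Since ((width) - 1) << 1 gives 0<<1, 1<<1 and 3<<1 for one, two and four lanes, it reproduces the deleted DDI_PORT_WIDTH_X1/X2/X4 values exactly. A minimal standalone check (not part of the patch; the expected values are copied from the removed defines):

	#include <assert.h>

	#define DDI_PORT_WIDTH(width)	(((width) - 1) << 1)

	int main(void)
	{
		/* values of the removed DDI_PORT_WIDTH_X1/X2/X4 defines */
		assert(DDI_PORT_WIDTH(1) == (0 << 1));
		assert(DDI_PORT_WIDTH(2) == (1 << 1));
		assert(DDI_PORT_WIDTH(4) == (3 << 1));
		return 0;
	}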
@@ -4774,6 +4942,9 @@
4774#define SFUSE_STRAP_DDIC_DETECTED (1<<1) 4942#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4775#define SFUSE_STRAP_DDID_DETECTED (1<<0) 4943#define SFUSE_STRAP_DDID_DETECTED (1<<0)
4776 4944
4945#define WM_MISC 0x45260
4946#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
4947
4777#define WM_DBG 0x45280 4948#define WM_DBG 0x45280
4778#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) 4949#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
4779#define WM_DBG_DISALLOW_MAXFIFO (1<<1) 4950#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
@@ -4787,6 +4958,9 @@
4787#define _PIPE_A_CSC_COEFF_RV_GV 0x49020 4958#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
4788#define _PIPE_A_CSC_COEFF_BV 0x49024 4959#define _PIPE_A_CSC_COEFF_BV 0x49024
4789#define _PIPE_A_CSC_MODE 0x49028 4960#define _PIPE_A_CSC_MODE 0x49028
4961#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
4962#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
4963#define CSC_MODE_YUV_TO_RGB (1 << 0)
4790#define _PIPE_A_CSC_PREOFF_HI 0x49030 4964#define _PIPE_A_CSC_PREOFF_HI 0x49030
4791#define _PIPE_A_CSC_PREOFF_ME 0x49034 4965#define _PIPE_A_CSC_PREOFF_ME 0x49034
4792#define _PIPE_A_CSC_PREOFF_LO 0x49038 4966#define _PIPE_A_CSC_PREOFF_LO 0x49038
@@ -4808,10 +4982,6 @@
4808#define _PIPE_B_CSC_POSTOFF_ME 0x49144 4982#define _PIPE_B_CSC_POSTOFF_ME 0x49144
4809#define _PIPE_B_CSC_POSTOFF_LO 0x49148 4983#define _PIPE_B_CSC_POSTOFF_LO 0x49148
4810 4984
4811#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
4812#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
4813#define CSC_MODE_YUV_TO_RGB (1 << 0)
4814
4815#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) 4985#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
4816#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) 4986#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
4817#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) 4987#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 41f0fdecfbdc..88b9a663944f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -192,6 +192,7 @@ static void i915_restore_vga(struct drm_device *dev)
192static void i915_save_display(struct drm_device *dev) 192static void i915_save_display(struct drm_device *dev)
193{ 193{
194 struct drm_i915_private *dev_priv = dev->dev_private; 194 struct drm_i915_private *dev_priv = dev->dev_private;
195 unsigned long flags;
195 196
196 /* Display arbitration control */ 197 /* Display arbitration control */
197 if (INTEL_INFO(dev)->gen <= 4) 198 if (INTEL_INFO(dev)->gen <= 4)
@@ -202,6 +203,8 @@ static void i915_save_display(struct drm_device *dev)
202 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 203 if (!drm_core_check_feature(dev, DRIVER_MODESET))
203 i915_save_display_reg(dev); 204 i915_save_display_reg(dev);
204 205
206 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
207
205 /* LVDS state */ 208 /* LVDS state */
206 if (HAS_PCH_SPLIT(dev)) { 209 if (HAS_PCH_SPLIT(dev)) {
207 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 210 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
@@ -222,6 +225,8 @@ static void i915_save_display(struct drm_device *dev)
222 dev_priv->regfile.saveLVDS = I915_READ(LVDS); 225 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
223 } 226 }
224 227
228 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
229
225 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 230 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
226 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 231 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
227 232
@@ -257,6 +262,7 @@ static void i915_restore_display(struct drm_device *dev)
257{ 262{
258 struct drm_i915_private *dev_priv = dev->dev_private; 263 struct drm_i915_private *dev_priv = dev->dev_private;
259 u32 mask = 0xffffffff; 264 u32 mask = 0xffffffff;
265 unsigned long flags;
260 266
261 /* Display arbitration */ 267 /* Display arbitration */
262 if (INTEL_INFO(dev)->gen <= 4) 268 if (INTEL_INFO(dev)->gen <= 4)
@@ -265,6 +271,8 @@ static void i915_restore_display(struct drm_device *dev)
265 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 271 if (!drm_core_check_feature(dev, DRIVER_MODESET))
266 i915_restore_display_reg(dev); 272 i915_restore_display_reg(dev);
267 273
274 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
275
268 /* LVDS state */ 276 /* LVDS state */
269 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 277 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
270 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); 278 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
@@ -304,6 +312,8 @@ static void i915_restore_display(struct drm_device *dev)
304 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 312 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
305 } 313 }
306 314
315 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
316
307 /* only restore FBC info on the platform that supports FBC*/ 317 /* only restore FBC info on the platform that supports FBC*/
308 intel_disable_fbc(dev); 318 intel_disable_fbc(dev);
309 if (I915_HAS_FBC(dev)) { 319 if (I915_HAS_FBC(dev)) {
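Note on the i915_suspend.c changes: both the save and restore paths now wrap the LVDS/backlight register block in dev_priv->backlight.lock. The _irqsave variant suggests the same lock is also taken from interrupt context, presumably for opregion/ASLE-driven backlight requests, so a plain spin_lock() would not be safe here; this is an inference from the lock name and variant, not something this diff states. Kernel-context fragment of the pattern (not standalone code):

	unsigned long flags;

	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
	/* save or restore BLC_PWM_* / panel power registers */
	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);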
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d5e1890678f9..6875b5654c63 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -212,7 +212,13 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
212 int ret; 212 int ret;
213 213
214 mutex_lock(&dev_priv->rps.hw_lock); 214 mutex_lock(&dev_priv->rps.hw_lock);
215 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; 215 if (IS_VALLEYVIEW(dev_priv->dev)) {
216 u32 freq;
217 freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
218 ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
219 } else {
220 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
221 }
216 mutex_unlock(&dev_priv->rps.hw_lock); 222 mutex_unlock(&dev_priv->rps.hw_lock);
217 223
218 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 224 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -226,7 +232,10 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
226 int ret; 232 int ret;
227 233
228 mutex_lock(&dev_priv->rps.hw_lock); 234 mutex_lock(&dev_priv->rps.hw_lock);
229 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 235 if (IS_VALLEYVIEW(dev_priv->dev))
236 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
237 else
238 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
230 mutex_unlock(&dev_priv->rps.hw_lock); 239 mutex_unlock(&dev_priv->rps.hw_lock);
231 240
232 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 241 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -246,16 +255,25 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
246 if (ret) 255 if (ret)
247 return ret; 256 return ret;
248 257
249 val /= GT_FREQUENCY_MULTIPLIER;
250
251 mutex_lock(&dev_priv->rps.hw_lock); 258 mutex_lock(&dev_priv->rps.hw_lock);
252 259
253 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 260 if (IS_VALLEYVIEW(dev_priv->dev)) {
254 hw_max = dev_priv->rps.hw_max; 261 val = vlv_freq_opcode(dev_priv->mem_freq, val);
255 non_oc_max = (rp_state_cap & 0xff); 262
256 hw_min = ((rp_state_cap & 0xff0000) >> 16); 263 hw_max = valleyview_rps_max_freq(dev_priv);
264 hw_min = valleyview_rps_min_freq(dev_priv);
265 non_oc_max = hw_max;
266 } else {
267 val /= GT_FREQUENCY_MULTIPLIER;
268
269 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
270 hw_max = dev_priv->rps.hw_max;
271 non_oc_max = (rp_state_cap & 0xff);
272 hw_min = ((rp_state_cap & 0xff0000) >> 16);
273 }
257 274
258 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { 275 if (val < hw_min || val > hw_max ||
276 val < dev_priv->rps.min_delay) {
259 mutex_unlock(&dev_priv->rps.hw_lock); 277 mutex_unlock(&dev_priv->rps.hw_lock);
260 return -EINVAL; 278 return -EINVAL;
261 } 279 }
@@ -264,8 +282,12 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
264 DRM_DEBUG("User requested overclocking to %d\n", 282 DRM_DEBUG("User requested overclocking to %d\n",
265 val * GT_FREQUENCY_MULTIPLIER); 283 val * GT_FREQUENCY_MULTIPLIER);
266 284
267 if (dev_priv->rps.cur_delay > val) 285 if (dev_priv->rps.cur_delay > val) {
268 gen6_set_rps(dev_priv->dev, val); 286 if (IS_VALLEYVIEW(dev_priv->dev))
287 valleyview_set_rps(dev_priv->dev, val);
288 else
289 gen6_set_rps(dev_priv->dev, val);
290 }
269 291
270 dev_priv->rps.max_delay = val; 292 dev_priv->rps.max_delay = val;
271 293
@@ -282,7 +304,10 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
282 int ret; 304 int ret;
283 305
284 mutex_lock(&dev_priv->rps.hw_lock); 306 mutex_lock(&dev_priv->rps.hw_lock);
285 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 307 if (IS_VALLEYVIEW(dev_priv->dev))
308 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
309 else
310 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
286 mutex_unlock(&dev_priv->rps.hw_lock); 311 mutex_unlock(&dev_priv->rps.hw_lock);
287 312
288 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 313 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -302,21 +327,32 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
302 if (ret) 327 if (ret)
303 return ret; 328 return ret;
304 329
305 val /= GT_FREQUENCY_MULTIPLIER;
306
307 mutex_lock(&dev_priv->rps.hw_lock); 330 mutex_lock(&dev_priv->rps.hw_lock);
308 331
309 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 332 if (IS_VALLEYVIEW(dev)) {
310 hw_max = dev_priv->rps.hw_max; 333 val = vlv_freq_opcode(dev_priv->mem_freq, val);
311 hw_min = ((rp_state_cap & 0xff0000) >> 16); 334
335 hw_max = valleyview_rps_max_freq(dev_priv);
336 hw_min = valleyview_rps_min_freq(dev_priv);
337 } else {
338 val /= GT_FREQUENCY_MULTIPLIER;
339
340 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
341 hw_max = dev_priv->rps.hw_max;
342 hw_min = ((rp_state_cap & 0xff0000) >> 16);
343 }
312 344
313 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { 345 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
314 mutex_unlock(&dev_priv->rps.hw_lock); 346 mutex_unlock(&dev_priv->rps.hw_lock);
315 return -EINVAL; 347 return -EINVAL;
316 } 348 }
317 349
318 if (dev_priv->rps.cur_delay < val) 350 if (dev_priv->rps.cur_delay < val) {
319 gen6_set_rps(dev_priv->dev, val); 351 if (IS_VALLEYVIEW(dev))
352 valleyview_set_rps(dev, val);
353 else
354 gen6_set_rps(dev_priv->dev, val);
355 }
320 356
321 dev_priv->rps.min_delay = val; 357 dev_priv->rps.min_delay = val;
322 358
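Note on the i915_sysfs.c changes: the sysfs files keep reporting plain MHz, but the unit conversion now branches per platform. On Valleyview the hardware deals in PUnit opcodes, translated with vlv_gpu_freq()/vlv_freq_opcode() against the memory frequency, and the current frequency is read live from PUNIT_REG_GPU_FREQ_STS rather than from the cached delay. Elsewhere one delay step is worth GT_FREQUENCY_MULTIPLIER MHz; the multiplier is 50 in this era of the driver (an assumption, since the define is not shown in this diff). Minimal arithmetic sketch of the non-VLV path:

	#include <stdio.h>

	/* Assumed value of the real define from i915_drv.h. */
	#define GT_FREQUENCY_MULTIPLIER 50

	int main(void)
	{
		int cur_delay = 18;	/* hypothetical rps.cur_delay value */

		/* gt_cur_freq_mhz_show(), non-VLV branch: delay -> MHz */
		printf("%d MHz\n", cur_delay * GT_FREQUENCY_MULTIPLIER);
		return 0;	/* prints "900 MHz" */
	}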
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 985a09716237..967da4772c44 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -41,7 +41,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
41 return false; 41 return false;
42 42
43 if (HAS_PCH_SPLIT(dev)) 43 if (HAS_PCH_SPLIT(dev))
44 dpll_reg = _PCH_DPLL(pipe); 44 dpll_reg = PCH_DPLL(pipe);
45 else 45 else
46 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; 46 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
47 47
@@ -148,13 +148,13 @@ void i915_save_display_reg(struct drm_device *dev)
148 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); 148 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
149 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); 149 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
150 150
151 dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); 151 dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
152 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); 152 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
153 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); 153 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
154 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); 154 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
155 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); 155 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
156 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); 156 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
157 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); 157 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
158 } 158 }
159 159
160 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); 160 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
@@ -205,13 +205,13 @@ void i915_save_display_reg(struct drm_device *dev)
205 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); 205 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
206 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); 206 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
207 207
208 dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); 208 dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
209 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); 209 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
210 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); 210 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
211 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); 211 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
212 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); 212 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
213 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); 213 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
214 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); 214 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
215 } 215 }
216 216
217 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); 217 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
@@ -259,14 +259,14 @@ void i915_save_display_reg(struct drm_device *dev)
259 dev_priv->regfile.saveDP_B = I915_READ(DP_B); 259 dev_priv->regfile.saveDP_B = I915_READ(DP_B);
260 dev_priv->regfile.saveDP_C = I915_READ(DP_C); 260 dev_priv->regfile.saveDP_C = I915_READ(DP_C);
261 dev_priv->regfile.saveDP_D = I915_READ(DP_D); 261 dev_priv->regfile.saveDP_D = I915_READ(DP_D);
262 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); 262 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
263 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); 263 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
264 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); 264 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
265 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); 265 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
266 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); 266 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
267 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); 267 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
268 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); 268 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
269 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); 269 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
270 } 270 }
271 /* FIXME: regfile.save TV & SDVO state */ 271 /* FIXME: regfile.save TV & SDVO state */
272 272
@@ -282,14 +282,14 @@ void i915_restore_display_reg(struct drm_device *dev)
282 282
283 /* Display port ratios (must be done before clock is set) */ 283 /* Display port ratios (must be done before clock is set) */
284 if (SUPPORTS_INTEGRATED_DP(dev)) { 284 if (SUPPORTS_INTEGRATED_DP(dev)) {
285 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); 285 I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
286 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); 286 I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
287 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); 287 I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
288 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); 288 I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
289 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); 289 I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
290 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); 290 I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
291 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); 291 I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
292 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); 292 I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
293 } 293 }
294 294
295 /* Fences */ 295 /* Fences */
@@ -379,13 +379,13 @@ void i915_restore_display_reg(struct drm_device *dev)
379 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); 379 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
380 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); 380 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
381 381
382 I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); 382 I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
383 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); 383 I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
384 I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); 384 I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
385 I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); 385 I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
386 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); 386 I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
387 I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); 387 I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
388 I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); 388 I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
389 } 389 }
390 390
391 /* Restore plane info */ 391 /* Restore plane info */
@@ -448,13 +448,13 @@ void i915_restore_display_reg(struct drm_device *dev)
448 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); 448 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
449 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); 449 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
450 450
451 I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); 451 I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
452 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); 452 I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
453 I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); 453 I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
454 I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); 454 I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
455 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); 455 I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
456 I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); 456 I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
457 I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); 457 I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
458 } 458 }
459 459
460 /* Restore plane info */ 460 /* Restore plane info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 95070b2124c6..53f2bed8bc5f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -212,7 +212,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
212 if (!lvds_options) 212 if (!lvds_options)
213 return; 213 return;
214 214
215 dev_priv->lvds_dither = lvds_options->pixel_dither; 215 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
216 if (lvds_options->panel_type == 0xff) 216 if (lvds_options->panel_type == 0xff)
217 return; 217 return;
218 218
@@ -226,7 +226,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
226 if (!lvds_lfp_data_ptrs) 226 if (!lvds_lfp_data_ptrs)
227 return; 227 return;
228 228
229 dev_priv->lvds_vbt = 1; 229 dev_priv->vbt.lvds_vbt = 1;
230 230
231 panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, 231 panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
232 lvds_lfp_data_ptrs, 232 lvds_lfp_data_ptrs,
@@ -238,7 +238,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
238 238
239 fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); 239 fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
240 240
241 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 241 dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
242 242
243 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); 243 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
244 drm_mode_debug_printmodeline(panel_fixed_mode); 244 drm_mode_debug_printmodeline(panel_fixed_mode);
@@ -274,9 +274,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
274 /* check the resolution, just to be sure */ 274 /* check the resolution, just to be sure */
275 if (fp_timing->x_res == panel_fixed_mode->hdisplay && 275 if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
276 fp_timing->y_res == panel_fixed_mode->vdisplay) { 276 fp_timing->y_res == panel_fixed_mode->vdisplay) {
277 dev_priv->bios_lvds_val = fp_timing->lvds_reg_val; 277 dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
278 DRM_DEBUG_KMS("VBT initial LVDS value %x\n", 278 DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
279 dev_priv->bios_lvds_val); 279 dev_priv->vbt.bios_lvds_val);
280 } 280 }
281 } 281 }
282} 282}
@@ -316,7 +316,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
316 316
317 fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); 317 fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
318 318
319 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; 319 dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
320 320
321 DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); 321 DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
322 drm_mode_debug_printmodeline(panel_fixed_mode); 322 drm_mode_debug_printmodeline(panel_fixed_mode);
@@ -345,20 +345,20 @@ parse_general_features(struct drm_i915_private *dev_priv,
345 345
346 general = find_section(bdb, BDB_GENERAL_FEATURES); 346 general = find_section(bdb, BDB_GENERAL_FEATURES);
347 if (general) { 347 if (general) {
348 dev_priv->int_tv_support = general->int_tv_support; 348 dev_priv->vbt.int_tv_support = general->int_tv_support;
349 dev_priv->int_crt_support = general->int_crt_support; 349 dev_priv->vbt.int_crt_support = general->int_crt_support;
350 dev_priv->lvds_use_ssc = general->enable_ssc; 350 dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
351 dev_priv->lvds_ssc_freq = 351 dev_priv->vbt.lvds_ssc_freq =
352 intel_bios_ssc_frequency(dev, general->ssc_freq); 352 intel_bios_ssc_frequency(dev, general->ssc_freq);
353 dev_priv->display_clock_mode = general->display_clock_mode; 353 dev_priv->vbt.display_clock_mode = general->display_clock_mode;
354 dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; 354 dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
355 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", 355 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
356 dev_priv->int_tv_support, 356 dev_priv->vbt.int_tv_support,
357 dev_priv->int_crt_support, 357 dev_priv->vbt.int_crt_support,
358 dev_priv->lvds_use_ssc, 358 dev_priv->vbt.lvds_use_ssc,
359 dev_priv->lvds_ssc_freq, 359 dev_priv->vbt.lvds_ssc_freq,
360 dev_priv->display_clock_mode, 360 dev_priv->vbt.display_clock_mode,
361 dev_priv->fdi_rx_polarity_inverted); 361 dev_priv->vbt.fdi_rx_polarity_inverted);
362 } 362 }
363} 363}
364 364
@@ -375,7 +375,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
375 int bus_pin = general->crt_ddc_gmbus_pin; 375 int bus_pin = general->crt_ddc_gmbus_pin;
376 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 376 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
377 if (intel_gmbus_is_port_valid(bus_pin)) 377 if (intel_gmbus_is_port_valid(bus_pin))
378 dev_priv->crt_ddc_pin = bus_pin; 378 dev_priv->vbt.crt_ddc_pin = bus_pin;
379 } else { 379 } else {
380 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 380 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
381 block_size); 381 block_size);
@@ -486,7 +486,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
486 486
487 if (SUPPORTS_EDP(dev) && 487 if (SUPPORTS_EDP(dev) &&
488 driver->lvds_config == BDB_DRIVER_FEATURE_EDP) 488 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
489 dev_priv->edp.support = 1; 489 dev_priv->vbt.edp_support = 1;
490 490
491 if (driver->dual_frequency) 491 if (driver->dual_frequency)
492 dev_priv->render_reclock_avail = true; 492 dev_priv->render_reclock_avail = true;
@@ -501,20 +501,20 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
501 501
502 edp = find_section(bdb, BDB_EDP); 502 edp = find_section(bdb, BDB_EDP);
503 if (!edp) { 503 if (!edp) {
504 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) 504 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
505 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); 505 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
506 return; 506 return;
507 } 507 }
508 508
509 switch ((edp->color_depth >> (panel_type * 2)) & 3) { 509 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
510 case EDP_18BPP: 510 case EDP_18BPP:
511 dev_priv->edp.bpp = 18; 511 dev_priv->vbt.edp_bpp = 18;
512 break; 512 break;
513 case EDP_24BPP: 513 case EDP_24BPP:
514 dev_priv->edp.bpp = 24; 514 dev_priv->vbt.edp_bpp = 24;
515 break; 515 break;
516 case EDP_30BPP: 516 case EDP_30BPP:
517 dev_priv->edp.bpp = 30; 517 dev_priv->vbt.edp_bpp = 30;
518 break; 518 break;
519 } 519 }
520 520
@@ -522,48 +522,48 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
522 edp_pps = &edp->power_seqs[panel_type]; 522 edp_pps = &edp->power_seqs[panel_type];
523 edp_link_params = &edp->link_params[panel_type]; 523 edp_link_params = &edp->link_params[panel_type];
524 524
525 dev_priv->edp.pps = *edp_pps; 525 dev_priv->vbt.edp_pps = *edp_pps;
526 526
527 dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : 527 dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
528 DP_LINK_BW_1_62; 528 DP_LINK_BW_1_62;
529 switch (edp_link_params->lanes) { 529 switch (edp_link_params->lanes) {
530 case 0: 530 case 0:
531 dev_priv->edp.lanes = 1; 531 dev_priv->vbt.edp_lanes = 1;
532 break; 532 break;
533 case 1: 533 case 1:
534 dev_priv->edp.lanes = 2; 534 dev_priv->vbt.edp_lanes = 2;
535 break; 535 break;
536 case 3: 536 case 3:
537 default: 537 default:
538 dev_priv->edp.lanes = 4; 538 dev_priv->vbt.edp_lanes = 4;
539 break; 539 break;
540 } 540 }
541 switch (edp_link_params->preemphasis) { 541 switch (edp_link_params->preemphasis) {
542 case 0: 542 case 0:
543 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 543 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
544 break; 544 break;
545 case 1: 545 case 1:
546 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 546 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
547 break; 547 break;
548 case 2: 548 case 2:
549 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 549 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
550 break; 550 break;
551 case 3: 551 case 3:
552 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 552 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
553 break; 553 break;
554 } 554 }
555 switch (edp_link_params->vswing) { 555 switch (edp_link_params->vswing) {
556 case 0: 556 case 0:
557 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; 557 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
558 break; 558 break;
559 case 1: 559 case 1:
560 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; 560 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
561 break; 561 break;
562 case 2: 562 case 2:
563 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; 563 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
564 break; 564 break;
565 case 3: 565 case 3:
566 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; 566 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
567 break; 567 break;
568 } 568 }
569} 569}
@@ -611,13 +611,13 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
611 DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); 611 DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
612 return; 612 return;
613 } 613 }
614 dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); 614 dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
615 if (!dev_priv->child_dev) { 615 if (!dev_priv->vbt.child_dev) {
616 DRM_DEBUG_KMS("No memory space for child device\n"); 616 DRM_DEBUG_KMS("No memory space for child device\n");
617 return; 617 return;
618 } 618 }
619 619
620 dev_priv->child_dev_num = count; 620 dev_priv->vbt.child_dev_num = count;
621 count = 0; 621 count = 0;
622 for (i = 0; i < child_device_num; i++) { 622 for (i = 0; i < child_device_num; i++) {
623 p_child = &(p_defs->devices[i]); 623 p_child = &(p_defs->devices[i]);
@@ -625,7 +625,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
625 /* skip the device block if device type is invalid */ 625 /* skip the device block if device type is invalid */
626 continue; 626 continue;
627 } 627 }
628 child_dev_ptr = dev_priv->child_dev + count; 628 child_dev_ptr = dev_priv->vbt.child_dev + count;
629 count++; 629 count++;
630 memcpy((void *)child_dev_ptr, (void *)p_child, 630 memcpy((void *)child_dev_ptr, (void *)p_child,
631 sizeof(*p_child)); 631 sizeof(*p_child));
@@ -638,23 +638,23 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
638{ 638{
639 struct drm_device *dev = dev_priv->dev; 639 struct drm_device *dev = dev_priv->dev;
640 640
641 dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; 641 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
642 642
643 /* LFP panel data */ 643 /* LFP panel data */
644 dev_priv->lvds_dither = 1; 644 dev_priv->vbt.lvds_dither = 1;
645 dev_priv->lvds_vbt = 0; 645 dev_priv->vbt.lvds_vbt = 0;
646 646
647 /* SDVO panel data */ 647 /* SDVO panel data */
648 dev_priv->sdvo_lvds_vbt_mode = NULL; 648 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
649 649
650 /* general features */ 650 /* general features */
651 dev_priv->int_tv_support = 1; 651 dev_priv->vbt.int_tv_support = 1;
652 dev_priv->int_crt_support = 1; 652 dev_priv->vbt.int_crt_support = 1;
653 653
654 /* Default to using SSC */ 654 /* Default to using SSC */
655 dev_priv->lvds_use_ssc = 1; 655 dev_priv->vbt.lvds_use_ssc = 1;
656 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 656 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
657 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); 657 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
658} 658}
659 659
660static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 660static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
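Note on the intel_bios.c changes: the churn here is one mechanical rename moving every VBT-derived field from the top level of drm_i915_private into a vbt sub-struct. Reconstructing only the members visible in this diff gives roughly the following shape; field types are plausible guesses, the referenced struct definitions come from the i915 headers, and the real struct may carry more fields:

	struct intel_vbt_data {
		struct drm_display_mode *lfp_lvds_vbt_mode;
		struct drm_display_mode *sdvo_lvds_vbt_mode;

		unsigned int lvds_dither:1;
		unsigned int lvds_vbt:1;
		unsigned int int_tv_support:1;
		unsigned int int_crt_support:1;
		unsigned int lvds_use_ssc:1;
		unsigned int display_clock_mode:1;
		unsigned int fdi_rx_polarity_inverted:1;
		int lvds_ssc_freq;
		int crt_ddc_pin;
		unsigned int bios_lvds_val;

		/* eDP */
		int edp_support;
		int edp_bpp;
		int edp_rate;
		int edp_lanes;
		int edp_preemphasis;
		int edp_vswing;
		struct edp_power_seq edp_pps;	/* stored by value above */

		/* parsed child device blocks */
		struct child_device_config *child_dev;
		int child_dev_num;
	};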
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 58b4a53715cd..3acec8c48166 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,6 +84,28 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
84 return true; 84 return true;
85} 85}
86 86
87static void intel_crt_get_config(struct intel_encoder *encoder,
88 struct intel_crtc_config *pipe_config)
89{
90 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
91 struct intel_crt *crt = intel_encoder_to_crt(encoder);
92 u32 tmp, flags = 0;
93
94 tmp = I915_READ(crt->adpa_reg);
95
96 if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
97 flags |= DRM_MODE_FLAG_PHSYNC;
98 else
99 flags |= DRM_MODE_FLAG_NHSYNC;
100
101 if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
102 flags |= DRM_MODE_FLAG_PVSYNC;
103 else
104 flags |= DRM_MODE_FLAG_NVSYNC;
105
106 pipe_config->adjusted_mode.flags |= flags;
107}
108
87/* Note: The caller is required to filter out dpms modes not supported by the 109/* Note: The caller is required to filter out dpms modes not supported by the
88 * platform. */ 110 * platform. */
89static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 111static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -127,7 +149,7 @@ static void intel_enable_crt(struct intel_encoder *encoder)
127 intel_crt_set_dpms(encoder, crt->connector->base.dpms); 149 intel_crt_set_dpms(encoder, crt->connector->base.dpms);
128} 150}
129 151
130 152/* Special dpms function to support cloning between dvo/sdvo/crt. */
131static void intel_crt_dpms(struct drm_connector *connector, int mode) 153static void intel_crt_dpms(struct drm_connector *connector, int mode)
132{ 154{
133 struct drm_device *dev = connector->dev; 155 struct drm_device *dev = connector->dev;
@@ -158,6 +180,8 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
158 else 180 else
159 encoder->connectors_active = true; 181 encoder->connectors_active = true;
160 182
183 /* We call connector dpms manually below in case pipe dpms doesn't
184 * change due to cloning. */
161 if (mode < old_dpms) { 185 if (mode < old_dpms) {
162 /* From off to on, enable the pipe first. */ 186 /* From off to on, enable the pipe first. */
163 intel_crtc_update_dpms(crtc); 187 intel_crtc_update_dpms(crtc);
@@ -207,6 +231,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
207 if (HAS_PCH_SPLIT(dev)) 231 if (HAS_PCH_SPLIT(dev))
208 pipe_config->has_pch_encoder = true; 232 pipe_config->has_pch_encoder = true;
209 233
234 /* LPT FDI RX only supports 8bpc. */
235 if (HAS_PCH_LPT(dev))
236 pipe_config->pipe_bpp = 24;
237
210 return true; 238 return true;
211} 239}
212 240
@@ -431,7 +459,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
431 459
432 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); 460 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
433 461
434 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); 462 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
435 edid = intel_crt_get_edid(connector, i2c); 463 edid = intel_crt_get_edid(connector, i2c);
436 464
437 if (edid) { 465 if (edid) {
@@ -637,7 +665,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
637 int ret; 665 int ret;
638 struct i2c_adapter *i2c; 666 struct i2c_adapter *i2c;
639 667
640 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); 668 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
641 ret = intel_crt_ddc_get_modes(connector, i2c); 669 ret = intel_crt_ddc_get_modes(connector, i2c);
642 if (ret || !IS_G4X(dev)) 670 if (ret || !IS_G4X(dev))
643 return ret; 671 return ret;
@@ -774,6 +802,7 @@ void intel_crt_init(struct drm_device *dev)
774 crt->base.compute_config = intel_crt_compute_config; 802 crt->base.compute_config = intel_crt_compute_config;
775 crt->base.disable = intel_disable_crt; 803 crt->base.disable = intel_disable_crt;
776 crt->base.enable = intel_enable_crt; 804 crt->base.enable = intel_enable_crt;
805 crt->base.get_config = intel_crt_get_config;
777 if (I915_HAS_HOTPLUG(dev)) 806 if (I915_HAS_HOTPLUG(dev))
778 crt->base.hpd_pin = HPD_CRT; 807 crt->base.hpd_pin = HPD_CRT;
779 if (HAS_DDI(dev)) 808 if (HAS_DDI(dev))
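Note on the intel_crt.c changes: besides the vbt.crt_ddc_pin renames, two functional bits are added. First, intel_crt_get_config() is the hardware state read-out counterpart to mode setting: it decodes the current hsync/vsync polarity from the ADPA register back into the pipe config flags. Second, intel_crt_compute_config() pins pipe_bpp to 24 on LPT because the FDI receiver there only supports 8 bpc; pipe_bpp counts total bits per pixel across the three channels, so 8 bpc corresponds to 24. Standalone sketch of the polarity decode (the bit values are illustrative stand-ins for the real ADPA_HSYNC_ACTIVE_HIGH / ADPA_VSYNC_ACTIVE_HIGH defines in i915_reg.h):

	#include <stdio.h>

	#define HSYNC_ACTIVE_HIGH (1u << 3)	/* stand-in bit position */
	#define VSYNC_ACTIVE_HIGH (1u << 4)	/* stand-in bit position */

	int main(void)
	{
		unsigned int adpa = HSYNC_ACTIVE_HIGH;	/* sample readback */

		printf("hsync %s, vsync %s\n",
		       adpa & HSYNC_ACTIVE_HIGH ? "+" : "-",
		       adpa & VSYNC_ACTIVE_HIGH ? "+" : "-");
		return 0;
	}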
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fb961bb81903..324211ac9c55 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -174,6 +174,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
174 * mode set "sequence for CRT port" document: 174 * mode set "sequence for CRT port" document:
175 * - TP1 to TP2 time with the default value 175 * - TP1 to TP2 time with the default value
176 * - FDI delay to 90h 176 * - FDI delay to 90h
177 *
178 * WaFDIAutoLinkSetTimingOverrride:hsw
177 */ 179 */
178 I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | 180 I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
179 FDI_RX_PWRDN_LANE0_VAL(2) | 181 FDI_RX_PWRDN_LANE0_VAL(2) |
@@ -181,7 +183,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
181 183
182 /* Enable the PCH Receiver FDI PLL */ 184 /* Enable the PCH Receiver FDI PLL */
183 rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | 185 rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
184 FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19); 186 FDI_RX_PLL_ENABLE |
187 FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 188 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
186 POSTING_READ(_FDI_RXA_CTL); 189 POSTING_READ(_FDI_RXA_CTL);
187 udelay(220); 190 udelay(220);
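Note on the hunk above: the removed side open-codes the lane field as ((fdi_lanes - 1) << 19), and the replacement FDI_DP_PORT_WIDTH() pairs with the FDI_DP_PORT_WIDTH_X8 define dropped from i915_reg.h earlier in this diff. By analogy with DDI_PORT_WIDTH and the shift it replaces, the macro is presumably:

	/* presumed definition; the actual hunk adding it is not shown here */
	#define FDI_DP_PORT_WIDTH(width)	(((width) - 1) << 19)

The lane count itself now comes from the computed pipe config (intel_crtc->config.fdi_lanes) instead of living directly on intel_crtc.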
@@ -209,7 +212,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
209 * port reversal bit */ 212 * port reversal bit */
210 I915_WRITE(DDI_BUF_CTL(PORT_E), 213 I915_WRITE(DDI_BUF_CTL(PORT_E),
211 DDI_BUF_CTL_ENABLE | 214 DDI_BUF_CTL_ENABLE |
212 ((intel_crtc->fdi_lanes - 1) << 1) | 215 ((intel_crtc->config.fdi_lanes - 1) << 1) |
213 hsw_ddi_buf_ctl_values[i / 2]); 216 hsw_ddi_buf_ctl_values[i / 2]);
214 POSTING_READ(DDI_BUF_CTL(PORT_E)); 217 POSTING_READ(DDI_BUF_CTL(PORT_E));
215 218
@@ -278,392 +281,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
278 DRM_ERROR("FDI link training failed!\n"); 281 DRM_ERROR("FDI link training failed!\n");
279} 282}
280 283
281/* WRPLL clock dividers */
282struct wrpll_tmds_clock {
283 u32 clock;
284 u16 p; /* Post divider */
285 u16 n2; /* Feedback divider */
286 u16 r2; /* Reference divider */
287};
288
289/* Table of matching values for WRPLL clocks programming for each frequency.
290 * The code assumes this table is sorted. */
291static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
292 {19750, 38, 25, 18},
293 {20000, 48, 32, 18},
294 {21000, 36, 21, 15},
295 {21912, 42, 29, 17},
296 {22000, 36, 22, 15},
297 {23000, 36, 23, 15},
298 {23500, 40, 40, 23},
299 {23750, 26, 16, 14},
300 {24000, 36, 24, 15},
301 {25000, 36, 25, 15},
302 {25175, 26, 40, 33},
303 {25200, 30, 21, 15},
304 {26000, 36, 26, 15},
305 {27000, 30, 21, 14},
306 {27027, 18, 100, 111},
307 {27500, 30, 29, 19},
308 {28000, 34, 30, 17},
309 {28320, 26, 30, 22},
310 {28322, 32, 42, 25},
311 {28750, 24, 23, 18},
312 {29000, 30, 29, 18},
313 {29750, 32, 30, 17},
314 {30000, 30, 25, 15},
315 {30750, 30, 41, 24},
316 {31000, 30, 31, 18},
317 {31500, 30, 28, 16},
318 {32000, 30, 32, 18},
319 {32500, 28, 32, 19},
320 {33000, 24, 22, 15},
321 {34000, 28, 30, 17},
322 {35000, 26, 32, 19},
323 {35500, 24, 30, 19},
324 {36000, 26, 26, 15},
325 {36750, 26, 46, 26},
326 {37000, 24, 23, 14},
327 {37762, 22, 40, 26},
328 {37800, 20, 21, 15},
329 {38000, 24, 27, 16},
330 {38250, 24, 34, 20},
331 {39000, 24, 26, 15},
332 {40000, 24, 32, 18},
333 {40500, 20, 21, 14},
334 {40541, 22, 147, 89},
335 {40750, 18, 19, 14},
336 {41000, 16, 17, 14},
337 {41500, 22, 44, 26},
338 {41540, 22, 44, 26},
339 {42000, 18, 21, 15},
340 {42500, 22, 45, 26},
341 {43000, 20, 43, 27},
342 {43163, 20, 24, 15},
343 {44000, 18, 22, 15},
344 {44900, 20, 108, 65},
345 {45000, 20, 25, 15},
346 {45250, 20, 52, 31},
347 {46000, 18, 23, 15},
348 {46750, 20, 45, 26},
349 {47000, 20, 40, 23},
350 {48000, 18, 24, 15},
351 {49000, 18, 49, 30},
352 {49500, 16, 22, 15},
353 {50000, 18, 25, 15},
354 {50500, 18, 32, 19},
355 {51000, 18, 34, 20},
356 {52000, 18, 26, 15},
357 {52406, 14, 34, 25},
358 {53000, 16, 22, 14},
359 {54000, 16, 24, 15},
360 {54054, 16, 173, 108},
361 {54500, 14, 24, 17},
362 {55000, 12, 22, 18},
363 {56000, 14, 45, 31},
364 {56250, 16, 25, 15},
365 {56750, 14, 25, 17},
366 {57000, 16, 27, 16},
367 {58000, 16, 43, 25},
368 {58250, 16, 38, 22},
369 {58750, 16, 40, 23},
370 {59000, 14, 26, 17},
371 {59341, 14, 40, 26},
372 {59400, 16, 44, 25},
373 {60000, 16, 32, 18},
374 {60500, 12, 39, 29},
375 {61000, 14, 49, 31},
376 {62000, 14, 37, 23},
377 {62250, 14, 42, 26},
378 {63000, 12, 21, 15},
379 {63500, 14, 28, 17},
380 {64000, 12, 27, 19},
381 {65000, 14, 32, 19},
382 {65250, 12, 29, 20},
383 {65500, 12, 32, 22},
384 {66000, 12, 22, 15},
385 {66667, 14, 38, 22},
386 {66750, 10, 21, 17},
387 {67000, 14, 33, 19},
388 {67750, 14, 58, 33},
389 {68000, 14, 30, 17},
390 {68179, 14, 46, 26},
391 {68250, 14, 46, 26},
392 {69000, 12, 23, 15},
393 {70000, 12, 28, 18},
394 {71000, 12, 30, 19},
395 {72000, 12, 24, 15},
396 {73000, 10, 23, 17},
397 {74000, 12, 23, 14},
398 {74176, 8, 100, 91},
399 {74250, 10, 22, 16},
400 {74481, 12, 43, 26},
401 {74500, 10, 29, 21},
402 {75000, 12, 25, 15},
403 {75250, 10, 39, 28},
404 {76000, 12, 27, 16},
405 {77000, 12, 53, 31},
406 {78000, 12, 26, 15},
407 {78750, 12, 28, 16},
408 {79000, 10, 38, 26},
409 {79500, 10, 28, 19},
410 {80000, 12, 32, 18},
411 {81000, 10, 21, 14},
412 {81081, 6, 100, 111},
413 {81624, 8, 29, 24},
414 {82000, 8, 17, 14},
415 {83000, 10, 40, 26},
416 {83950, 10, 28, 18},
417 {84000, 10, 28, 18},
418 {84750, 6, 16, 17},
419 {85000, 6, 17, 18},
420 {85250, 10, 30, 19},
421 {85750, 10, 27, 17},
422 {86000, 10, 43, 27},
423 {87000, 10, 29, 18},
424 {88000, 10, 44, 27},
425 {88500, 10, 41, 25},
426 {89000, 10, 28, 17},
427 {89012, 6, 90, 91},
428 {89100, 10, 33, 20},
429 {90000, 10, 25, 15},
430 {91000, 10, 32, 19},
431 {92000, 10, 46, 27},
432 {93000, 10, 31, 18},
433 {94000, 10, 40, 23},
434 {94500, 10, 28, 16},
435 {95000, 10, 44, 25},
436 {95654, 10, 39, 22},
437 {95750, 10, 39, 22},
438 {96000, 10, 32, 18},
439 {97000, 8, 23, 16},
440 {97750, 8, 42, 29},
441 {98000, 8, 45, 31},
442 {99000, 8, 22, 15},
443 {99750, 8, 34, 23},
444 {100000, 6, 20, 18},
445 {100500, 6, 19, 17},
446 {101000, 6, 37, 33},
447 {101250, 8, 21, 14},
448 {102000, 6, 17, 15},
449 {102250, 6, 25, 22},
450 {103000, 8, 29, 19},
451 {104000, 8, 37, 24},
452 {105000, 8, 28, 18},
453 {106000, 8, 22, 14},
454 {107000, 8, 46, 29},
455 {107214, 8, 27, 17},
456 {108000, 8, 24, 15},
457 {108108, 8, 173, 108},
458 {109000, 6, 23, 19},
459 {110000, 6, 22, 18},
460 {110013, 6, 22, 18},
461 {110250, 8, 49, 30},
462 {110500, 8, 36, 22},
463 {111000, 8, 23, 14},
464 {111264, 8, 150, 91},
465 {111375, 8, 33, 20},
466 {112000, 8, 63, 38},
467 {112500, 8, 25, 15},
468 {113100, 8, 57, 34},
469 {113309, 8, 42, 25},
470 {114000, 8, 27, 16},
471 {115000, 6, 23, 18},
472 {116000, 8, 43, 25},
473 {117000, 8, 26, 15},
474 {117500, 8, 40, 23},
475 {118000, 6, 38, 29},
476 {119000, 8, 30, 17},
477 {119500, 8, 46, 26},
478 {119651, 8, 39, 22},
479 {120000, 8, 32, 18},
480 {121000, 6, 39, 29},
481 {121250, 6, 31, 23},
482 {121750, 6, 23, 17},
483 {122000, 6, 42, 31},
484 {122614, 6, 30, 22},
485 {123000, 6, 41, 30},
486 {123379, 6, 37, 27},
487 {124000, 6, 51, 37},
488 {125000, 6, 25, 18},
489 {125250, 4, 13, 14},
490 {125750, 4, 27, 29},
491 {126000, 6, 21, 15},
492 {127000, 6, 24, 17},
493 {127250, 6, 41, 29},
494 {128000, 6, 27, 19},
495 {129000, 6, 43, 30},
496 {129859, 4, 25, 26},
497 {130000, 6, 26, 18},
498 {130250, 6, 42, 29},
499 {131000, 6, 32, 22},
500 {131500, 6, 38, 26},
501 {131850, 6, 41, 28},
502 {132000, 6, 22, 15},
503 {132750, 6, 28, 19},
504 {133000, 6, 34, 23},
505 {133330, 6, 37, 25},
506 {134000, 6, 61, 41},
507 {135000, 6, 21, 14},
508 {135250, 6, 167, 111},
509 {136000, 6, 62, 41},
510 {137000, 6, 35, 23},
511 {138000, 6, 23, 15},
512 {138500, 6, 40, 26},
513 {138750, 6, 37, 24},
514 {139000, 6, 34, 22},
515 {139050, 6, 34, 22},
516 {139054, 6, 34, 22},
517 {140000, 6, 28, 18},
518 {141000, 6, 36, 23},
519 {141500, 6, 22, 14},
520 {142000, 6, 30, 19},
521 {143000, 6, 27, 17},
522 {143472, 4, 17, 16},
523 {144000, 6, 24, 15},
524 {145000, 6, 29, 18},
525 {146000, 6, 47, 29},
526 {146250, 6, 26, 16},
527 {147000, 6, 49, 30},
528 {147891, 6, 23, 14},
529 {148000, 6, 23, 14},
530 {148250, 6, 28, 17},
531 {148352, 4, 100, 91},
532 {148500, 6, 33, 20},
533 {149000, 6, 48, 29},
534 {150000, 6, 25, 15},
535 {151000, 4, 19, 17},
536 {152000, 6, 27, 16},
537 {152280, 6, 44, 26},
538 {153000, 6, 34, 20},
539 {154000, 6, 53, 31},
540 {155000, 6, 31, 18},
541 {155250, 6, 50, 29},
542 {155750, 6, 45, 26},
543 {156000, 6, 26, 15},
544 {157000, 6, 61, 35},
545 {157500, 6, 28, 16},
546 {158000, 6, 65, 37},
547 {158250, 6, 44, 25},
548 {159000, 6, 53, 30},
549 {159500, 6, 39, 22},
550 {160000, 6, 32, 18},
551 {161000, 4, 31, 26},
552 {162000, 4, 18, 15},
553 {162162, 4, 131, 109},
554 {162500, 4, 53, 44},
555 {163000, 4, 29, 24},
556 {164000, 4, 17, 14},
557 {165000, 4, 22, 18},
558 {166000, 4, 32, 26},
559 {167000, 4, 26, 21},
560 {168000, 4, 46, 37},
561 {169000, 4, 104, 83},
562 {169128, 4, 64, 51},
563 {169500, 4, 39, 31},
564 {170000, 4, 34, 27},
565 {171000, 4, 19, 15},
566 {172000, 4, 51, 40},
567 {172750, 4, 32, 25},
568 {172800, 4, 32, 25},
569 {173000, 4, 41, 32},
570 {174000, 4, 49, 38},
571 {174787, 4, 22, 17},
572 {175000, 4, 35, 27},
573 {176000, 4, 30, 23},
574 {177000, 4, 38, 29},
575 {178000, 4, 29, 22},
576 {178500, 4, 37, 28},
577 {179000, 4, 53, 40},
578 {179500, 4, 73, 55},
579 {180000, 4, 20, 15},
580 {181000, 4, 55, 41},
581 {182000, 4, 31, 23},
582 {183000, 4, 42, 31},
583 {184000, 4, 30, 22},
584 {184750, 4, 26, 19},
585 {185000, 4, 37, 27},
586 {186000, 4, 51, 37},
587 {187000, 4, 36, 26},
588 {188000, 4, 32, 23},
589 {189000, 4, 21, 15},
590 {190000, 4, 38, 27},
591 {190960, 4, 41, 29},
592 {191000, 4, 41, 29},
593 {192000, 4, 27, 19},
594 {192250, 4, 37, 26},
595 {193000, 4, 20, 14},
596 {193250, 4, 53, 37},
597 {194000, 4, 23, 16},
598 {194208, 4, 23, 16},
599 {195000, 4, 26, 18},
600 {196000, 4, 45, 31},
601 {197000, 4, 35, 24},
602 {197750, 4, 41, 28},
603 {198000, 4, 22, 15},
604 {198500, 4, 25, 17},
605 {199000, 4, 28, 19},
606 {200000, 4, 37, 25},
607 {201000, 4, 61, 41},
608 {202000, 4, 112, 75},
609 {202500, 4, 21, 14},
610 {203000, 4, 146, 97},
611 {204000, 4, 62, 41},
612 {204750, 4, 44, 29},
613 {205000, 4, 38, 25},
614 {206000, 4, 29, 19},
615 {207000, 4, 23, 15},
616 {207500, 4, 40, 26},
617 {208000, 4, 37, 24},
618 {208900, 4, 48, 31},
619 {209000, 4, 48, 31},
620 {209250, 4, 31, 20},
621 {210000, 4, 28, 18},
622 {211000, 4, 25, 16},
623 {212000, 4, 22, 14},
624 {213000, 4, 30, 19},
625 {213750, 4, 38, 24},
626 {214000, 4, 46, 29},
627 {214750, 4, 35, 22},
628 {215000, 4, 43, 27},
629 {216000, 4, 24, 15},
630 {217000, 4, 37, 23},
631 {218000, 4, 42, 26},
632 {218250, 4, 42, 26},
633 {218750, 4, 34, 21},
634 {219000, 4, 47, 29},
635 {220000, 4, 44, 27},
636 {220640, 4, 49, 30},
637 {220750, 4, 36, 22},
638 {221000, 4, 36, 22},
639 {222000, 4, 23, 14},
640 {222525, 4, 28, 17},
641 {222750, 4, 33, 20},
642 {227000, 4, 37, 22},
643 {230250, 4, 29, 17},
644 {233500, 4, 38, 22},
645 {235000, 4, 40, 23},
646 {238000, 4, 30, 17},
647 {241500, 2, 17, 19},
648 {245250, 2, 20, 22},
649 {247750, 2, 22, 24},
650 {253250, 2, 15, 16},
651 {256250, 2, 18, 19},
652 {262500, 2, 31, 32},
653 {267250, 2, 66, 67},
654 {268500, 2, 94, 95},
655 {270000, 2, 14, 14},
656 {272500, 2, 77, 76},
657 {273750, 2, 57, 56},
658 {280750, 2, 24, 23},
659 {281250, 2, 23, 22},
660 {286000, 2, 17, 16},
661 {291750, 2, 26, 24},
662 {296703, 2, 56, 51},
663 {297000, 2, 22, 20},
664 {298000, 2, 21, 19},
665};
666
667static void intel_ddi_mode_set(struct drm_encoder *encoder, 284static void intel_ddi_mode_set(struct drm_encoder *encoder,
668 struct drm_display_mode *mode, 285 struct drm_display_mode *mode,
669 struct drm_display_mode *adjusted_mode) 286 struct drm_display_mode *adjusted_mode)
@@ -675,7 +292,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
675 int pipe = intel_crtc->pipe; 292 int pipe = intel_crtc->pipe;
676 int type = intel_encoder->type; 293 int type = intel_encoder->type;
677 294
678 DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", 295 DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
679 port_name(port), pipe_name(pipe)); 296 port_name(port), pipe_name(pipe));
680 297
681 intel_crtc->eld_vld = false; 298 intel_crtc->eld_vld = false;
@@ -686,22 +303,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
686 303
687 intel_dp->DP = intel_dig_port->port_reversal | 304 intel_dp->DP = intel_dig_port->port_reversal |
688 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 305 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
689 switch (intel_dp->lane_count) { 306 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
690 case 1:
691 intel_dp->DP |= DDI_PORT_WIDTH_X1;
692 break;
693 case 2:
694 intel_dp->DP |= DDI_PORT_WIDTH_X2;
695 break;
696 case 4:
697 intel_dp->DP |= DDI_PORT_WIDTH_X4;
698 break;
699 default:
700 intel_dp->DP |= DDI_PORT_WIDTH_X4;
701 WARN(1, "Unexpected DP lane count %d\n",
702 intel_dp->lane_count);
703 break;
704 }
705 307
706 if (intel_dp->has_audio) { 308 if (intel_dp->has_audio) {
707 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", 309 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
@@ -748,8 +350,8 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
748 } 350 }
749 351
750 if (num_encoders != 1) 352 if (num_encoders != 1)
751 WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders, 353 WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
752 intel_crtc->pipe); 354 pipe_name(intel_crtc->pipe));
753 355
754 BUG_ON(ret == NULL); 356 BUG_ON(ret == NULL);
755 return ret; 357 return ret;
@@ -802,30 +404,227 @@ void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
802 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; 404 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
803} 405}
804 406
805static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2) 407#define LC_FREQ 2700
408#define LC_FREQ_2K (LC_FREQ * 2000)
409
410#define P_MIN 2
411#define P_MAX 64
412#define P_INC 2
413
414/* Constraints for PLL good behavior */
415#define REF_MIN 48
416#define REF_MAX 400
417#define VCO_MIN 2400
418#define VCO_MAX 4800
419
420#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
421
422struct wrpll_rnp {
423 unsigned p, n2, r2;
424};
425
426static unsigned wrpll_get_budget_for_freq(int clock)
427{
428 unsigned budget;
429
430 switch (clock) {
431 case 25175000:
432 case 25200000:
433 case 27000000:
434 case 27027000:
435 case 37762500:
436 case 37800000:
437 case 40500000:
438 case 40541000:
439 case 54000000:
440 case 54054000:
441 case 59341000:
442 case 59400000:
443 case 72000000:
444 case 74176000:
445 case 74250000:
446 case 81000000:
447 case 81081000:
448 case 89012000:
449 case 89100000:
450 case 108000000:
451 case 108108000:
452 case 111264000:
453 case 111375000:
454 case 148352000:
455 case 148500000:
456 case 162000000:
457 case 162162000:
458 case 222525000:
459 case 222750000:
460 case 296703000:
461 case 297000000:
462 budget = 0;
463 break;
464 case 233500000:
465 case 245250000:
466 case 247750000:
467 case 253250000:
468 case 298000000:
469 budget = 1500;
470 break;
471 case 169128000:
472 case 169500000:
473 case 179500000:
474 case 202000000:
475 budget = 2000;
476 break;
477 case 256250000:
478 case 262500000:
479 case 270000000:
480 case 272500000:
481 case 273750000:
482 case 280750000:
483 case 281250000:
484 case 286000000:
485 case 291750000:
486 budget = 4000;
487 break;
488 case 267250000:
489 case 268500000:
490 budget = 5000;
491 break;
492 default:
493 budget = 1000;
494 break;
495 }
496
497 return budget;
498}
499
500static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
501 unsigned r2, unsigned n2, unsigned p,
502 struct wrpll_rnp *best)
806{ 503{
807 u32 i; 504 uint64_t a, b, c, d, diff, diff_best;
808 505
809 for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) 506 /* No best (r,n,p) yet */
810 if (clock <= wrpll_tmds_clock_table[i].clock) 507 if (best->p == 0) {
811 break; 508 best->p = p;
509 best->n2 = n2;
510 best->r2 = r2;
511 return;
512 }
513
514 /*
515 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
516 * freq2k.
517 *
518 * delta = 1e6 *
519 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
520 * freq2k;
521 *
522 * and we would like delta <= budget.
523 *
524 * If the discrepancy is above the PPM-based budget, always prefer to
525 * improve upon the previous solution. However, if you're within the
526 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
527 */
528 a = freq2k * budget * p * r2;
529 b = freq2k * budget * best->p * best->r2;
530 diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
531 diff_best = ABS_DIFF((freq2k * best->p * best->r2),
532 (LC_FREQ_2K * best->n2));
533 c = 1000000 * diff;
534 d = 1000000 * diff_best;
535
536 if (a < c && b < d) {
537 /* If both are above the budget, pick the closer */
538 if (best->p * best->r2 * diff < p * r2 * diff_best) {
539 best->p = p;
540 best->n2 = n2;
541 best->r2 = r2;
542 }
543 } else if (a >= c && b < d) {
544 /* If A is below the threshold but B is above it? Update. */
545 best->p = p;
546 best->n2 = n2;
547 best->r2 = r2;
548 } else if (a >= c && b >= d) {
549 /* Both are below the limit, so pick the higher n2/(r2*r2) */
550 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
551 best->p = p;
552 best->n2 = n2;
553 best->r2 = r2;
554 }
555 }
556 /* Otherwise a < c && b >= d, do nothing */
557}
558
559static void
560intel_ddi_calculate_wrpll(int clock /* in Hz */,
561 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
562{
563 uint64_t freq2k;
564 unsigned p, n2, r2;
565 struct wrpll_rnp best = { 0, 0, 0 };
566 unsigned budget;
812 567
813 if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) 568 freq2k = clock / 100;
814 i--;
815 569
816 *p = wrpll_tmds_clock_table[i].p; 570 budget = wrpll_get_budget_for_freq(clock);
817 *n2 = wrpll_tmds_clock_table[i].n2;
818 *r2 = wrpll_tmds_clock_table[i].r2;
819 571
820 if (wrpll_tmds_clock_table[i].clock != clock) 572 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
821 DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n", 573 * and directly pass the LC PLL to it. */
822 wrpll_tmds_clock_table[i].clock, clock); 574 if (freq2k == 5400000) {
575 *n2_out = 2;
576 *p_out = 1;
577 *r2_out = 2;
578 return;
579 }
580
581 /*
582 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
583 * the WR PLL.
584 *
585 * We want R so that REF_MIN <= Ref <= REF_MAX.
586 * Injecting R2 = 2 * R gives:
587 * REF_MAX * r2 > LC_FREQ * 2 and
588 * REF_MIN * r2 < LC_FREQ * 2
589 *
590 * Which means the desired boundaries for r2 are:
591 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
592 *
593 */
594 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
595 r2 <= LC_FREQ * 2 / REF_MIN;
596 r2++) {
597
598 /*
599 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
600 *
601 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
602 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
603 * VCO_MAX * r2 > n2 * LC_FREQ and
604 * VCO_MIN * r2 < n2 * LC_FREQ)
605 *
606 * Which means the desired boundaries for n2 are:
607 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
608 */
609 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
610 n2 <= VCO_MAX * r2 / LC_FREQ;
611 n2++) {
823 612
824 DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", 613 for (p = P_MIN; p <= P_MAX; p += P_INC)
825 clock, *p, *n2, *r2); 614 wrpll_update_rnp(freq2k, budget,
615 r2, n2, p, &best);
616 }
617 }
618
619 *n2_out = best.n2;
620 *p_out = best.p;
621 *r2_out = best.r2;
622
623 DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
624 clock, *p_out, *n2_out, *r2_out);
826} 625}
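
For a sense of the search space, the r2 and n2 bounds derived in the
comments can be enumerated directly. The constants below are assumptions
(the #defines live outside this hunk): LC_FREQ = 2700, REF_MIN = 48,
REF_MAX = 400, VCO_MIN = 2400, VCO_MAX = 4800, all in MHz. With those,
r2 runs from 14 to 112, and e.g. r2 = 14 allows n2 in [13, 24]:

#include <stdio.h>

#define LC_FREQ 2700    /* assumed LCPLL output, MHz */
#define REF_MIN   48    /* assumed WR PLL reference window, MHz */
#define REF_MAX  400
#define VCO_MIN 2400    /* assumed WR PLL VCO window, MHz */
#define VCO_MAX 4800

int main(void)
{
        unsigned r2, n2_lo, n2_hi;

        /* Same loop bounds as intel_ddi_calculate_wrpll() above. */
        for (r2 = LC_FREQ * 2 / REF_MAX + 1; r2 <= LC_FREQ * 2 / REF_MIN; r2++) {
                n2_lo = VCO_MIN * r2 / LC_FREQ + 1;
                n2_hi = VCO_MAX * r2 / LC_FREQ;
                printf("r2=%3u -> n2 in [%u, %u]\n", r2, n2_lo, n2_hi);
        }
        return 0;
}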
827 626
828bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) 627bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
829{ 628{
830 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 629 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
831 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 630 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -835,6 +634,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
835 int type = intel_encoder->type; 634 int type = intel_encoder->type;
836 enum pipe pipe = intel_crtc->pipe; 635 enum pipe pipe = intel_crtc->pipe;
837 uint32_t reg, val; 636 uint32_t reg, val;
637 int clock = intel_crtc->config.port_clock;
838 638
839 /* TODO: reuse PLLs when possible (compare values) */ 639 /* TODO: reuse PLLs when possible (compare values) */
840 640
@@ -863,7 +663,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
863 return true; 663 return true;
864 664
865 } else if (type == INTEL_OUTPUT_HDMI) { 665 } else if (type == INTEL_OUTPUT_HDMI) {
866 int p, n2, r2; 666 unsigned p, n2, r2;
867 667
868 if (plls->wrpll1_refcount == 0) { 668 if (plls->wrpll1_refcount == 0) {
869 DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", 669 DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
@@ -885,7 +685,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
885 WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, 685 WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
886 "WRPLL already enabled\n"); 686 "WRPLL already enabled\n");
887 687
888 intel_ddi_calculate_wrpll(clock, &p, &n2, &r2); 688 intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
889 689
890 val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | 690 val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
891 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | 691 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -995,7 +795,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
995 /* Can only use the always-on power well for eDP when 795 /* Can only use the always-on power well for eDP when
996 * not using the panel fitter, and when not using motion 796 * not using the panel fitter, and when not using motion
997 * blur mitigation (which we don't support). */ 797 * blur mitigation (which we don't support). */
998 if (dev_priv->pch_pf_size) 798 if (intel_crtc->config.pch_pfit.size)
999 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 799 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
1000 else 800 else
1001 temp |= TRANS_DDI_EDP_INPUT_A_ON; 801 temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1022,7 +822,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1022 822
1023 } else if (type == INTEL_OUTPUT_ANALOG) { 823 } else if (type == INTEL_OUTPUT_ANALOG) {
1024 temp |= TRANS_DDI_MODE_SELECT_FDI; 824 temp |= TRANS_DDI_MODE_SELECT_FDI;
1025 temp |= (intel_crtc->fdi_lanes - 1) << 1; 825 temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
1026 826
1027 } else if (type == INTEL_OUTPUT_DISPLAYPORT || 827 } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
1028 type == INTEL_OUTPUT_EDP) { 828 type == INTEL_OUTPUT_EDP) {
@@ -1030,25 +830,10 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1030 830
1031 temp |= TRANS_DDI_MODE_SELECT_DP_SST; 831 temp |= TRANS_DDI_MODE_SELECT_DP_SST;
1032 832
1033 switch (intel_dp->lane_count) { 833 temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
1034 case 1:
1035 temp |= TRANS_DDI_PORT_WIDTH_X1;
1036 break;
1037 case 2:
1038 temp |= TRANS_DDI_PORT_WIDTH_X2;
1039 break;
1040 case 4:
1041 temp |= TRANS_DDI_PORT_WIDTH_X4;
1042 break;
1043 default:
1044 temp |= TRANS_DDI_PORT_WIDTH_X4;
1045 WARN(1, "Unsupported lane count %d\n",
1046 intel_dp->lane_count);
1047 }
1048
1049 } else { 834 } else {
1050 WARN(1, "Invalid encoder type %d for pipe %d\n", 835 WARN(1, "Invalid encoder type %d for pipe %c\n",
1051 intel_encoder->type, pipe); 836 intel_encoder->type, pipe_name(pipe));
1052 } 837 }
1053 838
1054 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); 839 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@@ -1148,7 +933,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1148 } 933 }
1149 } 934 }
1150 935
1151 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port); 936 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
1152 937
1153 return false; 938 return false;
1154} 939}
@@ -1334,7 +1119,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1334 ironlake_edp_backlight_on(intel_dp); 1119 ironlake_edp_backlight_on(intel_dp);
1335 } 1120 }
1336 1121
1337 if (intel_crtc->eld_vld) { 1122 if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
1338 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1123 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1339 tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); 1124 tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
1340 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1125 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@@ -1352,9 +1137,12 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1352 struct drm_i915_private *dev_priv = dev->dev_private; 1137 struct drm_i915_private *dev_priv = dev->dev_private;
1353 uint32_t tmp; 1138 uint32_t tmp;
1354 1139
1355	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1356	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
1357	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1140	if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
1141	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1142	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
1143 (pipe * 4));
1144 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1145 }
1358 1146
1359 if (type == INTEL_OUTPUT_EDP) { 1147 if (type == INTEL_OUTPUT_EDP) {
1360 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1148 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1366,14 +1154,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1366int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1154int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1367{ 1155{
1368 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) 1156 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1369 return 450; 1157 return 450000;
1370 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == 1158 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
1371 LCPLL_CLK_FREQ_450) 1159 LCPLL_CLK_FREQ_450)
1372 return 450; 1160 return 450000;
1373 else if (IS_ULT(dev_priv->dev)) 1161 else if (IS_ULT(dev_priv->dev))
1374 return 338; 1162 return 337500;
1375 else 1163 else
1376 return 540; 1164 return 540000;
1377} 1165}
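
Note the unit change: the CDCLK getter now returns kHz rather than rounded
MHz, which is why ULT reports the exact 337500 instead of 338. A tiny
sketch of printing the new unit (print_cdclk is a hypothetical helper,
for illustration only):

#include <stdio.h>

static void print_cdclk(int cdclk_khz)
{
        /* kHz in, human-readable MHz out */
        printf("CDCLK running at %d.%03d MHz\n",
               cdclk_khz / 1000, cdclk_khz % 1000);
}

int main(void)
{
        print_cdclk(337500);    /* ULT: previously reported as "338" MHz */
        return 0;
}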
1378 1166
1379void intel_ddi_pll_init(struct drm_device *dev) 1167void intel_ddi_pll_init(struct drm_device *dev)
@@ -1386,7 +1174,7 @@ void intel_ddi_pll_init(struct drm_device *dev)
1386 * Don't even try to turn it on. 1174 * Don't even try to turn it on.
1387 */ 1175 */
1388 1176
1389 DRM_DEBUG_KMS("CDCLK running at %dMHz\n", 1177 DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
1390 intel_ddi_get_cdclk_freq(dev_priv)); 1178 intel_ddi_get_cdclk_freq(dev_priv));
1391 1179
1392 if (val & LCPLL_CD_SOURCE_FCLK) 1180 if (val & LCPLL_CD_SOURCE_FCLK)
@@ -1472,6 +1260,27 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1472 intel_dp_check_link_status(intel_dp); 1260 intel_dp_check_link_status(intel_dp);
1473} 1261}
1474 1262
1263static void intel_ddi_get_config(struct intel_encoder *encoder,
1264 struct intel_crtc_config *pipe_config)
1265{
1266 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1267 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1268 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
1269 u32 temp, flags = 0;
1270
1271 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1272 if (temp & TRANS_DDI_PHSYNC)
1273 flags |= DRM_MODE_FLAG_PHSYNC;
1274 else
1275 flags |= DRM_MODE_FLAG_NHSYNC;
1276 if (temp & TRANS_DDI_PVSYNC)
1277 flags |= DRM_MODE_FLAG_PVSYNC;
1278 else
1279 flags |= DRM_MODE_FLAG_NVSYNC;
1280
1281 pipe_config->adjusted_mode.flags |= flags;
1282}
1283
1475static void intel_ddi_destroy(struct drm_encoder *encoder) 1284static void intel_ddi_destroy(struct drm_encoder *encoder)
1476{ 1285{
1477 /* HDMI has nothing special to destroy, so we can go with this. */ 1286 /* HDMI has nothing special to destroy, so we can go with this. */
@@ -1482,9 +1291,13 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
1482 struct intel_crtc_config *pipe_config) 1291 struct intel_crtc_config *pipe_config)
1483{ 1292{
1484 int type = encoder->type; 1293 int type = encoder->type;
1294 int port = intel_ddi_get_encoder_port(encoder);
1485 1295
1486 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); 1296 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
1487 1297
1298 if (port == PORT_A)
1299 pipe_config->cpu_transcoder = TRANSCODER_EDP;
1300
1488 if (type == INTEL_OUTPUT_HDMI) 1301 if (type == INTEL_OUTPUT_HDMI)
1489 return intel_hdmi_compute_config(encoder, pipe_config); 1302 return intel_hdmi_compute_config(encoder, pipe_config);
1490 else 1303 else
@@ -1518,16 +1331,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1518 return; 1331 return;
1519 } 1332 }
1520 1333
1521 if (port != PORT_A) {
1522 hdmi_connector = kzalloc(sizeof(struct intel_connector),
1523 GFP_KERNEL);
1524 if (!hdmi_connector) {
1525 kfree(dp_connector);
1526 kfree(intel_dig_port);
1527 return;
1528 }
1529 }
1530
1531 intel_encoder = &intel_dig_port->base; 1334 intel_encoder = &intel_dig_port->base;
1532 encoder = &intel_encoder->base; 1335 encoder = &intel_encoder->base;
1533 1336
@@ -1541,12 +1344,11 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1541 intel_encoder->disable = intel_disable_ddi; 1344 intel_encoder->disable = intel_disable_ddi;
1542 intel_encoder->post_disable = intel_ddi_post_disable; 1345 intel_encoder->post_disable = intel_ddi_post_disable;
1543 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 1346 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1347 intel_encoder->get_config = intel_ddi_get_config;
1544 1348
1545 intel_dig_port->port = port; 1349 intel_dig_port->port = port;
1546 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & 1350 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
1547 DDI_BUF_PORT_REVERSAL; 1351 DDI_BUF_PORT_REVERSAL;
1548 if (hdmi_connector)
1549 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
1550 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); 1352 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1551 1353
1552 intel_encoder->type = INTEL_OUTPUT_UNKNOWN; 1354 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
@@ -1554,7 +1356,21 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1554 intel_encoder->cloneable = false; 1356 intel_encoder->cloneable = false;
1555 intel_encoder->hot_plug = intel_ddi_hot_plug; 1357 intel_encoder->hot_plug = intel_ddi_hot_plug;
1556 1358
1359	if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
1360 drm_encoder_cleanup(encoder);
1361 kfree(intel_dig_port);
1362 kfree(dp_connector);
1363 return;
1364 }
1365
1366 if (intel_encoder->type != INTEL_OUTPUT_EDP) {
1367 hdmi_connector = kzalloc(sizeof(struct intel_connector),
1368 GFP_KERNEL);
1369 if (!hdmi_connector) {
1370 return;
1371 }
1372
1373 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
1374	intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
1375	}
1557	if (hdmi_connector)
1558	intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
1559	intel_dp_init_connector(intel_dig_port, dp_connector);
1560} 1376}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56746dcac40f..85f3eb74d2b7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,18 +46,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 47
48typedef struct { 48typedef struct {
49 /* given values */
50 int n;
51 int m1, m2;
52 int p1, p2;
53 /* derived values */
54 int dot;
55 int vco;
56 int m;
57 int p;
58} intel_clock_t;
59
60typedef struct {
61 int min, max; 49 int min, max;
62} intel_range_t; 50} intel_range_t;
63 51
@@ -71,24 +59,6 @@ typedef struct intel_limit intel_limit_t;
71struct intel_limit { 59struct intel_limit {
72 intel_range_t dot, vco, n, m, m1, m2, p, p1; 60 intel_range_t dot, vco, n, m, m1, m2, p, p1;
73 intel_p2_t p2; 61 intel_p2_t p2;
74 /**
75 * find_pll() - Find the best values for the PLL
76 * @limit: limits for the PLL
77 * @crtc: current CRTC
78 * @target: target frequency in kHz
79 * @refclk: reference clock frequency in kHz
80 * @match_clock: if provided, @best_clock P divider must
81 * match the P divider from @match_clock
82 * used for LVDS downclocking
83 * @best_clock: best PLL values found
84 *
85 * Returns true on success, false on failure.
86 */
87 bool (*find_pll)(const intel_limit_t *limit,
88 struct drm_crtc *crtc,
89 int target, int refclk,
90 intel_clock_t *match_clock,
91 intel_clock_t *best_clock);
92}; 62};
93 63
94/* FDI */ 64/* FDI */
@@ -104,29 +74,6 @@ intel_pch_rawclk(struct drm_device *dev)
104 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 74 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
105} 75}
106 76
107static bool
108intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
109 int target, int refclk, intel_clock_t *match_clock,
110 intel_clock_t *best_clock);
111static bool
112intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
113 int target, int refclk, intel_clock_t *match_clock,
114 intel_clock_t *best_clock);
115
116static bool
117intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
118 int target, int refclk, intel_clock_t *match_clock,
119 intel_clock_t *best_clock);
120static bool
121intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
122 int target, int refclk, intel_clock_t *match_clock,
123 intel_clock_t *best_clock);
124
125static bool
126intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
127 int target, int refclk, intel_clock_t *match_clock,
128 intel_clock_t *best_clock);
129
130static inline u32 /* units of 100MHz */ 77static inline u32 /* units of 100MHz */
131intel_fdi_link_freq(struct drm_device *dev) 78intel_fdi_link_freq(struct drm_device *dev)
132{ 79{
@@ -148,7 +95,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
148 .p1 = { .min = 2, .max = 33 }, 95 .p1 = { .min = 2, .max = 33 },
149 .p2 = { .dot_limit = 165000, 96 .p2 = { .dot_limit = 165000,
150 .p2_slow = 4, .p2_fast = 2 }, 97 .p2_slow = 4, .p2_fast = 2 },
151 .find_pll = intel_find_best_PLL,
152}; 98};
153 99
154static const intel_limit_t intel_limits_i8xx_lvds = { 100static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -162,7 +108,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
162 .p1 = { .min = 1, .max = 6 }, 108 .p1 = { .min = 1, .max = 6 },
163 .p2 = { .dot_limit = 165000, 109 .p2 = { .dot_limit = 165000,
164 .p2_slow = 14, .p2_fast = 7 }, 110 .p2_slow = 14, .p2_fast = 7 },
165 .find_pll = intel_find_best_PLL,
166}; 111};
167 112
168static const intel_limit_t intel_limits_i9xx_sdvo = { 113static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -176,7 +121,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
176 .p1 = { .min = 1, .max = 8 }, 121 .p1 = { .min = 1, .max = 8 },
177 .p2 = { .dot_limit = 200000, 122 .p2 = { .dot_limit = 200000,
178 .p2_slow = 10, .p2_fast = 5 }, 123 .p2_slow = 10, .p2_fast = 5 },
179 .find_pll = intel_find_best_PLL,
180}; 124};
181 125
182static const intel_limit_t intel_limits_i9xx_lvds = { 126static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -190,7 +134,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
190 .p1 = { .min = 1, .max = 8 }, 134 .p1 = { .min = 1, .max = 8 },
191 .p2 = { .dot_limit = 112000, 135 .p2 = { .dot_limit = 112000,
192 .p2_slow = 14, .p2_fast = 7 }, 136 .p2_slow = 14, .p2_fast = 7 },
193 .find_pll = intel_find_best_PLL,
194}; 137};
195 138
196 139
@@ -207,7 +150,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
207 .p2_slow = 10, 150 .p2_slow = 10,
208 .p2_fast = 10 151 .p2_fast = 10
209 }, 152 },
210 .find_pll = intel_g4x_find_best_PLL,
211}; 153};
212 154
213static const intel_limit_t intel_limits_g4x_hdmi = { 155static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -221,7 +163,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
221 .p1 = { .min = 1, .max = 8}, 163 .p1 = { .min = 1, .max = 8},
222 .p2 = { .dot_limit = 165000, 164 .p2 = { .dot_limit = 165000,
223 .p2_slow = 10, .p2_fast = 5 }, 165 .p2_slow = 10, .p2_fast = 5 },
224 .find_pll = intel_g4x_find_best_PLL,
225}; 166};
226 167
227static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 168static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -236,7 +177,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
236 .p2 = { .dot_limit = 0, 177 .p2 = { .dot_limit = 0,
237 .p2_slow = 14, .p2_fast = 14 178 .p2_slow = 14, .p2_fast = 14
238 }, 179 },
239 .find_pll = intel_g4x_find_best_PLL,
240}; 180};
241 181
242static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 182static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -251,21 +191,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
251 .p2 = { .dot_limit = 0, 191 .p2 = { .dot_limit = 0,
252 .p2_slow = 7, .p2_fast = 7 192 .p2_slow = 7, .p2_fast = 7
253 }, 193 },
254 .find_pll = intel_g4x_find_best_PLL,
255};
256
257static const intel_limit_t intel_limits_g4x_display_port = {
258 .dot = { .min = 161670, .max = 227000 },
259 .vco = { .min = 1750000, .max = 3500000},
260 .n = { .min = 1, .max = 2 },
261 .m = { .min = 97, .max = 108 },
262 .m1 = { .min = 0x10, .max = 0x12 },
263 .m2 = { .min = 0x05, .max = 0x06 },
264 .p = { .min = 10, .max = 20 },
265 .p1 = { .min = 1, .max = 2},
266 .p2 = { .dot_limit = 0,
267 .p2_slow = 10, .p2_fast = 10 },
268 .find_pll = intel_find_pll_g4x_dp,
269}; 194};
270 195
271static const intel_limit_t intel_limits_pineview_sdvo = { 196static const intel_limit_t intel_limits_pineview_sdvo = {
@@ -281,7 +206,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
281 .p1 = { .min = 1, .max = 8 }, 206 .p1 = { .min = 1, .max = 8 },
282 .p2 = { .dot_limit = 200000, 207 .p2 = { .dot_limit = 200000,
283 .p2_slow = 10, .p2_fast = 5 }, 208 .p2_slow = 10, .p2_fast = 5 },
284 .find_pll = intel_find_best_PLL,
285}; 209};
286 210
287static const intel_limit_t intel_limits_pineview_lvds = { 211static const intel_limit_t intel_limits_pineview_lvds = {
@@ -295,7 +219,6 @@ static const intel_limit_t intel_limits_pineview_lvds = {
295 .p1 = { .min = 1, .max = 8 }, 219 .p1 = { .min = 1, .max = 8 },
296 .p2 = { .dot_limit = 112000, 220 .p2 = { .dot_limit = 112000,
297 .p2_slow = 14, .p2_fast = 14 }, 221 .p2_slow = 14, .p2_fast = 14 },
298 .find_pll = intel_find_best_PLL,
299}; 222};
300 223
301/* Ironlake / Sandybridge 224/* Ironlake / Sandybridge
@@ -314,7 +237,6 @@ static const intel_limit_t intel_limits_ironlake_dac = {
314 .p1 = { .min = 1, .max = 8 }, 237 .p1 = { .min = 1, .max = 8 },
315 .p2 = { .dot_limit = 225000, 238 .p2 = { .dot_limit = 225000,
316 .p2_slow = 10, .p2_fast = 5 }, 239 .p2_slow = 10, .p2_fast = 5 },
317 .find_pll = intel_g4x_find_best_PLL,
318}; 240};
319 241
320static const intel_limit_t intel_limits_ironlake_single_lvds = { 242static const intel_limit_t intel_limits_ironlake_single_lvds = {
@@ -328,7 +250,6 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
328 .p1 = { .min = 2, .max = 8 }, 250 .p1 = { .min = 2, .max = 8 },
329 .p2 = { .dot_limit = 225000, 251 .p2 = { .dot_limit = 225000,
330 .p2_slow = 14, .p2_fast = 14 }, 252 .p2_slow = 14, .p2_fast = 14 },
331 .find_pll = intel_g4x_find_best_PLL,
332}; 253};
333 254
334static const intel_limit_t intel_limits_ironlake_dual_lvds = { 255static const intel_limit_t intel_limits_ironlake_dual_lvds = {
@@ -342,7 +263,6 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
342 .p1 = { .min = 2, .max = 8 }, 263 .p1 = { .min = 2, .max = 8 },
343 .p2 = { .dot_limit = 225000, 264 .p2 = { .dot_limit = 225000,
344 .p2_slow = 7, .p2_fast = 7 }, 265 .p2_slow = 7, .p2_fast = 7 },
345 .find_pll = intel_g4x_find_best_PLL,
346}; 266};
347 267
348/* LVDS 100mhz refclk limits. */ 268/* LVDS 100mhz refclk limits. */
@@ -357,7 +277,6 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
357 .p1 = { .min = 2, .max = 8 }, 277 .p1 = { .min = 2, .max = 8 },
358 .p2 = { .dot_limit = 225000, 278 .p2 = { .dot_limit = 225000,
359 .p2_slow = 14, .p2_fast = 14 }, 279 .p2_slow = 14, .p2_fast = 14 },
360 .find_pll = intel_g4x_find_best_PLL,
361}; 280};
362 281
363static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 282static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
@@ -371,21 +290,6 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
371 .p1 = { .min = 2, .max = 6 }, 290 .p1 = { .min = 2, .max = 6 },
372 .p2 = { .dot_limit = 225000, 291 .p2 = { .dot_limit = 225000,
373 .p2_slow = 7, .p2_fast = 7 }, 292 .p2_slow = 7, .p2_fast = 7 },
374 .find_pll = intel_g4x_find_best_PLL,
375};
376
377static const intel_limit_t intel_limits_ironlake_display_port = {
378 .dot = { .min = 25000, .max = 350000 },
379 .vco = { .min = 1760000, .max = 3510000},
380 .n = { .min = 1, .max = 2 },
381 .m = { .min = 81, .max = 90 },
382 .m1 = { .min = 12, .max = 22 },
383 .m2 = { .min = 5, .max = 9 },
384 .p = { .min = 10, .max = 20 },
385 .p1 = { .min = 1, .max = 2},
386 .p2 = { .dot_limit = 0,
387 .p2_slow = 10, .p2_fast = 10 },
388 .find_pll = intel_find_pll_ironlake_dp,
389}; 293};
390 294
391static const intel_limit_t intel_limits_vlv_dac = { 295static const intel_limit_t intel_limits_vlv_dac = {
@@ -396,15 +300,14 @@ static const intel_limit_t intel_limits_vlv_dac = {
396 .m1 = { .min = 2, .max = 3 }, 300 .m1 = { .min = 2, .max = 3 },
397 .m2 = { .min = 11, .max = 156 }, 301 .m2 = { .min = 11, .max = 156 },
398 .p = { .min = 10, .max = 30 }, 302 .p = { .min = 10, .max = 30 },
399 .p1 = { .min = 2, .max = 3 }, 303 .p1 = { .min = 1, .max = 3 },
400 .p2 = { .dot_limit = 270000, 304 .p2 = { .dot_limit = 270000,
401 .p2_slow = 2, .p2_fast = 20 }, 305 .p2_slow = 2, .p2_fast = 20 },
402 .find_pll = intel_vlv_find_best_pll,
403}; 306};
404 307
405static const intel_limit_t intel_limits_vlv_hdmi = { 308static const intel_limit_t intel_limits_vlv_hdmi = {
406 .dot = { .min = 20000, .max = 165000 }, 309 .dot = { .min = 25000, .max = 270000 },
407 .vco = { .min = 4000000, .max = 5994000}, 310 .vco = { .min = 4000000, .max = 6000000 },
408 .n = { .min = 1, .max = 7 }, 311 .n = { .min = 1, .max = 7 },
409 .m = { .min = 60, .max = 300 }, /* guess */ 312 .m = { .min = 60, .max = 300 }, /* guess */
410 .m1 = { .min = 2, .max = 3 }, 313 .m1 = { .min = 2, .max = 3 },
@@ -413,7 +316,6 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
413 .p1 = { .min = 2, .max = 3 }, 316 .p1 = { .min = 2, .max = 3 },
414 .p2 = { .dot_limit = 270000, 317 .p2 = { .dot_limit = 270000,
415 .p2_slow = 2, .p2_fast = 20 }, 318 .p2_slow = 2, .p2_fast = 20 },
416 .find_pll = intel_vlv_find_best_pll,
417}; 319};
418 320
419static const intel_limit_t intel_limits_vlv_dp = { 321static const intel_limit_t intel_limits_vlv_dp = {
@@ -424,61 +326,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
424 .m1 = { .min = 2, .max = 3 }, 326 .m1 = { .min = 2, .max = 3 },
425 .m2 = { .min = 11, .max = 156 }, 327 .m2 = { .min = 11, .max = 156 },
426 .p = { .min = 10, .max = 30 }, 328 .p = { .min = 10, .max = 30 },
427 .p1 = { .min = 2, .max = 3 }, 329 .p1 = { .min = 1, .max = 3 },
428 .p2 = { .dot_limit = 270000, 330 .p2 = { .dot_limit = 270000,
429 .p2_slow = 2, .p2_fast = 20 }, 331 .p2_slow = 2, .p2_fast = 20 },
430 .find_pll = intel_vlv_find_best_pll,
431}; 332};
432 333
433u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
434{
435 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
436
437 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
438 DRM_ERROR("DPIO idle wait timed out\n");
439 return 0;
440 }
441
442 I915_WRITE(DPIO_REG, reg);
443 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
444 DPIO_BYTE);
445 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
446 DRM_ERROR("DPIO read wait timed out\n");
447 return 0;
448 }
449
450 return I915_READ(DPIO_DATA);
451}
452
453static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
454 u32 val)
455{
456 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
457
458 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
459 DRM_ERROR("DPIO idle wait timed out\n");
460 return;
461 }
462
463 I915_WRITE(DPIO_DATA, val);
464 I915_WRITE(DPIO_REG, reg);
465 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
466 DPIO_BYTE);
467 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
468 DRM_ERROR("DPIO write wait timed out\n");
469}
470
471static void vlv_init_dpio(struct drm_device *dev)
472{
473 struct drm_i915_private *dev_priv = dev->dev_private;
474
475 /* Reset the DPIO config */
476 I915_WRITE(DPIO_CTL, 0);
477 POSTING_READ(DPIO_CTL);
478 I915_WRITE(DPIO_CTL, 1);
479 POSTING_READ(DPIO_CTL);
480}
481
482static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 334static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
483 int refclk) 335 int refclk)
484{ 336{
@@ -497,10 +349,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
497 else 349 else
498 limit = &intel_limits_ironlake_single_lvds; 350 limit = &intel_limits_ironlake_single_lvds;
499 } 351 }
500 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 352 } else
501 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
502 limit = &intel_limits_ironlake_display_port;
503 else
504 limit = &intel_limits_ironlake_dac; 353 limit = &intel_limits_ironlake_dac;
505 354
506 return limit; 355 return limit;
@@ -521,8 +370,6 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
521 limit = &intel_limits_g4x_hdmi; 370 limit = &intel_limits_g4x_hdmi;
522 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 371 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
523 limit = &intel_limits_g4x_sdvo; 372 limit = &intel_limits_g4x_sdvo;
524 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
525 limit = &intel_limits_g4x_display_port;
526 } else /* The option is for other outputs */ 373 } else /* The option is for other outputs */
527 limit = &intel_limits_i9xx_sdvo; 374 limit = &intel_limits_i9xx_sdvo;
528 375
@@ -573,13 +420,14 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
573 clock->dot = clock->vco / clock->p; 420 clock->dot = clock->vco / clock->p;
574} 421}
575 422
576static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
577{
578	if (IS_PINEVIEW(dev)) {
579	pineview_clock(refclk, clock);
580	return;
581	}
582	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
423static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
424{
425	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
426}
427
428static void i9xx_clock(int refclk, intel_clock_t *clock)
429{
430 clock->m = i9xx_dpll_compute_m(clock);
583 clock->p = clock->p1 * clock->p2; 431 clock->p = clock->p1 * clock->p2;
584 clock->vco = refclk * clock->m / (clock->n + 2); 432 clock->vco = refclk * clock->m / (clock->n + 2);
585 clock->dot = clock->vco / clock->p; 433 clock->dot = clock->vco / clock->p;
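
The extracted i9xx_dpll_compute_m() keeps the classic i9xx divider math in
one place: m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2), and
dot = vco / (p1 * p2). A standalone sketch with example dividers (the
divider values here are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
        int refclk = 96000;                     /* kHz, common i9xx reference */
        int n = 2, m1 = 14, m2 = 8, p1 = 2, p2 = 5;
        int m = 5 * (m1 + 2) + (m2 + 2);        /* as in i9xx_dpll_compute_m() */
        int p = p1 * p2;
        int vco = refclk * m / (n + 2);         /* as in i9xx_clock() */
        int dot = vco / p;

        printf("m=%d p=%d vco=%d kHz dot=%d kHz\n", m, p, vco, dot);
        return 0;
}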
@@ -636,10 +484,9 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
636} 484}
637 485
638static bool 486static bool
639intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 487i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
640 int target, int refclk, intel_clock_t *match_clock, 488 int target, int refclk, intel_clock_t *match_clock,
641 intel_clock_t *best_clock) 489 intel_clock_t *best_clock)
642
643{ 490{
644 struct drm_device *dev = crtc->dev; 491 struct drm_device *dev = crtc->dev;
645 intel_clock_t clock; 492 intel_clock_t clock;
@@ -668,8 +515,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
668 clock.m1++) { 515 clock.m1++) {
669 for (clock.m2 = limit->m2.min; 516 for (clock.m2 = limit->m2.min;
670 clock.m2 <= limit->m2.max; clock.m2++) { 517 clock.m2 <= limit->m2.max; clock.m2++) {
671 /* m1 is always 0 in Pineview */ 518 if (clock.m2 >= clock.m1)
672 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
673 break; 519 break;
674 for (clock.n = limit->n.min; 520 for (clock.n = limit->n.min;
675 clock.n <= limit->n.max; clock.n++) { 521 clock.n <= limit->n.max; clock.n++) {
@@ -677,7 +523,66 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
677 clock.p1 <= limit->p1.max; clock.p1++) { 523 clock.p1 <= limit->p1.max; clock.p1++) {
678 int this_err; 524 int this_err;
679 525
680 intel_clock(dev, refclk, &clock); 526 i9xx_clock(refclk, &clock);
527 if (!intel_PLL_is_valid(dev, limit,
528 &clock))
529 continue;
530 if (match_clock &&
531 clock.p != match_clock->p)
532 continue;
533
534 this_err = abs(clock.dot - target);
535 if (this_err < err) {
536 *best_clock = clock;
537 err = this_err;
538 }
539 }
540 }
541 }
542 }
543
544 return (err != target);
545}
546
547static bool
548pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
549 int target, int refclk, intel_clock_t *match_clock,
550 intel_clock_t *best_clock)
551{
552 struct drm_device *dev = crtc->dev;
553 intel_clock_t clock;
554 int err = target;
555
556 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
557 /*
558 * For LVDS just rely on its current settings for dual-channel.
559 * We haven't figured out how to reliably set up different
560 * single/dual channel state, if we even can.
561 */
562 if (intel_is_dual_link_lvds(dev))
563 clock.p2 = limit->p2.p2_fast;
564 else
565 clock.p2 = limit->p2.p2_slow;
566 } else {
567 if (target < limit->p2.dot_limit)
568 clock.p2 = limit->p2.p2_slow;
569 else
570 clock.p2 = limit->p2.p2_fast;
571 }
572
573 memset(best_clock, 0, sizeof(*best_clock));
574
575 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
576 clock.m1++) {
577 for (clock.m2 = limit->m2.min;
578 clock.m2 <= limit->m2.max; clock.m2++) {
579 for (clock.n = limit->n.min;
580 clock.n <= limit->n.max; clock.n++) {
581 for (clock.p1 = limit->p1.min;
582 clock.p1 <= limit->p1.max; clock.p1++) {
583 int this_err;
584
585 pineview_clock(refclk, &clock);
681 if (!intel_PLL_is_valid(dev, limit, 586 if (!intel_PLL_is_valid(dev, limit,
682 &clock)) 587 &clock))
683 continue; 588 continue;
@@ -699,9 +604,9 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
699} 604}
700 605
701static bool 606static bool
702intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 607g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
703 int target, int refclk, intel_clock_t *match_clock, 608 int target, int refclk, intel_clock_t *match_clock,
704 intel_clock_t *best_clock) 609 intel_clock_t *best_clock)
705{ 610{
706 struct drm_device *dev = crtc->dev; 611 struct drm_device *dev = crtc->dev;
707 intel_clock_t clock; 612 intel_clock_t clock;
@@ -712,12 +617,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
712 found = false; 617 found = false;
713 618
714 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 619 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
715 int lvds_reg;
716
717 if (HAS_PCH_SPLIT(dev))
718 lvds_reg = PCH_LVDS;
719 else
720 lvds_reg = LVDS;
721 if (intel_is_dual_link_lvds(dev)) 620 if (intel_is_dual_link_lvds(dev))
722 clock.p2 = limit->p2.p2_fast; 621 clock.p2 = limit->p2.p2_fast;
723 else 622 else
@@ -742,13 +641,10 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
742 clock.p1 >= limit->p1.min; clock.p1--) { 641 clock.p1 >= limit->p1.min; clock.p1--) {
743 int this_err; 642 int this_err;
744 643
745 intel_clock(dev, refclk, &clock); 644 i9xx_clock(refclk, &clock);
746 if (!intel_PLL_is_valid(dev, limit, 645 if (!intel_PLL_is_valid(dev, limit,
747 &clock)) 646 &clock))
748 continue; 647 continue;
749 if (match_clock &&
750 clock.p != match_clock->p)
751 continue;
752 648
753 this_err = abs(clock.dot - target); 649 this_err = abs(clock.dot - target);
754 if (this_err < err_most) { 650 if (this_err < err_most) {
@@ -765,62 +661,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
765} 661}
766 662
767static bool 663static bool
768intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 664vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
769 int target, int refclk, intel_clock_t *match_clock, 665 int target, int refclk, intel_clock_t *match_clock,
770 intel_clock_t *best_clock) 666 intel_clock_t *best_clock)
771{
772 struct drm_device *dev = crtc->dev;
773 intel_clock_t clock;
774
775 if (target < 200000) {
776 clock.n = 1;
777 clock.p1 = 2;
778 clock.p2 = 10;
779 clock.m1 = 12;
780 clock.m2 = 9;
781 } else {
782 clock.n = 2;
783 clock.p1 = 1;
784 clock.p2 = 10;
785 clock.m1 = 14;
786 clock.m2 = 8;
787 }
788 intel_clock(dev, refclk, &clock);
789 memcpy(best_clock, &clock, sizeof(intel_clock_t));
790 return true;
791}
792
793/* DisplayPort has only two frequencies, 162MHz and 270MHz */
794static bool
795intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
796 int target, int refclk, intel_clock_t *match_clock,
797 intel_clock_t *best_clock)
798{
799 intel_clock_t clock;
800 if (target < 200000) {
801 clock.p1 = 2;
802 clock.p2 = 10;
803 clock.n = 2;
804 clock.m1 = 23;
805 clock.m2 = 8;
806 } else {
807 clock.p1 = 1;
808 clock.p2 = 10;
809 clock.n = 1;
810 clock.m1 = 14;
811 clock.m2 = 2;
812 }
813 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
814 clock.p = (clock.p1 * clock.p2);
815 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
816 clock.vco = 0;
817 memcpy(best_clock, &clock, sizeof(intel_clock_t));
818 return true;
819}
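
A quick check of why fixed dividers sufficed here: plugging the two removed
parameter sets into the dot-clock formula from the code above
(m = 5 * (m1 + 2) + (m2 + 2), dot = 96000 * m / (n + 2) / (p1 * p2))
lands on the two DP link rates, 162 MHz and ~270 MHz:

#include <stdio.h>

int main(void)
{
        /* The two fixed divider sets from the removed intel_find_pll_g4x_dp() */
        struct { int n, m1, m2, p1, p2; } dp[] = {
                { 2, 23, 8, 2, 10 },    /* target < 200000 kHz */
                { 1, 14, 2, 1, 10 },    /* target >= 200000 kHz */
        };
        int i;

        for (i = 0; i < 2; i++) {
                int m = 5 * (dp[i].m1 + 2) + (dp[i].m2 + 2);
                int p = dp[i].p1 * dp[i].p2;
                int dot = 96000 * m / (dp[i].n + 2) / p;        /* kHz */
                printf("dot = %d kHz\n", dot);  /* 162000, then 268800 */
        }
        return 0;
}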
820static bool
821intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
822 int target, int refclk, intel_clock_t *match_clock,
823 intel_clock_t *best_clock)
824{ 667{
825 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; 668 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
826 u32 m, n, fastclk; 669 u32 m, n, fastclk;
@@ -1066,14 +909,24 @@ static void assert_pll(struct drm_i915_private *dev_priv,
1066#define assert_pll_enabled(d, p) assert_pll(d, p, true) 909#define assert_pll_enabled(d, p) assert_pll(d, p, true)
1067#define assert_pll_disabled(d, p) assert_pll(d, p, false) 910#define assert_pll_disabled(d, p) assert_pll(d, p, false)
1068 911
912static struct intel_shared_dpll *
913intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
914{
915 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
916
917 if (crtc->config.shared_dpll < 0)
918 return NULL;
919
920 return &dev_priv->shared_dplls[crtc->config.shared_dpll];
921}
922
1069/* For ILK+ */ 923/* For ILK+ */
1070static void assert_pch_pll(struct drm_i915_private *dev_priv, 924static void assert_shared_dpll(struct drm_i915_private *dev_priv,
1071 struct intel_pch_pll *pll, 925 struct intel_shared_dpll *pll,
1072 struct intel_crtc *crtc, 926 bool state)
1073 bool state)
1074{ 927{
1075 u32 val;
1076 bool cur_state; 928 bool cur_state;
929 struct intel_dpll_hw_state hw_state;
1077 930
1078 if (HAS_PCH_LPT(dev_priv->dev)) { 931 if (HAS_PCH_LPT(dev_priv->dev)) {
1079 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); 932 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
@@ -1081,36 +934,16 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
1081 } 934 }
1082 935
1083 if (WARN (!pll, 936 if (WARN (!pll,
1084 "asserting PCH PLL %s with no PLL\n", state_string(state))) 937 "asserting DPLL %s with no DPLL\n", state_string(state)))
1085 return; 938 return;
1086 939
1087 val = I915_READ(pll->pll_reg); 940 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1088 cur_state = !!(val & DPLL_VCO_ENABLE);
1089 WARN(cur_state != state, 941 WARN(cur_state != state,
1090 "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n", 942 "%s assertion failure (expected %s, current %s)\n",
1091 pll->pll_reg, state_string(state), state_string(cur_state), val); 943 pll->name, state_string(state), state_string(cur_state));
1092
1093 /* Make sure the selected PLL is correctly attached to the transcoder */
1094 if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
1095 u32 pch_dpll;
1096
1097 pch_dpll = I915_READ(PCH_DPLL_SEL);
1098 cur_state = pll->pll_reg == _PCH_DPLL_B;
1099 if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
1100 "PLL[%d] not attached to this transcoder %d: %08x\n",
1101 cur_state, crtc->pipe, pch_dpll)) {
1102 cur_state = !!(val >> (4*crtc->pipe + 3));
1103 WARN(cur_state != state,
1104 "PLL[%d] not %s on this transcoder %d: %08x\n",
1105 pll->pll_reg == _PCH_DPLL_B,
1106 state_string(state),
1107 crtc->pipe,
1108 val);
1109 }
1110 }
1111} 944}
1112#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) 945#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
1113#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) 946#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
1114 947
1115static void assert_fdi_tx(struct drm_i915_private *dev_priv, 948static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1116 enum pipe pipe, bool state) 949 enum pipe pipe, bool state)
@@ -1227,8 +1060,8 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1227 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1060 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1228 state = true; 1061 state = true;
1229 1062
1230 if (!intel_using_power_well(dev_priv->dev) && 1063 if (!intel_display_power_enabled(dev_priv->dev,
1231 cpu_transcoder != TRANSCODER_EDP) { 1064 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1232 cur_state = false; 1065 cur_state = false;
1233 } else { 1066 } else {
1234 reg = PIPECONF(cpu_transcoder); 1067 reg = PIPECONF(cpu_transcoder);
@@ -1262,12 +1095,13 @@ static void assert_plane(struct drm_i915_private *dev_priv,
1262static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1095static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1263 enum pipe pipe) 1096 enum pipe pipe)
1264{ 1097{
1098 struct drm_device *dev = dev_priv->dev;
1265 int reg, i; 1099 int reg, i;
1266 u32 val; 1100 u32 val;
1267 int cur_pipe; 1101 int cur_pipe;
1268 1102
1269 /* Planes are fixed to pipes on ILK+ */ 1103 /* Primary planes are fixed to pipes on gen4+ */
1270 if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) { 1104 if (INTEL_INFO(dev)->gen >= 4) {
1271 reg = DSPCNTR(pipe); 1105 reg = DSPCNTR(pipe);
1272 val = I915_READ(reg); 1106 val = I915_READ(reg);
1273 WARN((val & DISPLAY_PLANE_ENABLE), 1107 WARN((val & DISPLAY_PLANE_ENABLE),
@@ -1277,7 +1111,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1277 } 1111 }
1278 1112
1279 /* Need to check both planes against the pipe */ 1113 /* Need to check both planes against the pipe */
1280 for (i = 0; i < 2; i++) { 1114 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
1281 reg = DSPCNTR(i); 1115 reg = DSPCNTR(i);
1282 val = I915_READ(reg); 1116 val = I915_READ(reg);
1283 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1117 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1291,19 +1125,30 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1291static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1125static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1292 enum pipe pipe) 1126 enum pipe pipe)
1293{ 1127{
1128 struct drm_device *dev = dev_priv->dev;
1294 int reg, i; 1129 int reg, i;
1295 u32 val; 1130 u32 val;
1296 1131
1297	if (!IS_VALLEYVIEW(dev_priv->dev))
1298	return;
1299
1300	/* Need to check both planes against the pipe */
1301	for (i = 0; i < dev_priv->num_plane; i++) {
1302	reg = SPCNTR(pipe, i);
1132	if (IS_VALLEYVIEW(dev)) {
1133	for (i = 0; i < dev_priv->num_plane; i++) {
1134	reg = SPCNTR(pipe, i);
1135	val = I915_READ(reg);
1136	WARN((val & SP_ENABLE),
1137	"sprite %c assertion failure, should be off on pipe %c but is still active\n",
1138 sprite_name(pipe, i), pipe_name(pipe));
1139 }
1140 } else if (INTEL_INFO(dev)->gen >= 7) {
1141 reg = SPRCTL(pipe);
1303 val = I915_READ(reg); 1142 val = I915_READ(reg);
1304 WARN((val & SP_ENABLE), 1143 WARN((val & SPRITE_ENABLE),
1305 "sprite %d assertion failure, should be off on pipe %c but is still active\n", 1144 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1306 pipe * 2 + i, pipe_name(pipe)); 1145 plane_name(pipe), pipe_name(pipe));
1146 } else if (INTEL_INFO(dev)->gen >= 5) {
1147 reg = DVSCNTR(pipe);
1148 val = I915_READ(reg);
1149 WARN((val & DVS_ENABLE),
1150 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1151 plane_name(pipe), pipe_name(pipe));
1307 } 1152 }
1308} 1153}
1309 1154
@@ -1323,14 +1168,14 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1323 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1168 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1324} 1169}
1325 1170
1326static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, 1171static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1327 enum pipe pipe) 1172 enum pipe pipe)
1328{ 1173{
1329 int reg; 1174 int reg;
1330 u32 val; 1175 u32 val;
1331 bool enabled; 1176 bool enabled;
1332 1177
1333 reg = TRANSCONF(pipe); 1178 reg = PCH_TRANSCONF(pipe);
1334 val = I915_READ(reg); 1179 val = I915_READ(reg);
1335 enabled = !!(val & TRANS_ENABLE); 1180 enabled = !!(val & TRANS_ENABLE);
1336 WARN(enabled, 1181 WARN(enabled,
@@ -1474,6 +1319,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1474 int reg; 1319 int reg;
1475 u32 val; 1320 u32 val;
1476 1321
1322 assert_pipe_disabled(dev_priv, pipe);
1323
1477 /* No really, not for ILK+ */ 1324 /* No really, not for ILK+ */
1478 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); 1325 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
1479 1326
@@ -1525,156 +1372,86 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1525 POSTING_READ(reg); 1372 POSTING_READ(reg);
1526} 1373}
1527 1374
1375void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1376{
1377	u32 port_mask;
1378
1379	if (!port)
1380	port_mask = DPLL_PORTB_READY_MASK;
1381	else
1382	port_mask = DPLL_PORTC_READY_MASK;
1383
1384	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1385	WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1386	'B' + port, I915_READ(DPLL(0)));
1387}
1528/* SBI access */
1529static void
1530intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1531	enum intel_sbi_destination destination)
1532{
1533	u32 tmp;
1534
1535	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1536
1537	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1538	100)) {
1539	DRM_ERROR("timeout waiting for SBI to become ready\n");
1540	return;
1541	}
1542
1543	I915_WRITE(SBI_ADDR, (reg << 16));
1544	I915_WRITE(SBI_DATA, value);
1545
1546	if (destination == SBI_ICLK)
1547	tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1548	else
1549	tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1550	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1551
1552	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1553	100)) {
1554	DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1555 return;
1556 }
1557}
1558
1559static u32
1560intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1561 enum intel_sbi_destination destination)
1562{
1563 u32 value = 0;
1564 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1565
1566 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1567 100)) {
1568 DRM_ERROR("timeout waiting for SBI to become ready\n");
1569 return 0;
1570 }
1571
1572 I915_WRITE(SBI_ADDR, (reg << 16));
1573
1574 if (destination == SBI_ICLK)
1575 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1576 else
1577 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1578 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1579
1580 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1581 100)) {
1582 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1583 return 0;
1584 }
1585
1586 return I915_READ(SBI_DATA);
1587} 1387}
1588 1388
1589/** 1389/**
1590 * ironlake_enable_pch_pll - enable PCH PLL 1390 * ironlake_enable_shared_dpll - enable PCH PLL
1591 * @dev_priv: i915 private structure 1391 * @dev_priv: i915 private structure
1592 * @pipe: pipe PLL to enable 1392 * @pipe: pipe PLL to enable
1593 * 1393 *
1594 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1394 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1595 * drives the transcoder clock. 1395 * drives the transcoder clock.
1596 */ 1396 */
1597static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) 1397static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
1598{ 1398{
1599 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1399 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1600 struct intel_pch_pll *pll; 1400 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1601 int reg;
1602 u32 val;
1603 1401
1604 /* PCH PLLs only available on ILK, SNB and IVB */ 1402 /* PCH PLLs only available on ILK, SNB and IVB */
1605 BUG_ON(dev_priv->info->gen < 5); 1403 BUG_ON(dev_priv->info->gen < 5);
1606 pll = intel_crtc->pch_pll; 1404 if (WARN_ON(pll == NULL))
1607 if (pll == NULL)
1608 return; 1405 return;
1609 1406
1610 if (WARN_ON(pll->refcount == 0)) 1407 if (WARN_ON(pll->refcount == 0))
1611 return; 1408 return;
1612 1409
1613 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n", 1410 DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1614 pll->pll_reg, pll->active, pll->on, 1411 pll->name, pll->active, pll->on,
1615 intel_crtc->base.base.id); 1412 crtc->base.base.id);
1616
1617 /* PCH refclock must be enabled first */
1618 assert_pch_refclk_enabled(dev_priv);
1619 1413
1620 if (pll->active++ && pll->on) { 1414 if (pll->active++) {
1621 assert_pch_pll_enabled(dev_priv, pll, NULL); 1415 WARN_ON(!pll->on);
1416 assert_shared_dpll_enabled(dev_priv, pll);
1622 return; 1417 return;
1623 } 1418 }
1419 WARN_ON(pll->on);
1624 1420
1625 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg); 1421 DRM_DEBUG_KMS("enabling %s\n", pll->name);
1626 1422 pll->enable(dev_priv, pll);
1627 reg = pll->pll_reg;
1628 val = I915_READ(reg);
1629 val |= DPLL_VCO_ENABLE;
1630 I915_WRITE(reg, val);
1631 POSTING_READ(reg);
1632 udelay(200);
1633
1634 pll->on = true; 1423 pll->on = true;
1635} 1424}
1636 1425
1637static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) 1426static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1638{ 1427{
1639 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1428 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1640 struct intel_pch_pll *pll = intel_crtc->pch_pll; 1429 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1641 int reg;
1642 u32 val;
1643 1430
1644 /* PCH only available on ILK+ */ 1431 /* PCH only available on ILK+ */
1645 BUG_ON(dev_priv->info->gen < 5); 1432 BUG_ON(dev_priv->info->gen < 5);
1646 if (pll == NULL) 1433 if (WARN_ON(pll == NULL))
1647 return; 1434 return;
1648 1435
1649 if (WARN_ON(pll->refcount == 0)) 1436 if (WARN_ON(pll->refcount == 0))
1650 return; 1437 return;
1651 1438
1652 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n", 1439 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1653 pll->pll_reg, pll->active, pll->on, 1440 pll->name, pll->active, pll->on,
1654 intel_crtc->base.base.id); 1441 crtc->base.base.id);
1655 1442
1656 if (WARN_ON(pll->active == 0)) { 1443 if (WARN_ON(pll->active == 0)) {
1657 assert_pch_pll_disabled(dev_priv, pll, NULL); 1444 assert_shared_dpll_disabled(dev_priv, pll);
1658 return; 1445 return;
1659 } 1446 }
1660 1447
1661 if (--pll->active) { 1448 assert_shared_dpll_enabled(dev_priv, pll);
1662 assert_pch_pll_enabled(dev_priv, pll, NULL); 1449 WARN_ON(!pll->on);
1450 if (--pll->active)
1663 return; 1451 return;
1664 }
1665
1666 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1667
1668 /* Make sure transcoder isn't still depending on us */
1669 assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1670
1671 reg = pll->pll_reg;
1672 val = I915_READ(reg);
1673 val &= ~DPLL_VCO_ENABLE;
1674 I915_WRITE(reg, val);
1675 POSTING_READ(reg);
1676 udelay(200);
1677 1452
1453 DRM_DEBUG_KMS("disabling %s\n", pll->name);
1454 pll->disable(dev_priv, pll);
1678 pll->on = false; 1455 pll->on = false;
1679} 1456}
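
The enable/disable pair above is a refcount discipline: hardware is only
touched on the 0->1 and 1->0 transitions of pll->active, so two CRTCs can
share one DPLL safely. A minimal sketch of the pattern (toy type; the real
one is struct intel_shared_dpll with per-PLL enable/disable hooks):

#include <assert.h>
#include <stdbool.h>

struct toy_pll {
        int active;     /* CRTCs currently driving through this PLL */
        bool on;        /* hardware state */
};

static void toy_pll_get(struct toy_pll *pll)
{
        if (pll->active++)      /* hardware already running for another user */
                return;
        pll->on = true;         /* first user: actually enable */
}

static void toy_pll_put(struct toy_pll *pll)
{
        assert(pll->active > 0 && pll->on);
        if (--pll->active)      /* other users remain: keep it running */
                return;
        pll->on = false;        /* last user gone: disable */
}

int main(void)
{
        struct toy_pll pll = { 0, false };

        toy_pll_get(&pll);      /* first CRTC: powers on */
        toy_pll_get(&pll);      /* second CRTC: just counts */
        toy_pll_put(&pll);      /* one user left: stays on */
        toy_pll_put(&pll);      /* last user: powers off */
        return pll.on;          /* 0 on success */
}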
1680 1457
@@ -1683,15 +1460,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1683{ 1460{
1684 struct drm_device *dev = dev_priv->dev; 1461 struct drm_device *dev = dev_priv->dev;
1685 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1462 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1463 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1686 uint32_t reg, val, pipeconf_val; 1464 uint32_t reg, val, pipeconf_val;
1687 1465
1688 /* PCH only available on ILK+ */ 1466 /* PCH only available on ILK+ */
1689 BUG_ON(dev_priv->info->gen < 5); 1467 BUG_ON(dev_priv->info->gen < 5);
1690 1468
1691 /* Make sure PCH DPLL is enabled */ 1469 /* Make sure PCH DPLL is enabled */
1692 assert_pch_pll_enabled(dev_priv, 1470 assert_shared_dpll_enabled(dev_priv,
1693 to_intel_crtc(crtc)->pch_pll, 1471 intel_crtc_to_shared_dpll(intel_crtc));
1694 to_intel_crtc(crtc));
1695 1472
1696 /* FDI must be feeding us bits for PCH ports */ 1473 /* FDI must be feeding us bits for PCH ports */
1697 assert_fdi_tx_enabled(dev_priv, pipe); 1474 assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1706,7 +1483,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1706 I915_WRITE(reg, val); 1483 I915_WRITE(reg, val);
1707 } 1484 }
1708 1485
1709 reg = TRANSCONF(pipe); 1486 reg = PCH_TRANSCONF(pipe);
1710 val = I915_READ(reg); 1487 val = I915_READ(reg);
1711 pipeconf_val = I915_READ(PIPECONF(pipe)); 1488 pipeconf_val = I915_READ(PIPECONF(pipe));
1712 1489
@@ -1731,7 +1508,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1731 1508
1732 I915_WRITE(reg, val | TRANS_ENABLE); 1509 I915_WRITE(reg, val | TRANS_ENABLE);
1733 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1510 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1734 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1511 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1735} 1512}
1736 1513
1737static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1514static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1760,8 +1537,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1760 else 1537 else
1761 val |= TRANS_PROGRESSIVE; 1538 val |= TRANS_PROGRESSIVE;
1762 1539
1763 I915_WRITE(TRANSCONF(TRANSCODER_A), val); 1540 I915_WRITE(LPT_TRANSCONF, val);
1764 if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) 1541 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1765 DRM_ERROR("Failed to enable PCH transcoder\n"); 1542 DRM_ERROR("Failed to enable PCH transcoder\n");
1766} 1543}
1767 1544
@@ -1778,13 +1555,13 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1778 /* Ports must be off as well */ 1555 /* Ports must be off as well */
1779 assert_pch_ports_disabled(dev_priv, pipe); 1556 assert_pch_ports_disabled(dev_priv, pipe);
1780 1557
1781 reg = TRANSCONF(pipe); 1558 reg = PCH_TRANSCONF(pipe);
1782 val = I915_READ(reg); 1559 val = I915_READ(reg);
1783 val &= ~TRANS_ENABLE; 1560 val &= ~TRANS_ENABLE;
1784 I915_WRITE(reg, val); 1561 I915_WRITE(reg, val);
1785 /* wait for PCH transcoder off, transcoder state */ 1562 /* wait for PCH transcoder off, transcoder state */
1786 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1563 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1787 DRM_ERROR("failed to disable transcoder %d\n", pipe); 1564 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1788 1565
1789 if (!HAS_PCH_IBX(dev)) { 1566 if (!HAS_PCH_IBX(dev)) {
1790 /* Workaround: Clear the timing override chicken bit again. */ 1567 /* Workaround: Clear the timing override chicken bit again. */
@@ -1799,11 +1576,11 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1799{ 1576{
1800 u32 val; 1577 u32 val;
1801 1578
1802 val = I915_READ(_TRANSACONF); 1579 val = I915_READ(LPT_TRANSCONF);
1803 val &= ~TRANS_ENABLE; 1580 val &= ~TRANS_ENABLE;
1804 I915_WRITE(_TRANSACONF, val); 1581 I915_WRITE(LPT_TRANSCONF, val);
1805 /* wait for PCH transcoder off, transcoder state */ 1582 /* wait for PCH transcoder off, transcoder state */
1806 if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) 1583 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1807 DRM_ERROR("Failed to disable PCH transcoder\n"); 1584 DRM_ERROR("Failed to disable PCH transcoder\n");
1808 1585
1809 /* Workaround: clear timing override bit. */ 1586 /* Workaround: clear timing override bit. */
@@ -1835,6 +1612,9 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1835 int reg; 1612 int reg;
1836 u32 val; 1613 u32 val;
1837 1614
1615 assert_planes_disabled(dev_priv, pipe);
1616 assert_sprites_disabled(dev_priv, pipe);
1617
1838 if (HAS_PCH_LPT(dev_priv->dev)) 1618 if (HAS_PCH_LPT(dev_priv->dev))
1839 pch_transcoder = TRANSCODER_A; 1619 pch_transcoder = TRANSCODER_A;
1840 else 1620 else
@@ -2096,7 +1876,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2096 case 1: 1876 case 1:
2097 break; 1877 break;
2098 default: 1878 default:
2099 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 1879 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2100 return -EINVAL; 1880 return -EINVAL;
2101 } 1881 }
2102 1882
@@ -2145,6 +1925,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2145 dspcntr &= ~DISPPLANE_TILED; 1925 dspcntr &= ~DISPPLANE_TILED;
2146 } 1926 }
2147 1927
1928 if (IS_G4X(dev))
1929 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1930
2148 I915_WRITE(reg, dspcntr); 1931 I915_WRITE(reg, dspcntr);
2149 1932
2150 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 1933 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
@@ -2193,7 +1976,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2193 case 2: 1976 case 2:
2194 break; 1977 break;
2195 default: 1978 default:
2196 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 1979 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2197 return -EINVAL; 1980 return -EINVAL;
2198 } 1981 }
2199 1982
@@ -2384,9 +2167,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2384 } 2167 }
2385 2168
2386 if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) { 2169 if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2387 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n", 2170 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2388 intel_crtc->plane, 2171 plane_name(intel_crtc->plane),
2389 INTEL_INFO(dev)->num_pipes); 2172 INTEL_INFO(dev)->num_pipes);
2390 return -EINVAL; 2173 return -EINVAL;
2391 } 2174 }
2392 2175
@@ -2414,7 +2197,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2414 crtc->y = y; 2197 crtc->y = y;
2415 2198
2416 if (old_fb) { 2199 if (old_fb) {
2417 intel_wait_for_vblank(dev, intel_crtc->pipe); 2200 if (intel_crtc->active && old_fb != fb)
2201 intel_wait_for_vblank(dev, intel_crtc->pipe);
2418 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 2202 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2419 } 2203 }
2420 2204
@@ -2467,6 +2251,11 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2467 FDI_FE_ERRC_ENABLE); 2251 FDI_FE_ERRC_ENABLE);
2468} 2252}
2469 2253
2254static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
2255{
2256 return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
2257}
2258
2470static void ivb_modeset_global_resources(struct drm_device *dev) 2259static void ivb_modeset_global_resources(struct drm_device *dev)
2471{ 2260{
2472 struct drm_i915_private *dev_priv = dev->dev_private; 2261 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2476,10 +2265,13 @@ static void ivb_modeset_global_resources(struct drm_device *dev)
2476 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); 2265 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2477 uint32_t temp; 2266 uint32_t temp;
2478 2267
2479 /* When everything is off disable fdi C so that we could enable fdi B 2268 /*
2480 * with all lanes. XXX: This misses the case where a pipe is not using 2269 * When everything is off disable fdi C so that we could enable fdi B
2481 * any pch resources and so doesn't need any fdi lanes. */ 2270 * with all lanes. Note that we don't care about enabled pipes without
2482 if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { 2271 * an enabled pch encoder.
2272 */
2273 if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2274 !pipe_has_enabled_pch(pipe_C_crtc)) {
2483 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 2275 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2484 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 2276 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2485 2277
@@ -2517,8 +2309,8 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2517 /* enable CPU FDI TX and PCH FDI RX */ 2309 /* enable CPU FDI TX and PCH FDI RX */
2518 reg = FDI_TX_CTL(pipe); 2310 reg = FDI_TX_CTL(pipe);
2519 temp = I915_READ(reg); 2311 temp = I915_READ(reg);
2520 temp &= ~(7 << 19); 2312 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2521 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2313 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2522 temp &= ~FDI_LINK_TRAIN_NONE; 2314 temp &= ~FDI_LINK_TRAIN_NONE;
2523 temp |= FDI_LINK_TRAIN_PATTERN_1; 2315 temp |= FDI_LINK_TRAIN_PATTERN_1;
2524 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2316 I915_WRITE(reg, temp | FDI_TX_ENABLE);
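
The hunks in this region replace the open-coded (7 << 19) shifts with named FDI port-width macros. A minimal sketch of what those macros presumably expand to in i915_reg.h, inferred from the code being replaced (only the macro names are confirmed by the diff; the definitions are an assumption):

/* Presumed i915_reg.h definitions (assumption): the port-width field
 * stores lanes - 1 in bits 21:19 of FDI_TX_CTL / FDI_RX_CTL.
 */
#define FDI_DP_PORT_WIDTH_SHIFT		19
#define FDI_DP_PORT_WIDTH_MASK		(7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width)	(((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)

Under that reading, the new temp &= ~FDI_DP_PORT_WIDTH_MASK / temp |= FDI_DP_PORT_WIDTH(lanes) pair is bit-for-bit equivalent to the old (lanes - 1) << 19 arithmetic.
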
@@ -2615,8 +2407,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2615 /* enable CPU FDI TX and PCH FDI RX */ 2407 /* enable CPU FDI TX and PCH FDI RX */
2616 reg = FDI_TX_CTL(pipe); 2408 reg = FDI_TX_CTL(pipe);
2617 temp = I915_READ(reg); 2409 temp = I915_READ(reg);
2618 temp &= ~(7 << 19); 2410 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2619 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2411 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2620 temp &= ~FDI_LINK_TRAIN_NONE; 2412 temp &= ~FDI_LINK_TRAIN_NONE;
2621 temp |= FDI_LINK_TRAIN_PATTERN_1; 2413 temp |= FDI_LINK_TRAIN_PATTERN_1;
2622 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2414 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2750,8 +2542,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2750 /* enable CPU FDI TX and PCH FDI RX */ 2542 /* enable CPU FDI TX and PCH FDI RX */
2751 reg = FDI_TX_CTL(pipe); 2543 reg = FDI_TX_CTL(pipe);
2752 temp = I915_READ(reg); 2544 temp = I915_READ(reg);
2753 temp &= ~(7 << 19); 2545 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2754 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2546 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2755 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2547 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2756 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2548 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2757 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2549 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2852,8 +2644,8 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2852 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2644 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2853 reg = FDI_RX_CTL(pipe); 2645 reg = FDI_RX_CTL(pipe);
2854 temp = I915_READ(reg); 2646 temp = I915_READ(reg);
2855 temp &= ~((0x7 << 19) | (0x7 << 16)); 2647 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
2856 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2648 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2857 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 2649 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2858 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2650 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2859 2651
@@ -3085,6 +2877,30 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
3085 mutex_unlock(&dev_priv->dpio_lock); 2877 mutex_unlock(&dev_priv->dpio_lock);
3086} 2878}
3087 2879
2880static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
2881 enum pipe pch_transcoder)
2882{
2883 struct drm_device *dev = crtc->base.dev;
2884 struct drm_i915_private *dev_priv = dev->dev_private;
2885 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2886
2887 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
2888 I915_READ(HTOTAL(cpu_transcoder)));
2889 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
2890 I915_READ(HBLANK(cpu_transcoder)));
2891 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
2892 I915_READ(HSYNC(cpu_transcoder)));
2893
2894 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
2895 I915_READ(VTOTAL(cpu_transcoder)));
2896 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
2897 I915_READ(VBLANK(cpu_transcoder)));
2898 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
2899 I915_READ(VSYNC(cpu_transcoder)));
2900 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2901 I915_READ(VSYNCSHIFT(cpu_transcoder)));
2902}
2903
3088/* 2904/*
3089 * Enable PCH resources required for PCH ports: 2905 * Enable PCH resources required for PCH ports:
3090 * - PCH PLLs 2906 * - PCH PLLs
@@ -3101,7 +2917,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3101 int pipe = intel_crtc->pipe; 2917 int pipe = intel_crtc->pipe;
3102 u32 reg, temp; 2918 u32 reg, temp;
3103 2919
3104 assert_transcoder_disabled(dev_priv, pipe); 2920 assert_pch_transcoder_disabled(dev_priv, pipe);
3105 2921
3106 /* Write the TU size bits before fdi link training, so that error 2922 /* Write the TU size bits before fdi link training, so that error
3107 * detection works. */ 2923 * detection works. */
@@ -3115,31 +2931,18 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3115 * transcoder, and we actually should do this to not upset any PCH 2931 * transcoder, and we actually should do this to not upset any PCH
3116 * transcoder that already use the clock when we share it. 2932 * transcoder that already use the clock when we share it.
3117 * 2933 *
3118 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll 2934 * Note that enable_shared_dpll tries to do the right thing, but
3119 * unconditionally resets the pll - we need that to have the right LVDS 2935 * get_shared_dpll unconditionally resets the pll - we need that to have
3120 * enable sequence. */ 2936 * the right LVDS enable sequence. */
3121 ironlake_enable_pch_pll(intel_crtc); 2937 ironlake_enable_shared_dpll(intel_crtc);
3122 2938
3123 if (HAS_PCH_CPT(dev)) { 2939 if (HAS_PCH_CPT(dev)) {
3124 u32 sel; 2940 u32 sel;
3125 2941
3126 temp = I915_READ(PCH_DPLL_SEL); 2942 temp = I915_READ(PCH_DPLL_SEL);
3127 switch (pipe) { 2943 temp |= TRANS_DPLL_ENABLE(pipe);
3128 default: 2944 sel = TRANS_DPLLB_SEL(pipe);
3129 case 0: 2945 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3130 temp |= TRANSA_DPLL_ENABLE;
3131 sel = TRANSA_DPLLB_SEL;
3132 break;
3133 case 1:
3134 temp |= TRANSB_DPLL_ENABLE;
3135 sel = TRANSB_DPLLB_SEL;
3136 break;
3137 case 2:
3138 temp |= TRANSC_DPLL_ENABLE;
3139 sel = TRANSC_DPLLB_SEL;
3140 break;
3141 }
3142 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3143 temp |= sel; 2946 temp |= sel;
3144 else 2947 else
3145 temp &= ~sel; 2948 temp &= ~sel;
@@ -3148,14 +2951,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3148 2951
3149 /* set transcoder timing, panel must allow it */ 2952 /* set transcoder timing, panel must allow it */
3150 assert_panel_unlocked(dev_priv, pipe); 2953 assert_panel_unlocked(dev_priv, pipe);
3151 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); 2954 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3152 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3153 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
3154
3155 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3156 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3157 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
3158 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
3159 2955
3160 intel_fdi_normal_train(crtc); 2956 intel_fdi_normal_train(crtc);
3161 2957
@@ -3205,86 +3001,82 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
3205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3001 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3206 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 3002 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3207 3003
3208 assert_transcoder_disabled(dev_priv, TRANSCODER_A); 3004 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3209 3005
3210 lpt_program_iclkip(crtc); 3006 lpt_program_iclkip(crtc);
3211 3007
3212 /* Set transcoder timing. */ 3008 /* Set transcoder timing. */
3213 I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); 3009 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3214 I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3215 I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
3216
3217 I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3218 I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3219 I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
3220 I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
3221 3010
3222 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 3011 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3223} 3012}
3224 3013
3225static void intel_put_pch_pll(struct intel_crtc *intel_crtc) 3014static void intel_put_shared_dpll(struct intel_crtc *crtc)
3226{ 3015{
3227 struct intel_pch_pll *pll = intel_crtc->pch_pll; 3016 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3228 3017
3229 if (pll == NULL) 3018 if (pll == NULL)
3230 return; 3019 return;
3231 3020
3232 if (pll->refcount == 0) { 3021 if (pll->refcount == 0) {
3233 WARN(1, "bad PCH PLL refcount\n"); 3022 WARN(1, "bad %s refcount\n", pll->name);
3234 return; 3023 return;
3235 } 3024 }
3236 3025
3237 --pll->refcount; 3026 if (--pll->refcount == 0) {
3238 intel_crtc->pch_pll = NULL; 3027 WARN_ON(pll->on);
3028 WARN_ON(pll->active);
3029 }
3030
3031 crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3239} 3032}
3240 3033
3241static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) 3034static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
3242{ 3035{
3243 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 3036 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3244 struct intel_pch_pll *pll; 3037 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3245 int i; 3038 enum intel_dpll_id i;
3246 3039
3247 pll = intel_crtc->pch_pll;
3248 if (pll) { 3040 if (pll) {
3249 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", 3041 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3250 intel_crtc->base.base.id, pll->pll_reg); 3042 crtc->base.base.id, pll->name);
3251 goto prepare; 3043 intel_put_shared_dpll(crtc);
3252 } 3044 }
3253 3045
3254 if (HAS_PCH_IBX(dev_priv->dev)) { 3046 if (HAS_PCH_IBX(dev_priv->dev)) {
3255 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3047 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3256 i = intel_crtc->pipe; 3048 i = crtc->pipe;
3257 pll = &dev_priv->pch_plls[i]; 3049 pll = &dev_priv->shared_dplls[i];
3258 3050
3259 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", 3051 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3260 intel_crtc->base.base.id, pll->pll_reg); 3052 crtc->base.base.id, pll->name);
3261 3053
3262 goto found; 3054 goto found;
3263 } 3055 }
3264 3056
3265 for (i = 0; i < dev_priv->num_pch_pll; i++) { 3057 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3266 pll = &dev_priv->pch_plls[i]; 3058 pll = &dev_priv->shared_dplls[i];
3267 3059
3268 /* Only want to check enabled timings first */ 3060 /* Only want to check enabled timings first */
3269 if (pll->refcount == 0) 3061 if (pll->refcount == 0)
3270 continue; 3062 continue;
3271 3063
3272 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && 3064 if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
3273 fp == I915_READ(pll->fp0_reg)) { 3065 fp == I915_READ(PCH_FP0(pll->id))) {
3274 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, active %d)\n", 3066 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3275 intel_crtc->base.base.id, 3067 crtc->base.base.id,
3276 pll->pll_reg, pll->refcount, pll->active); 3068 pll->name, pll->refcount, pll->active);
3277 3069
3278 goto found; 3070 goto found;
3279 } 3071 }
3280 } 3072 }
3281 3073
3282 /* Ok no matching timings, maybe there's a free one? */ 3074 /* Ok no matching timings, maybe there's a free one? */
3283 for (i = 0; i < dev_priv->num_pch_pll; i++) { 3075 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3284 pll = &dev_priv->pch_plls[i]; 3076 pll = &dev_priv->shared_dplls[i];
3285 if (pll->refcount == 0) { 3077 if (pll->refcount == 0) {
3286 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", 3078 DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3287 intel_crtc->base.base.id, pll->pll_reg); 3079 crtc->base.base.id, pll->name);
3288 goto found; 3080 goto found;
3289 } 3081 }
3290 } 3082 }
@@ -3292,24 +3084,32 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3
3292 return NULL; 3084 return NULL;
3293 3085
3294found: 3086found:
3295 intel_crtc->pch_pll = pll; 3087 crtc->config.shared_dpll = i;
3296 pll->refcount++; 3088 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3297 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe); 3089 pipe_name(crtc->pipe));
3298prepare: /* separate function? */
3299 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3300 3090
3301 /* Wait for the clocks to stabilize before rewriting the regs */ 3091 if (pll->active == 0) {
3302 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); 3092 memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3303 POSTING_READ(pll->pll_reg); 3093 sizeof(pll->hw_state));
3304 udelay(150); 3094
3095 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3096 WARN_ON(pll->on);
3097 assert_shared_dpll_disabled(dev_priv, pll);
3098
3099 /* Wait for the clocks to stabilize before rewriting the regs */
3100 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3101 POSTING_READ(PCH_DPLL(pll->id));
3102 udelay(150);
3103
3104 I915_WRITE(PCH_FP0(pll->id), fp);
3105 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3106 }
3107 pll->refcount++;
3305 3108
3306 I915_WRITE(pll->fp0_reg, fp);
3307 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3308 pll->on = false;
3309 return pll; 3109 return pll;
3310} 3110}
3311 3111
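
The get/put rework above replaces the per-CRTC pch_pll pointer with a shared-DPLL id plus a refcount on the shared object. A minimal sketch of the lifetime contract the new WARN_ONs imply; the struct and function names here are illustrative, not the driver's:

/* Illustrative refcount contract for a shared PLL (sketch only). */
struct shared_pll_example {
	int refcount;	/* CRTCs currently claiming this PLL */
	bool on;	/* hardware enable state */
};

static void example_pll_get(struct shared_pll_example *pll)
{
	pll->refcount++;	/* one more CRTC uses this PLL */
}

static void example_pll_put(struct shared_pll_example *pll)
{
	if (WARN_ON(pll->refcount == 0))
		return;
	/* the last user must have disabled the hardware already */
	if (--pll->refcount == 0)
		WARN_ON(pll->on);
}
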
3312void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 3112static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3313{ 3113{
3314 struct drm_i915_private *dev_priv = dev->dev_private; 3114 struct drm_i915_private *dev_priv = dev->dev_private;
3315 int dslreg = PIPEDSL(pipe); 3115 int dslreg = PIPEDSL(pipe);
@@ -3319,10 +3119,53 @@ void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3319 udelay(500); 3119 udelay(500);
3320 if (wait_for(I915_READ(dslreg) != temp, 5)) { 3120 if (wait_for(I915_READ(dslreg) != temp, 5)) {
3321 if (wait_for(I915_READ(dslreg) != temp, 5)) 3121 if (wait_for(I915_READ(dslreg) != temp, 5))
3322 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); 3122 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3123 }
3124}
3125
3126static void ironlake_pfit_enable(struct intel_crtc *crtc)
3127{
3128 struct drm_device *dev = crtc->base.dev;
3129 struct drm_i915_private *dev_priv = dev->dev_private;
3130 int pipe = crtc->pipe;
3131
3132 if (crtc->config.pch_pfit.size) {
3133 /* Force use of hard-coded filter coefficients
3134 * as some pre-programmed values are broken,
3135 * e.g. x201.
3136 */
3137 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3138 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3139 PF_PIPE_SEL_IVB(pipe));
3140 else
3141 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3142 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3143 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3323 } 3144 }
3324} 3145}
3325 3146
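
ironlake_pfit_enable() above writes crtc->config.pch_pfit.pos and .size straight into PF_WIN_POS / PF_WIN_SZ. The packing sketched below is an assumption, consistent with these being single u32 window registers:

/* Presumed packing of the pfit window registers (assumption only): */
static u32 example_pf_win_pos(u32 x, u32 y)
{
	return (x << 16) | y;		/* PF_WIN_POS: x in 31:16, y in 15:0 */
}

static u32 example_pf_win_sz(u32 width, u32 height)
{
	return (width << 16) | height;	/* PF_WIN_SZ: w in 31:16, h in 15:0 */
}
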
3147static void intel_enable_planes(struct drm_crtc *crtc)
3148{
3149 struct drm_device *dev = crtc->dev;
3150 enum pipe pipe = to_intel_crtc(crtc)->pipe;
3151 struct intel_plane *intel_plane;
3152
3153 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3154 if (intel_plane->pipe == pipe)
3155 intel_plane_restore(&intel_plane->base);
3156}
3157
3158static void intel_disable_planes(struct drm_crtc *crtc)
3159{
3160 struct drm_device *dev = crtc->dev;
3161 enum pipe pipe = to_intel_crtc(crtc)->pipe;
3162 struct intel_plane *intel_plane;
3163
3164 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3165 if (intel_plane->pipe == pipe)
3166 intel_plane_disable(&intel_plane->base);
3167}
3168
3326static void ironlake_crtc_enable(struct drm_crtc *crtc) 3169static void ironlake_crtc_enable(struct drm_crtc *crtc)
3327{ 3170{
3328 struct drm_device *dev = crtc->dev; 3171 struct drm_device *dev = crtc->dev;
@@ -3339,6 +3182,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3339 return; 3182 return;
3340 3183
3341 intel_crtc->active = true; 3184 intel_crtc->active = true;
3185
3186 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3187 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3188
3342 intel_update_watermarks(dev); 3189 intel_update_watermarks(dev);
3343 3190
3344 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 3191 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -3362,22 +3209,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3362 if (encoder->pre_enable) 3209 if (encoder->pre_enable)
3363 encoder->pre_enable(encoder); 3210 encoder->pre_enable(encoder);
3364 3211
3365 /* Enable panel fitting for LVDS */ 3212 ironlake_pfit_enable(intel_crtc);
3366 if (dev_priv->pch_pf_size &&
3367 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3368 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3369 /* Force use of hard-coded filter coefficients
3370 * as some pre-programmed values are broken,
3371 * e.g. x201.
3372 */
3373 if (IS_IVYBRIDGE(dev))
3374 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3375 PF_PIPE_SEL_IVB(pipe));
3376 else
3377 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3378 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3379 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3380 }
3381 3213
3382 /* 3214 /*
3383 * On ILK+ LUT must be loaded before the pipe is running but with 3215 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -3388,6 +3220,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3388 intel_enable_pipe(dev_priv, pipe, 3220 intel_enable_pipe(dev_priv, pipe,
3389 intel_crtc->config.has_pch_encoder); 3221 intel_crtc->config.has_pch_encoder);
3390 intel_enable_plane(dev_priv, plane, pipe); 3222 intel_enable_plane(dev_priv, plane, pipe);
3223 intel_enable_planes(crtc);
3224 intel_crtc_update_cursor(crtc, true);
3391 3225
3392 if (intel_crtc->config.has_pch_encoder) 3226 if (intel_crtc->config.has_pch_encoder)
3393 ironlake_pch_enable(crtc); 3227 ironlake_pch_enable(crtc);
@@ -3396,13 +3230,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3396 intel_update_fbc(dev); 3230 intel_update_fbc(dev);
3397 mutex_unlock(&dev->struct_mutex); 3231 mutex_unlock(&dev->struct_mutex);
3398 3232
3399 intel_crtc_update_cursor(crtc, true);
3400
3401 for_each_encoder_on_crtc(dev, crtc, encoder) 3233 for_each_encoder_on_crtc(dev, crtc, encoder)
3402 encoder->enable(encoder); 3234 encoder->enable(encoder);
3403 3235
3404 if (HAS_PCH_CPT(dev)) 3236 if (HAS_PCH_CPT(dev))
3405 intel_cpt_verify_modeset(dev, intel_crtc->pipe); 3237 cpt_verify_modeset(dev, intel_crtc->pipe);
3406 3238
3407 /* 3239 /*
3408 * There seems to be a race in PCH platform hw (at least on some 3240 * There seems to be a race in PCH platform hw (at least on some
@@ -3415,6 +3247,42 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3415 intel_wait_for_vblank(dev, intel_crtc->pipe); 3247 intel_wait_for_vblank(dev, intel_crtc->pipe);
3416} 3248}
3417 3249
3250/* IPS only exists on ULT machines and is tied to pipe A. */
3251static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3252{
3253 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3254}
3255
3256static void hsw_enable_ips(struct intel_crtc *crtc)
3257{
3258 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3259
3260 if (!crtc->config.ips_enabled)
3261 return;
3262
3263 /* We can only enable IPS after we enable a plane and wait for a vblank.
3264 * We guarantee that the plane is enabled by calling intel_enable_ips
3265 * only after intel_enable_plane. And intel_enable_plane already waits
3266 * for a vblank, so all we need to do here is to enable the IPS bit. */
3267 assert_plane_enabled(dev_priv, crtc->plane);
3268 I915_WRITE(IPS_CTL, IPS_ENABLE);
3269}
3270
3271static void hsw_disable_ips(struct intel_crtc *crtc)
3272{
3273 struct drm_device *dev = crtc->base.dev;
3274 struct drm_i915_private *dev_priv = dev->dev_private;
3275
3276 if (!crtc->config.ips_enabled)
3277 return;
3278
3279 assert_plane_enabled(dev_priv, crtc->plane);
3280 I915_WRITE(IPS_CTL, 0);
3281
3282 /* We need to wait for a vblank before we can disable the plane. */
3283 intel_wait_for_vblank(dev, crtc->pipe);
3284}
3285
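
The two IPS helpers above depend on a strict ordering against plane enable/disable. Sketched from the call sites visible in haswell_crtc_enable() and haswell_crtc_disable() below:

/* Ordering the IPS helpers rely on (sketch of the call sites):
 *
 * enable:   intel_enable_plane(...);   plane on, a vblank wait follows
 *           hsw_enable_ips(crtc);      safe, plane proven enabled
 *
 * disable:  hsw_disable_ips(crtc);     clear IPS_CTL, wait one vblank
 *           intel_disable_plane(...);  only now may the plane go off
 */
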
3418static void haswell_crtc_enable(struct drm_crtc *crtc) 3286static void haswell_crtc_enable(struct drm_crtc *crtc)
3419{ 3287{
3420 struct drm_device *dev = crtc->dev; 3288 struct drm_device *dev = crtc->dev;
@@ -3430,6 +3298,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3430 return; 3298 return;
3431 3299
3432 intel_crtc->active = true; 3300 intel_crtc->active = true;
3301
3302 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3303 if (intel_crtc->config.has_pch_encoder)
3304 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3305
3433 intel_update_watermarks(dev); 3306 intel_update_watermarks(dev);
3434 3307
3435 if (intel_crtc->config.has_pch_encoder) 3308 if (intel_crtc->config.has_pch_encoder)
@@ -3441,18 +3314,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3441 3314
3442 intel_ddi_enable_pipe_clock(intel_crtc); 3315 intel_ddi_enable_pipe_clock(intel_crtc);
3443 3316
3444 /* Enable panel fitting for eDP */ 3317 ironlake_pfit_enable(intel_crtc);
3445 if (dev_priv->pch_pf_size &&
3446 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3447 /* Force use of hard-coded filter coefficients
3448 * as some pre-programmed values are broken,
3449 * e.g. x201.
3450 */
3451 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3452 PF_PIPE_SEL_IVB(pipe));
3453 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3454 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3455 }
3456 3318
3457 /* 3319 /*
3458 * On ILK+ LUT must be loaded before the pipe is running but with 3320 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -3466,6 +3328,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3466 intel_enable_pipe(dev_priv, pipe, 3328 intel_enable_pipe(dev_priv, pipe,
3467 intel_crtc->config.has_pch_encoder); 3329 intel_crtc->config.has_pch_encoder);
3468 intel_enable_plane(dev_priv, plane, pipe); 3330 intel_enable_plane(dev_priv, plane, pipe);
3331 intel_enable_planes(crtc);
3332 intel_crtc_update_cursor(crtc, true);
3333
3334 hsw_enable_ips(intel_crtc);
3469 3335
3470 if (intel_crtc->config.has_pch_encoder) 3336 if (intel_crtc->config.has_pch_encoder)
3471 lpt_pch_enable(crtc); 3337 lpt_pch_enable(crtc);
@@ -3474,8 +3340,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3474 intel_update_fbc(dev); 3340 intel_update_fbc(dev);
3475 mutex_unlock(&dev->struct_mutex); 3341 mutex_unlock(&dev->struct_mutex);
3476 3342
3477 intel_crtc_update_cursor(crtc, true);
3478
3479 for_each_encoder_on_crtc(dev, crtc, encoder) 3343 for_each_encoder_on_crtc(dev, crtc, encoder)
3480 encoder->enable(encoder); 3344 encoder->enable(encoder);
3481 3345
@@ -3490,6 +3354,21 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3490 intel_wait_for_vblank(dev, intel_crtc->pipe); 3354 intel_wait_for_vblank(dev, intel_crtc->pipe);
3491} 3355}
3492 3356
3357static void ironlake_pfit_disable(struct intel_crtc *crtc)
3358{
3359 struct drm_device *dev = crtc->base.dev;
3360 struct drm_i915_private *dev_priv = dev->dev_private;
3361 int pipe = crtc->pipe;
3362
3363 /* To avoid upsetting the power well on haswell only disable the pfit if
3364 * it's in use. The hw state code will make sure we get this right. */
3365 if (crtc->config.pch_pfit.size) {
3366 I915_WRITE(PF_CTL(pipe), 0);
3367 I915_WRITE(PF_WIN_POS(pipe), 0);
3368 I915_WRITE(PF_WIN_SZ(pipe), 0);
3369 }
3370}
3371
3493static void ironlake_crtc_disable(struct drm_crtc *crtc) 3372static void ironlake_crtc_disable(struct drm_crtc *crtc)
3494{ 3373{
3495 struct drm_device *dev = crtc->dev; 3374 struct drm_device *dev = crtc->dev;
@@ -3509,58 +3388,51 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3509 3388
3510 intel_crtc_wait_for_pending_flips(crtc); 3389 intel_crtc_wait_for_pending_flips(crtc);
3511 drm_vblank_off(dev, pipe); 3390 drm_vblank_off(dev, pipe);
3512 intel_crtc_update_cursor(crtc, false);
3513
3514 intel_disable_plane(dev_priv, plane, pipe);
3515 3391
3516 if (dev_priv->cfb_plane == plane) 3392 if (dev_priv->cfb_plane == plane)
3517 intel_disable_fbc(dev); 3393 intel_disable_fbc(dev);
3518 3394
3395 intel_crtc_update_cursor(crtc, false);
3396 intel_disable_planes(crtc);
3397 intel_disable_plane(dev_priv, plane, pipe);
3398
3399 if (intel_crtc->config.has_pch_encoder)
3400 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3401
3519 intel_disable_pipe(dev_priv, pipe); 3402 intel_disable_pipe(dev_priv, pipe);
3520 3403
3521 /* Disable PF */ 3404 ironlake_pfit_disable(intel_crtc);
3522 I915_WRITE(PF_CTL(pipe), 0);
3523 I915_WRITE(PF_WIN_SZ(pipe), 0);
3524 3405
3525 for_each_encoder_on_crtc(dev, crtc, encoder) 3406 for_each_encoder_on_crtc(dev, crtc, encoder)
3526 if (encoder->post_disable) 3407 if (encoder->post_disable)
3527 encoder->post_disable(encoder); 3408 encoder->post_disable(encoder);
3528 3409
3529 ironlake_fdi_disable(crtc); 3410 if (intel_crtc->config.has_pch_encoder) {
3411 ironlake_fdi_disable(crtc);
3530 3412
3531 ironlake_disable_pch_transcoder(dev_priv, pipe); 3413 ironlake_disable_pch_transcoder(dev_priv, pipe);
3414 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3532 3415
3533 if (HAS_PCH_CPT(dev)) { 3416 if (HAS_PCH_CPT(dev)) {
3534 /* disable TRANS_DP_CTL */ 3417 /* disable TRANS_DP_CTL */
3535 reg = TRANS_DP_CTL(pipe); 3418 reg = TRANS_DP_CTL(pipe);
3536 temp = I915_READ(reg); 3419 temp = I915_READ(reg);
3537 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 3420 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3538 temp |= TRANS_DP_PORT_SEL_NONE; 3421 TRANS_DP_PORT_SEL_MASK);
3539 I915_WRITE(reg, temp); 3422 temp |= TRANS_DP_PORT_SEL_NONE;
3540 3423 I915_WRITE(reg, temp);
3541 /* disable DPLL_SEL */ 3424
3542 temp = I915_READ(PCH_DPLL_SEL); 3425 /* disable DPLL_SEL */
3543 switch (pipe) { 3426 temp = I915_READ(PCH_DPLL_SEL);
3544 case 0: 3427 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3545 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 3428 I915_WRITE(PCH_DPLL_SEL, temp);
3546 break;
3547 case 1:
3548 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3549 break;
3550 case 2:
3551 /* C shares PLL A or B */
3552 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3553 break;
3554 default:
3555 BUG(); /* wtf */
3556 } 3429 }
3557 I915_WRITE(PCH_DPLL_SEL, temp);
3558 }
3559 3430
3560 /* disable PCH DPLL */ 3431 /* disable PCH DPLL */
3561 intel_disable_pch_pll(intel_crtc); 3432 intel_disable_shared_dpll(intel_crtc);
3562 3433
3563 ironlake_fdi_pll_disable(intel_crtc); 3434 ironlake_fdi_pll_disable(intel_crtc);
3435 }
3564 3436
3565 intel_crtc->active = false; 3437 intel_crtc->active = false;
3566 intel_update_watermarks(dev); 3438 intel_update_watermarks(dev);
@@ -3588,24 +3460,24 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3588 3460
3589 intel_crtc_wait_for_pending_flips(crtc); 3461 intel_crtc_wait_for_pending_flips(crtc);
3590 drm_vblank_off(dev, pipe); 3462 drm_vblank_off(dev, pipe);
3591 intel_crtc_update_cursor(crtc, false);
3592
3593 intel_disable_plane(dev_priv, plane, pipe);
3594 3463
3464 /* FBC must be disabled before disabling the plane on HSW. */
3595 if (dev_priv->cfb_plane == plane) 3465 if (dev_priv->cfb_plane == plane)
3596 intel_disable_fbc(dev); 3466 intel_disable_fbc(dev);
3597 3467
3468 hsw_disable_ips(intel_crtc);
3469
3470 intel_crtc_update_cursor(crtc, false);
3471 intel_disable_planes(crtc);
3472 intel_disable_plane(dev_priv, plane, pipe);
3473
3474 if (intel_crtc->config.has_pch_encoder)
3475 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3598 intel_disable_pipe(dev_priv, pipe); 3476 intel_disable_pipe(dev_priv, pipe);
3599 3477
3600 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 3478 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3601 3479
3602 /* XXX: Once we have proper panel fitter state tracking implemented with 3480 ironlake_pfit_disable(intel_crtc);
3603 * hardware state read/check support we should switch to only disable
3604 * the panel fitter when we know it's used. */
3605 if (intel_using_power_well(dev)) {
3606 I915_WRITE(PF_CTL(pipe), 0);
3607 I915_WRITE(PF_WIN_SZ(pipe), 0);
3608 }
3609 3481
3610 intel_ddi_disable_pipe_clock(intel_crtc); 3482 intel_ddi_disable_pipe_clock(intel_crtc);
3611 3483
@@ -3615,6 +3487,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3615 3487
3616 if (intel_crtc->config.has_pch_encoder) { 3488 if (intel_crtc->config.has_pch_encoder) {
3617 lpt_disable_pch_transcoder(dev_priv); 3489 lpt_disable_pch_transcoder(dev_priv);
3490 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3618 intel_ddi_fdi_disable(crtc); 3491 intel_ddi_fdi_disable(crtc);
3619 } 3492 }
3620 3493
@@ -3629,17 +3502,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3629static void ironlake_crtc_off(struct drm_crtc *crtc) 3502static void ironlake_crtc_off(struct drm_crtc *crtc)
3630{ 3503{
3631 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3504 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3632 intel_put_pch_pll(intel_crtc); 3505 intel_put_shared_dpll(intel_crtc);
3633} 3506}
3634 3507
3635static void haswell_crtc_off(struct drm_crtc *crtc) 3508static void haswell_crtc_off(struct drm_crtc *crtc)
3636{ 3509{
3637 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3638
3639 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3640 * start using it. */
3641 intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
3642
3643 intel_ddi_put_crtc_pll(crtc); 3510 intel_ddi_put_crtc_pll(crtc);
3644} 3511}
3645 3512
@@ -3685,6 +3552,77 @@ g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3685 } 3552 }
3686} 3553}
3687 3554
3555static void i9xx_pfit_enable(struct intel_crtc *crtc)
3556{
3557 struct drm_device *dev = crtc->base.dev;
3558 struct drm_i915_private *dev_priv = dev->dev_private;
3559 struct intel_crtc_config *pipe_config = &crtc->config;
3560
3561 if (!crtc->config.gmch_pfit.control)
3562 return;
3563
3564 /*
3565 * The panel fitter should only be adjusted whilst the pipe is disabled,
3566 * according to register description and PRM.
3567 */
3568 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3569 assert_pipe_disabled(dev_priv, crtc->pipe);
3570
3571 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3572 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
3573
3574 /* Border color in case we don't scale up to the full screen. Black by
3575 * default, change to something else for debugging. */
3576 I915_WRITE(BCLRPAT(crtc->pipe), 0);
3577}
3578
3579static void valleyview_crtc_enable(struct drm_crtc *crtc)
3580{
3581 struct drm_device *dev = crtc->dev;
3582 struct drm_i915_private *dev_priv = dev->dev_private;
3583 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3584 struct intel_encoder *encoder;
3585 int pipe = intel_crtc->pipe;
3586 int plane = intel_crtc->plane;
3587
3588 WARN_ON(!crtc->enabled);
3589
3590 if (intel_crtc->active)
3591 return;
3592
3593 intel_crtc->active = true;
3594 intel_update_watermarks(dev);
3595
3596 mutex_lock(&dev_priv->dpio_lock);
3597
3598 for_each_encoder_on_crtc(dev, crtc, encoder)
3599 if (encoder->pre_pll_enable)
3600 encoder->pre_pll_enable(encoder);
3601
3602 intel_enable_pll(dev_priv, pipe);
3603
3604 for_each_encoder_on_crtc(dev, crtc, encoder)
3605 if (encoder->pre_enable)
3606 encoder->pre_enable(encoder);
3607
3608 /* VLV wants encoder enabling _before_ the pipe is up. */
3609 for_each_encoder_on_crtc(dev, crtc, encoder)
3610 encoder->enable(encoder);
3611
3612 i9xx_pfit_enable(intel_crtc);
3613
3614 intel_crtc_load_lut(crtc);
3615
3616 intel_enable_pipe(dev_priv, pipe, false);
3617 intel_enable_plane(dev_priv, plane, pipe);
3618 intel_enable_planes(crtc);
3619 intel_crtc_update_cursor(crtc, true);
3620
3621 intel_update_fbc(dev);
3622
3623 mutex_unlock(&dev_priv->dpio_lock);
3624}
3625
3688static void i9xx_crtc_enable(struct drm_crtc *crtc) 3626static void i9xx_crtc_enable(struct drm_crtc *crtc)
3689{ 3627{
3690 struct drm_device *dev = crtc->dev; 3628 struct drm_device *dev = crtc->dev;
@@ -3708,17 +3646,22 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3708 if (encoder->pre_enable) 3646 if (encoder->pre_enable)
3709 encoder->pre_enable(encoder); 3647 encoder->pre_enable(encoder);
3710 3648
3649 i9xx_pfit_enable(intel_crtc);
3650
3651 intel_crtc_load_lut(crtc);
3652
3711 intel_enable_pipe(dev_priv, pipe, false); 3653 intel_enable_pipe(dev_priv, pipe, false);
3712 intel_enable_plane(dev_priv, plane, pipe); 3654 intel_enable_plane(dev_priv, plane, pipe);
3655 intel_enable_planes(crtc);
3656 /* The fixup needs to happen before cursor is enabled */
3713 if (IS_G4X(dev)) 3657 if (IS_G4X(dev))
3714 g4x_fixup_plane(dev_priv, pipe); 3658 g4x_fixup_plane(dev_priv, pipe);
3715 3659 intel_crtc_update_cursor(crtc, true);
3716 intel_crtc_load_lut(crtc);
3717 intel_update_fbc(dev);
3718 3660
3719 /* Give the overlay scaler a chance to enable if it's on this pipe */ 3661 /* Give the overlay scaler a chance to enable if it's on this pipe */
3720 intel_crtc_dpms_overlay(intel_crtc, true); 3662 intel_crtc_dpms_overlay(intel_crtc, true);
3721 intel_crtc_update_cursor(crtc, true); 3663
3664 intel_update_fbc(dev);
3722 3665
3723 for_each_encoder_on_crtc(dev, crtc, encoder) 3666 for_each_encoder_on_crtc(dev, crtc, encoder)
3724 encoder->enable(encoder); 3667 encoder->enable(encoder);
@@ -3728,20 +3671,15 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
3728{ 3671{
3729 struct drm_device *dev = crtc->base.dev; 3672 struct drm_device *dev = crtc->base.dev;
3730 struct drm_i915_private *dev_priv = dev->dev_private; 3673 struct drm_i915_private *dev_priv = dev->dev_private;
3731 enum pipe pipe;
3732 uint32_t pctl = I915_READ(PFIT_CONTROL);
3733 3674
3734 assert_pipe_disabled(dev_priv, crtc->pipe); 3675 if (!crtc->config.gmch_pfit.control)
3676 return;
3735 3677
3736 if (INTEL_INFO(dev)->gen >= 4) 3678 assert_pipe_disabled(dev_priv, crtc->pipe);
3737 pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
3738 else
3739 pipe = PIPE_B;
3740 3679
3741 if (pipe == crtc->pipe) { 3680 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
3742 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl); 3681 I915_READ(PFIT_CONTROL));
3743 I915_WRITE(PFIT_CONTROL, 0); 3682 I915_WRITE(PFIT_CONTROL, 0);
3744 }
3745} 3683}
3746 3684
3747static void i9xx_crtc_disable(struct drm_crtc *crtc) 3685static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3762,17 +3700,23 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3762 /* Give the overlay scaler a chance to disable if it's on this pipe */ 3700 /* Give the overlay scaler a chance to disable if it's on this pipe */
3763 intel_crtc_wait_for_pending_flips(crtc); 3701 intel_crtc_wait_for_pending_flips(crtc);
3764 drm_vblank_off(dev, pipe); 3702 drm_vblank_off(dev, pipe);
3765 intel_crtc_dpms_overlay(intel_crtc, false);
3766 intel_crtc_update_cursor(crtc, false);
3767 3703
3768 if (dev_priv->cfb_plane == plane) 3704 if (dev_priv->cfb_plane == plane)
3769 intel_disable_fbc(dev); 3705 intel_disable_fbc(dev);
3770 3706
3707 intel_crtc_dpms_overlay(intel_crtc, false);
3708 intel_crtc_update_cursor(crtc, false);
3709 intel_disable_planes(crtc);
3771 intel_disable_plane(dev_priv, plane, pipe); 3710 intel_disable_plane(dev_priv, plane, pipe);
3711
3772 intel_disable_pipe(dev_priv, pipe); 3712 intel_disable_pipe(dev_priv, pipe);
3773 3713
3774 i9xx_pfit_disable(intel_crtc); 3714 i9xx_pfit_disable(intel_crtc);
3775 3715
3716 for_each_encoder_on_crtc(dev, crtc, encoder)
3717 if (encoder->post_disable)
3718 encoder->post_disable(encoder);
3719
3776 intel_disable_pll(dev_priv, pipe); 3720 intel_disable_pll(dev_priv, pipe);
3777 3721
3778 intel_crtc->active = false; 3722 intel_crtc->active = false;
@@ -3845,8 +3789,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3845 /* crtc should still be enabled when we disable it. */ 3789 /* crtc should still be enabled when we disable it. */
3846 WARN_ON(!crtc->enabled); 3790 WARN_ON(!crtc->enabled);
3847 3791
3848 intel_crtc->eld_vld = false;
3849 dev_priv->display.crtc_disable(crtc); 3792 dev_priv->display.crtc_disable(crtc);
3793 intel_crtc->eld_vld = false;
3850 intel_crtc_update_sarea(crtc, false); 3794 intel_crtc_update_sarea(crtc, false);
3851 dev_priv->display.off(crtc); 3795 dev_priv->display.off(crtc);
3852 3796
@@ -3977,17 +3921,131 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
3977 return encoder->get_hw_state(encoder, &pipe); 3921 return encoder->get_hw_state(encoder, &pipe);
3978} 3922}
3979 3923
3980static bool intel_crtc_compute_config(struct drm_crtc *crtc, 3924static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
3981 struct intel_crtc_config *pipe_config) 3925 struct intel_crtc_config *pipe_config)
3982{ 3926{
3983 struct drm_device *dev = crtc->dev; 3927 struct drm_i915_private *dev_priv = dev->dev_private;
3928 struct intel_crtc *pipe_B_crtc =
3929 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3930
3931 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
3932 pipe_name(pipe), pipe_config->fdi_lanes);
3933 if (pipe_config->fdi_lanes > 4) {
3934 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
3935 pipe_name(pipe), pipe_config->fdi_lanes);
3936 return false;
3937 }
3938
3939 if (IS_HASWELL(dev)) {
3940 if (pipe_config->fdi_lanes > 2) {
3941 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
3942 pipe_config->fdi_lanes);
3943 return false;
3944 } else {
3945 return true;
3946 }
3947 }
3948
3949 if (INTEL_INFO(dev)->num_pipes == 2)
3950 return true;
3951
3952 /* Ivybridge 3 pipe is really complicated */
3953 switch (pipe) {
3954 case PIPE_A:
3955 return true;
3956 case PIPE_B:
3957 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
3958 pipe_config->fdi_lanes > 2) {
3959 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
3960 pipe_name(pipe), pipe_config->fdi_lanes);
3961 return false;
3962 }
3963 return true;
3964 case PIPE_C:
3965 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
3966 pipe_B_crtc->config.fdi_lanes <= 2) {
3967 if (pipe_config->fdi_lanes > 2) {
3968 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
3969 pipe_name(pipe), pipe_config->fdi_lanes);
3970 return false;
3971 }
3972 } else {
3973 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
3974 return false;
3975 }
3976 return true;
3977 default:
3978 BUG();
3979 }
3980}
3981
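
A summary of what ironlake_check_fdi_lanes() enforces, inferred from the checks above (the hardware rationale is an assumption): on three-pipe Ivybridge the FDI links for pipes B and C share lane resources.

/* Inferred IVB FDI lane-sharing rules (summary of the checks above):
 *
 *   pipe A: no restriction, dedicated link
 *   pipe B: at most 2 lanes while pipe C is enabled
 *   pipe C: at most 2 lanes, and only if link B itself uses <= 2
 */
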
3982#define RETRY 1
3983static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
3984 struct intel_crtc_config *pipe_config)
3985{
3986 struct drm_device *dev = intel_crtc->base.dev;
3987 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
3988 int lane, link_bw, fdi_dotclock;
3989 bool setup_ok, needs_recompute = false;
3990
3991retry:
3992 /* FDI is a binary signal running at ~2.7GHz, encoding
3993 * each output octet as 10 bits. The actual frequency
3994 * is stored as a divider into a 100MHz clock, and the
3995 * mode pixel clock is stored in units of 1KHz.
3996 * Hence the bw of each lane in terms of the mode signal
3997 * is:
3998 */
3999 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4000
4001 fdi_dotclock = adjusted_mode->clock;
4002 fdi_dotclock /= pipe_config->pixel_multiplier;
4003
4004 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4005 pipe_config->pipe_bpp);
4006
4007 pipe_config->fdi_lanes = lane;
4008
4009 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
4010 link_bw, &pipe_config->fdi_m_n);
4011
4012 setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
4013 intel_crtc->pipe, pipe_config);
4014 if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
4015 pipe_config->pipe_bpp -= 2*3;
4016 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
4017 pipe_config->pipe_bpp);
4018 needs_recompute = true;
4019 pipe_config->bw_constrained = true;
4020
4021 goto retry;
4022 }
4023
4024 if (needs_recompute)
4025 return RETRY;
4026
4027 return setup_ok ? 0 : -EINVAL;
4028}
4029
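
The bandwidth comment in ironlake_fdi_compute_config() is easier to follow with numbers. A worked example; the nominal link frequency of 27 and the 21/20 spread-spectrum margin inside ironlake_get_lanes_required() are assumptions about code not shown in this hunk:

/* Worked example (illustrative): a 2.7 GHz link with 10-bit symbols
 * gives, in the driver's kHz units,
 *
 *   link_bw = 27 * MHz(100) / KHz(1) / 10 = 270000
 *
 * For a 1080p mode (148500 kHz dotclock) at 24 bpp, with a ~5%
 * spread-spectrum margin:
 *
 *   bps   = 148500 * 24 * 21 / 20  = 3742200
 *   lanes = bps / (270000 * 8) + 1 = 2
 */
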
4030static void hsw_compute_ips_config(struct intel_crtc *crtc,
4031 struct intel_crtc_config *pipe_config)
4032{
4033 pipe_config->ips_enabled = i915_enable_ips &&
4034 hsw_crtc_supports_ips(crtc) &&
4035 pipe_config->pipe_bpp == 24;
4036}
4037
4038static int intel_crtc_compute_config(struct intel_crtc *crtc,
4039 struct intel_crtc_config *pipe_config)
4040{
4041 struct drm_device *dev = crtc->base.dev;
3984 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 4042 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
3985 4043
3986 if (HAS_PCH_SPLIT(dev)) { 4044 if (HAS_PCH_SPLIT(dev)) {
3987 /* FDI link clock is fixed at 2.7G */ 4045 /* FDI link clock is fixed at 2.7G */
3988 if (pipe_config->requested_mode.clock * 3 4046 if (pipe_config->requested_mode.clock * 3
3989 > IRONLAKE_FDI_FREQ * 4) 4047 > IRONLAKE_FDI_FREQ * 4)
3990 return false; 4048 return -EINVAL;
3991 } 4049 }
3992 4050
3993 /* All interlaced capable intel hw wants timings in frames. Note though 4051 /* All interlaced capable intel hw wants timings in frames. Note though
@@ -3996,12 +4054,12 @@ static bool intel_crtc_compute_config(struct drm_crtc *crtc,
3996 if (!pipe_config->timings_set) 4054 if (!pipe_config->timings_set)
3997 drm_mode_set_crtcinfo(adjusted_mode, 0); 4055 drm_mode_set_crtcinfo(adjusted_mode, 0);
3998 4056
3999 /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes 4057 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4000 * with a hsync front porch of 0. 4058 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4001 */ 4059 */
4002 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 4060 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
4003 adjusted_mode->hsync_start == adjusted_mode->hdisplay) 4061 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4004 return false; 4062 return -EINVAL;
4005 4063
4006 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) { 4064 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
4007 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */ 4065 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
@@ -4011,7 +4069,18 @@ static bool intel_crtc_compute_config(struct drm_crtc *crtc,
4011 pipe_config->pipe_bpp = 8*3; 4069 pipe_config->pipe_bpp = 8*3;
4012 } 4070 }
4013 4071
4014 return true; 4072 if (HAS_IPS(dev))
4073 hsw_compute_ips_config(crtc, pipe_config);
4074
4075 /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
4076 * clock survives for now. */
4077 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4078 pipe_config->shared_dpll = crtc->config.shared_dpll;
4079
4080 if (pipe_config->has_pch_encoder)
4081 return ironlake_fdi_compute_config(crtc, pipe_config);
4082
4083 return 0;
4015} 4084}
4016 4085
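
intel_crtc_compute_config() now returns 0, -EINVAL, or RETRY, where RETRY asks the caller to redo the computation after ironlake_fdi_compute_config() has lowered pipe_bpp. A sketch of that contract; the loop below is illustrative, the real caller sits in the mode-set path:

/* Illustrative caller loop for the new return contract (sketch): */
int ret;

do {
	ret = intel_crtc_compute_config(crtc, pipe_config);
} while (ret == RETRY);		/* pipe_bpp was lowered, recompute */

if (ret)
	return ret;		/* -EINVAL: mode cannot be supported */
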
4017static int valleyview_get_display_clock_speed(struct drm_device *dev) 4086static int valleyview_get_display_clock_speed(struct drm_device *dev)
@@ -4120,7 +4189,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4120{ 4189{
4121 if (i915_panel_use_ssc >= 0) 4190 if (i915_panel_use_ssc >= 0)
4122 return i915_panel_use_ssc != 0; 4191 return i915_panel_use_ssc != 0;
4123 return dev_priv->lvds_use_ssc 4192 return dev_priv->vbt.lvds_use_ssc
4124 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4193 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4125} 4194}
4126 4195
@@ -4156,7 +4225,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4156 refclk = vlv_get_refclk(crtc); 4225 refclk = vlv_get_refclk(crtc);
4157 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4226 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4158 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4227 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4159 refclk = dev_priv->lvds_ssc_freq * 1000; 4228 refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
4160 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 4229 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4161 refclk / 1000); 4230 refclk / 1000);
4162 } else if (!IS_GEN2(dev)) { 4231 } else if (!IS_GEN2(dev)) {
@@ -4168,28 +4237,14 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4168 return refclk; 4237 return refclk;
4169} 4238}
4170 4239
4171static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc) 4240static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
4172{ 4241{
4173 unsigned dotclock = crtc->config.adjusted_mode.clock; 4242 return (1 << dpll->n) << 16 | dpll->m2;
4174 struct dpll *clock = &crtc->config.dpll; 4243}
4175
4176 /* SDVO TV has fixed PLL values depend on its clock range,
4177 this mirrors vbios setting. */
4178 if (dotclock >= 100000 && dotclock < 140500) {
4179 clock->p1 = 2;
4180 clock->p2 = 10;
4181 clock->n = 3;
4182 clock->m1 = 16;
4183 clock->m2 = 8;
4184 } else if (dotclock >= 140500 && dotclock <= 200000) {
4185 clock->p1 = 1;
4186 clock->p2 = 10;
4187 clock->n = 6;
4188 clock->m1 = 12;
4189 clock->m2 = 8;
4190 }
4191 4244
4192 crtc->config.clock_set = true; 4245static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
4246{
4247 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
4193} 4248}
4194 4249
4195static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 4250static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
@@ -4199,18 +4254,15 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4199 struct drm_i915_private *dev_priv = dev->dev_private; 4254 struct drm_i915_private *dev_priv = dev->dev_private;
4200 int pipe = crtc->pipe; 4255 int pipe = crtc->pipe;
4201 u32 fp, fp2 = 0; 4256 u32 fp, fp2 = 0;
4202 struct dpll *clock = &crtc->config.dpll;
4203 4257
4204 if (IS_PINEVIEW(dev)) { 4258 if (IS_PINEVIEW(dev)) {
4205 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; 4259 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
4206 if (reduced_clock) 4260 if (reduced_clock)
4207 fp2 = (1 << reduced_clock->n) << 16 | 4261 fp2 = pnv_dpll_compute_fp(reduced_clock);
4208 reduced_clock->m1 << 8 | reduced_clock->m2;
4209 } else { 4262 } else {
4210 fp = clock->n << 16 | clock->m1 << 8 | clock->m2; 4263 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
4211 if (reduced_clock) 4264 if (reduced_clock)
4212 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 | 4265 fp2 = i9xx_dpll_compute_fp(reduced_clock);
4213 reduced_clock->m2;
4214 } 4266 }
4215 4267
4216 I915_WRITE(FP0(pipe), fp); 4268 I915_WRITE(FP0(pipe), fp);
@@ -4225,6 +4277,68 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4225 } 4277 }
4226} 4278}
4227 4279
4280static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
4281{
4282 u32 reg_val;
4283
4284 /*
4285 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4286 * and set it to a reasonable value instead.
4287 */
4288 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
4289 reg_val &= 0xffffff00;
4290 reg_val |= 0x00000030;
4291 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
4292
4293 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
4294 reg_val &= 0x8cffffff;
4295 reg_val = 0x8c000000;
4296 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
4297
4298 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
4299 reg_val &= 0xffffff00;
4300 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
4301
4302 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
4303 reg_val &= 0x00ffffff;
4304 reg_val |= 0xb0000000;
4305 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
4306}
4307
4308static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4309 struct intel_link_m_n *m_n)
4310{
4311 struct drm_device *dev = crtc->base.dev;
4312 struct drm_i915_private *dev_priv = dev->dev_private;
4313 int pipe = crtc->pipe;
4314
4315 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4316 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4317 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4318 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4319}
4320
4321static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
4322 struct intel_link_m_n *m_n)
4323{
4324 struct drm_device *dev = crtc->base.dev;
4325 struct drm_i915_private *dev_priv = dev->dev_private;
4326 int pipe = crtc->pipe;
4327 enum transcoder transcoder = crtc->config.cpu_transcoder;
4328
4329 if (INTEL_INFO(dev)->gen >= 5) {
4330 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
4331 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
4332 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
4333 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4334 } else {
4335 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4336 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4337 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
4338 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
4339 }
4340}
4341
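
Both set_m_n helpers program ratio pairs produced earlier by intel_link_compute_m_n(). Conceptually (a sketch; the real helper also reduces the ratios to fit the 24-bit registers and uses TU size 64):

/* Conceptual content of the M/N pairs (sketch):
 *
 *   data M/N = (bpp * pixel_clock) / (8 * lanes * link_clock)
 *   link M/N =  pixel_clock / link_clock
 */
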
4228static void intel_dp_set_m_n(struct intel_crtc *crtc) 4342static void intel_dp_set_m_n(struct intel_crtc *crtc)
4229{ 4343{
4230 if (crtc->config.has_pch_encoder) 4344 if (crtc->config.has_pch_encoder)
@@ -4237,24 +4351,16 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4237{ 4351{
4238 struct drm_device *dev = crtc->base.dev; 4352 struct drm_device *dev = crtc->base.dev;
4239 struct drm_i915_private *dev_priv = dev->dev_private; 4353 struct drm_i915_private *dev_priv = dev->dev_private;
4354 struct intel_encoder *encoder;
4240 int pipe = crtc->pipe; 4355 int pipe = crtc->pipe;
4241 u32 dpll, mdiv, pdiv; 4356 u32 dpll, mdiv;
4242 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4357 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4243 bool is_sdvo; 4358 bool is_hdmi;
4244 u32 temp; 4359 u32 coreclk, reg_val, dpll_md;
4245 4360
4246 mutex_lock(&dev_priv->dpio_lock); 4361 mutex_lock(&dev_priv->dpio_lock);
4247 4362
4248 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || 4363 is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4249 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4250
4251 dpll = DPLL_VGA_MODE_DIS;
4252 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
4253 dpll |= DPLL_REFA_CLK_ENABLE_VLV;
4254 dpll |= DPLL_INTEGRATED_CLOCK_VLV;
4255
4256 I915_WRITE(DPLL(pipe), dpll);
4257 POSTING_READ(DPLL(pipe));
4258 4364
4259 bestn = crtc->config.dpll.n; 4365 bestn = crtc->config.dpll.n;
4260 bestm1 = crtc->config.dpll.m1; 4366 bestm1 = crtc->config.dpll.m1;
@@ -4262,72 +4368,104 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4262 bestp1 = crtc->config.dpll.p1; 4368 bestp1 = crtc->config.dpll.p1;
4263 bestp2 = crtc->config.dpll.p2; 4369 bestp2 = crtc->config.dpll.p2;
4264 4370
4265 /* 4371 /* See eDP HDMI DPIO driver vbios notes doc */
4266 * In Valleyview PLL and program lane counter registers are exposed 4372
4267 * through DPIO interface 4373 /* PLL B needs special handling */
4268 */ 4374 if (pipe)
4375 vlv_pllb_recal_opamp(dev_priv);
4376
4377 /* Set up Tx target for periodic Rcomp update */
4378 vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
4379
4380 /* Disable target IRef on PLL */
4381 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
4382 reg_val &= 0x00ffffff;
4383 vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
4384
4385 /* Disable fast lock */
+	vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+
+	/* Set idtafcrecal before PLL is enabled */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
 	mdiv |= ((bestn << DPIO_N_SHIFT));
-	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
 	mdiv |= (1 << DPIO_K_SHIFT);
+
+	/*
+	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+	 * but we don't support that).
+	 * Note: don't use the DAC post divider as it seems unstable.
+	 */
+	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
 	mdiv |= DPIO_ENABLE_CALIBRATION;
-	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+	/* Set HBR and RBR LPF coefficients */
+	if (crtc->config.port_clock == 162000 ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+				 0x005f0021);
+	else
+		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+				 0x00d0000f);
+
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+		/* Use SSC source */
+		if (!pipe)
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df40000);
+		else
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df70000);
+	} else { /* HDMI or VGA */
+		/* Use bend source */
+		if (!pipe)
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df70000);
+		else
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df40000);
+	}
 
-	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+	coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+		coreclk |= 0x01000000;
+	vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
 
-	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
-		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
-		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
-		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
-	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+	vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
 
-	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	/* Enable DPIO clock input */
+	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+	if (pipe)
+		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
 	dpll |= DPLL_VCO_ENABLE;
 	I915_WRITE(DPLL(pipe), dpll);
 	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
 		DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
-	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+	dpll_md = (crtc->config.pixel_multiplier - 1)
+		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+	I915_WRITE(DPLL_MD(pipe), dpll_md);
+	POSTING_READ(DPLL_MD(pipe));
 
 	if (crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(crtc);
 
-	I915_WRITE(DPLL(pipe), dpll);
-
-	/* Wait for the clocks to stabilize. */
-	POSTING_READ(DPLL(pipe));
-	udelay(150);
-
-	temp = 0;
-	if (is_sdvo) {
-		temp = 0;
-		if (crtc->config.pixel_multiplier > 1) {
-			temp = (crtc->config.pixel_multiplier - 1)
-				<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
-		}
-	}
-	I915_WRITE(DPLL_MD(pipe), temp);
-	POSTING_READ(DPLL_MD(pipe));
-
-	/* Now program lane control registers */
-	if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
-	   || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
-		temp = 0x1000C4;
-		if(pipe == 1)
-			temp |= (1 << 21);
-		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
-	}
-
-	if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
-		temp = 0x1000C4;
-		if(pipe == 1)
-			temp |= (1 << 21);
-		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
-	}
-
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
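The DPIO divider programming above packs all five PLL divisors (n, m1, m2, p1, p2) into a single DPIO_DIV word, and writes that register twice: first bare, then again with DPIO_ENABLE_CALIBRATION set, so calibration only starts once the divisors are latched. A condensed sketch of the packing, assuming the DPIO_* shift/mask definitions from i915_reg.h (hypothetical helper, not a driver function):

	static u32 vlv_pack_mdiv(u32 n, u32 m1, u32 m2, u32 p1, u32 p2)
	{
		u32 mdiv = (m1 << DPIO_M1DIV_SHIFT) | (m2 & DPIO_M2DIV_MASK);

		mdiv |= (p1 << DPIO_P1_SHIFT) | (p2 << DPIO_P2_SHIFT);
		mdiv |= (n << DPIO_N_SHIFT);
		mdiv |= (1 << DPIO_K_SHIFT);
		/* HDMI/DP post divider; the DAC one is avoided as unstable. */
		mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
		return mdiv;
	}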
@@ -4355,14 +4493,14 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
 
-	if (is_sdvo) {
-		if ((crtc->config.pixel_multiplier > 1) &&
-		    (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
-			dpll |= (crtc->config.pixel_multiplier - 1)
-				<< SDVO_MULTIPLIER_SHIFT_HIRES;
-		}
-		dpll |= DPLL_DVO_HIGH_SPEED;
+	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+		dpll |= (crtc->config.pixel_multiplier - 1)
+			<< SDVO_MULTIPLIER_SHIFT_HIRES;
 	}
+
+	if (is_sdvo)
+		dpll |= DPLL_DVO_HIGH_SPEED;
+
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
@@ -4391,12 +4529,8 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 	if (INTEL_INFO(dev)->gen >= 4)
 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
-	if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
+	if (crtc->config.sdvo_tv_clock)
 		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
-		/* XXX: just matching BIOS for now */
-		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
-		dpll |= 3;
 	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
 		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4422,15 +4556,9 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 	udelay(150);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		u32 temp = 0;
-		if (is_sdvo) {
-			temp = 0;
-			if (crtc->config.pixel_multiplier > 1) {
-				temp = (crtc->config.pixel_multiplier - 1)
-					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
-			}
-		}
-		I915_WRITE(DPLL_MD(pipe), temp);
+		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+		I915_WRITE(DPLL_MD(pipe), dpll_md);
 	} else {
 		/* The pixel multiplier can only be updated once the
 		 * DPLL is enabled and the clocks are stable.
@@ -4442,7 +4570,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 }
 
 static void i8xx_update_pll(struct intel_crtc *crtc,
-			    struct drm_display_mode *adjusted_mode,
 			    intel_clock_t *reduced_clock,
 			    int num_connectors)
 {
@@ -4497,20 +4624,26 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 	I915_WRITE(DPLL(pipe), dpll);
 }
 
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
-				   struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
-	uint32_t vsyncshift;
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
+
+	/* We need to be careful not to changed the adjusted mode, for otherwise
+	 * the hw state checker will get angry at the mismatch. */
+	crtc_vtotal = adjusted_mode->crtc_vtotal;
+	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
 
 	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		/* the chip adds 2 halflines automatically */
-		adjusted_mode->crtc_vtotal -= 1;
-		adjusted_mode->crtc_vblank_end -= 1;
+		crtc_vtotal -= 1;
+		crtc_vblank_end -= 1;
 		vsyncshift = adjusted_mode->crtc_hsync_start
 			     - adjusted_mode->crtc_htotal / 2;
 	} else {
@@ -4532,10 +4665,10 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
 
 	I915_WRITE(VTOTAL(cpu_transcoder),
 		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((adjusted_mode->crtc_vtotal - 1) << 16));
+		   ((crtc_vtotal - 1) << 16));
 	I915_WRITE(VBLANK(cpu_transcoder),
 		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
+		   ((crtc_vblank_end - 1) << 16));
 	I915_WRITE(VSYNC(cpu_transcoder),
 		   (adjusted_mode->crtc_vsync_start - 1) |
 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
@@ -4555,13 +4688,52 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 }
 
+static void intel_get_pipe_timings(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	uint32_t tmp;
+
+	tmp = I915_READ(HTOTAL(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(HBLANK(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(HSYNC(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+	tmp = I915_READ(VTOTAL(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(VBLANK(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(VSYNC(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		pipe_config->adjusted_mode.crtc_vtotal += 1;
+		pipe_config->adjusted_mode.crtc_vblank_end += 1;
+	}
+
+	tmp = I915_READ(PIPESRC(crtc->pipe));
+	pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
+	pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+}
+
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t pipeconf;
 
-	pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
+	pipeconf = 0;
 
 	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
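intel_get_pipe_timings() above leans on one convention: the hardware stores every timing value minus one, with the active count in the low 16 bits of each register and the total in the high 16 bits. A minimal decode sketch of that encoding (hypothetical helper, same arithmetic as the readback above):

	static void decode_timing_reg(uint32_t tmp, int *active, int *total)
	{
		*active = (tmp & 0xffff) + 1;		/* e.g. crtc_hdisplay */
		*total = ((tmp >> 16) & 0xffff) + 1;	/* e.g. crtc_htotal */
	}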
@@ -4573,26 +4745,28 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 		if (intel_crtc->config.requested_mode.clock >
 		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
 			pipeconf |= PIPECONF_DOUBLE_WIDE;
-		else
-			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
-	/* default to 8bpc */
-	pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
-	if (intel_crtc->config.has_dp_encoder) {
-		if (intel_crtc->config.dither) {
-			pipeconf |= PIPECONF_6BPC |
-				    PIPECONF_DITHER_EN |
+	/* only g4x and later have fancy bpc/dither controls */
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		/* Bspec claims that we can't use dithering for 30bpp pipes. */
+		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+			pipeconf |= PIPECONF_DITHER_EN |
 				    PIPECONF_DITHER_TYPE_SP;
-		}
-	}
 
-	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
-						      INTEL_OUTPUT_EDP)) {
-		if (intel_crtc->config.dither) {
-			pipeconf |= PIPECONF_6BPC |
-				    PIPECONF_ENABLE |
-				    I965_PIPECONF_ACTIVE;
+		switch (intel_crtc->config.pipe_bpp) {
+		case 18:
+			pipeconf |= PIPECONF_6BPC;
+			break;
+		case 24:
+			pipeconf |= PIPECONF_8BPC;
+			break;
+		case 30:
+			pipeconf |= PIPECONF_10BPC;
+			break;
+		default:
+			/* Case prevented by intel_choose_pipe_bpp_dither. */
+			BUG();
 		}
 	}
 
@@ -4602,23 +4776,17 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 		} else {
 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
-			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 		}
 	}
 
-	pipeconf &= ~PIPECONF_INTERLACE_MASK;
 	if (!IS_GEN2(dev) &&
 	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 	else
 		pipeconf |= PIPECONF_PROGRESSIVE;
 
-	if (IS_VALLEYVIEW(dev)) {
-		if (intel_crtc->config.limited_color_range)
-			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
-		else
-			pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
-	}
+	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
+		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
 	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
 	POSTING_READ(PIPECONF(intel_crtc->pipe));
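For reference, the bpc handling in i9xx_set_pipeconf() above reduces to this mapping from pipe_bpp to PIPECONF bits; other depths never reach this point because intel_choose_pipe_bpp_dither filters them out earlier (a sketch, not driver code):

	static uint32_t pipeconf_bpc_bits(int pipe_bpp)
	{
		switch (pipe_bpp) {
		case 18: return PIPECONF_6BPC;	/* 6 bits per channel */
		case 24: return PIPECONF_8BPC;
		case 30: return PIPECONF_10BPC;
		default: BUG();	/* prevented by intel_choose_pipe_bpp_dither */
		}
	}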
@@ -4631,16 +4799,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
 	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dspcntr;
-	bool ok, has_reduced_clock = false, is_sdvo = false;
-	bool is_lvds = false, is_tv = false;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
@@ -4650,15 +4816,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			if (encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
-			break;
 		}
 
 		num_connectors++;
@@ -4672,9 +4829,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 	 */
 	limit = intel_limit(crtc, refclk);
-	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
-			     &clock);
-	if (!ok) {
+	ok = dev_priv->display.find_dpll(limit, crtc,
+					 intel_crtc->config.port_clock,
+					 refclk, NULL, &clock);
+	if (!ok && !intel_crtc->config.clock_set) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
@@ -4689,10 +4847,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	 * by using the FP0/FP1. In such case we will disable the LVDS
 	 * downclock feature.
 	 */
-	has_reduced_clock = limit->find_pll(limit, crtc,
+	has_reduced_clock =
+		dev_priv->display.find_dpll(limit, crtc,
 					    dev_priv->lvds_downclock,
-					    refclk,
-					    &clock,
+					    refclk, &clock,
 					    &reduced_clock);
 	}
 	/* Compat-code for transition, will disappear. */
@@ -4704,11 +4862,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		intel_crtc->config.dpll.p2 = clock.p2;
 	}
 
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(intel_crtc);
-
 	if (IS_GEN2(dev))
-		i8xx_update_pll(intel_crtc, adjusted_mode,
+		i8xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
 				num_connectors);
 	else if (IS_VALLEYVIEW(dev))
@@ -4716,7 +4871,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		i9xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
 				num_connectors);
 
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4728,10 +4883,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		dspcntr |= DISPPLANE_SEL_PIPE_B;
 	}
 
-	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
-	drm_mode_debug_printmodeline(mode);
-
-	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+	intel_set_pipe_timings(intel_crtc);
 
 	/* pipesrc and dspsize control the size that is scaled from,
 	 * which should always be the user's requested size.
@@ -4743,10 +4895,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
 	i9xx_set_pipeconf(intel_crtc);
 
-	intel_enable_pipe(dev_priv, pipe, false);
-
-	intel_wait_for_vblank(dev, pipe);
-
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
 
@@ -4757,6 +4905,36 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	return ret;
 }
 
+static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PFIT_CONTROL);
+
+	if (INTEL_INFO(dev)->gen < 4) {
+		if (crtc->pipe != PIPE_B)
+			return;
+
+		/* gen2/3 store dither state in pfit control, needs to match */
+		pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
+	} else {
+		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+			return;
+	}
+
+	if (!(tmp & PFIT_ENABLE))
+		return;
+
+	pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+	if (INTEL_INFO(dev)->gen < 5)
+		pipe_config->gmch_pfit.lvds_border_bits =
+			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_config *pipe_config)
 {
@@ -4764,10 +4942,34 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	pipe_config->cpu_transcoder = crtc->pipe;
+	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
 	tmp = I915_READ(PIPECONF(crtc->pipe));
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	i9xx_get_pfit_config(crtc, pipe_config);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		tmp = I915_READ(DPLL_MD(crtc->pipe));
+		pipe_config->pixel_multiplier =
+			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
+			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+		tmp = I915_READ(DPLL(crtc->pipe));
+		pipe_config->pixel_multiplier =
+			((tmp & SDVO_MULTIPLIER_MASK)
+			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
+	} else {
+		/* Note that on i915G/GM the pixel multiplier is in the sdvo
+		 * port and will be fixed up in the encoder->get_config
+		 * function. */
+		pipe_config->pixel_multiplier = 1;
+	}
+
 	return true;
 }
 
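The pixel-multiplier readback above undoes the (value - 1) encoding that the mode-set paths use when writing DPLL_MD; a round-trip sketch, assuming the DPLL_MD_UDI_MULTIPLIER mask/shift pair from i915_reg.h:

	uint32_t md = (pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
	int decoded = ((md & DPLL_MD_UDI_MULTIPLIER_MASK)
		       >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;	/* == pixel_multiplier */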
@@ -4779,7 +4981,6 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 	u32 val, final;
 	bool has_lvds = false;
 	bool has_cpu_edp = false;
-	bool has_pch_edp = false;
 	bool has_panel = false;
 	bool has_ck505 = false;
 	bool can_ssc = false;
@@ -4794,25 +4995,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 			break;
 		case INTEL_OUTPUT_EDP:
 			has_panel = true;
-			if (intel_encoder_is_pch_edp(&encoder->base))
-				has_pch_edp = true;
-			else
+			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
 				has_cpu_edp = true;
 			break;
 		}
 	}
 
 	if (HAS_PCH_IBX(dev)) {
-		has_ck505 = dev_priv->display_clock_mode;
+		has_ck505 = dev_priv->vbt.display_clock_mode;
 		can_ssc = has_ck505;
 	} else {
 		has_ck505 = false;
 		can_ssc = true;
 	}
 
-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
-		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
-		      has_ck505);
+	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+		      has_panel, has_lvds, has_ck505);
 
 	/* Ironlake: try to setup display ref clock before DPLL
 	 * enabling. This is only under driver's control after
@@ -5102,7 +5300,6 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
-	struct intel_encoder *edp_encoder = NULL;
 	int num_connectors = 0;
 	bool is_lvds = false;
 
@@ -5111,34 +5308,28 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
-		case INTEL_OUTPUT_EDP:
-			edp_encoder = encoder;
-			break;
 		}
 		num_connectors++;
 	}
 
 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
-			      dev_priv->lvds_ssc_freq);
-		return dev_priv->lvds_ssc_freq * 1000;
+			      dev_priv->vbt.lvds_ssc_freq);
+		return dev_priv->vbt.lvds_ssc_freq * 1000;
 	}
 
 	return 120000;
 }
 
-static void ironlake_set_pipeconf(struct drm_crtc *crtc,
-				  struct drm_display_mode *adjusted_mode,
-				  bool dither)
+static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	uint32_t val;
 
-	val = I915_READ(PIPECONF(pipe));
+	val = 0;
 
-	val &= ~PIPECONF_BPC_MASK;
 	switch (intel_crtc->config.pipe_bpp) {
 	case 18:
 		val |= PIPECONF_6BPC;
@@ -5157,20 +5348,16 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
 		BUG();
 	}
 
-	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
-	if (dither)
+	if (intel_crtc->config.dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	val &= ~PIPECONF_INTERLACE_MASK;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
 
 	if (intel_crtc->config.limited_color_range)
 		val |= PIPECONF_COLOR_RANGE_SELECT;
-	else
-		val &= ~PIPECONF_COLOR_RANGE_SELECT;
 
 	I915_WRITE(PIPECONF(pipe), val);
 	POSTING_READ(PIPECONF(pipe));
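ironlake_get_refclk() above returns kHz: the VBT stores the panel SSC frequency in MHz (hence the * 1000), and everything else falls back to the fixed 120 MHz reference. A condensed sketch of the selection (hypothetical helper; vbt_ssc_freq stands in for dev_priv->vbt.lvds_ssc_freq):

	static int refclk_khz(bool is_lvds, bool use_ssc, int num_connectors,
			      int vbt_ssc_freq)
	{
		if (is_lvds && use_ssc && num_connectors < 2)
			return vbt_ssc_freq * 1000;	/* e.g. 100 MHz -> 100000 kHz */
		return 120000;
	}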
@@ -5240,33 +5427,31 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
 	}
 }
 
-static void haswell_set_pipeconf(struct drm_crtc *crtc,
-				 struct drm_display_mode *adjusted_mode,
-				 bool dither)
+static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	uint32_t val;
 
-	val = I915_READ(PIPECONF(cpu_transcoder));
+	val = 0;
 
-	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
-	if (dither)
+	if (intel_crtc->config.dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	val &= ~PIPECONF_INTERLACE_MASK_HSW;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
 
 	I915_WRITE(PIPECONF(cpu_transcoder), val);
 	POSTING_READ(PIPECONF(cpu_transcoder));
+
+	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 }
 
 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
-				    struct drm_display_mode *adjusted_mode,
 				    intel_clock_t *clock,
 				    bool *has_reduced_clock,
 				    intel_clock_t *reduced_clock)
@@ -5276,22 +5461,13 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	struct intel_encoder *intel_encoder;
 	int refclk;
 	const intel_limit_t *limit;
-	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+	bool ret, is_lvds = false;
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			if (intel_encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
-			break;
 		}
 	}
 
@@ -5303,8 +5479,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 	 */
 	limit = intel_limit(crtc, refclk);
-	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
-			      clock);
+	ret = dev_priv->display.find_dpll(limit, crtc,
+					  to_intel_crtc(crtc)->config.port_clock,
+					  refclk, NULL, clock);
 	if (!ret)
 		return false;
 
@@ -5315,16 +5492,13 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	 * by using the FP0/FP1. In such case we will disable the LVDS
 	 * downclock feature.
 	 */
-	*has_reduced_clock = limit->find_pll(limit, crtc,
-					     dev_priv->lvds_downclock,
-					     refclk,
-					     clock,
-					     reduced_clock);
+	*has_reduced_clock =
+		dev_priv->display.find_dpll(limit, crtc,
+					    dev_priv->lvds_downclock,
+					    refclk, clock,
+					    reduced_clock);
 	}
 
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
-
 	return true;
 }
 
@@ -5346,65 +5520,25 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
 	POSTING_READ(SOUTH_CHICKEN1);
 }
 
-static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *pipe_B_crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
-	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
-		      intel_crtc->pipe, intel_crtc->fdi_lanes);
-	if (intel_crtc->fdi_lanes > 4) {
-		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
-			      intel_crtc->pipe, intel_crtc->fdi_lanes);
-		/* Clamp lanes to avoid programming the hw with bogus values. */
-		intel_crtc->fdi_lanes = 4;
-
-		return false;
-	}
-
-	if (INTEL_INFO(dev)->num_pipes == 2)
-		return true;
 
 	switch (intel_crtc->pipe) {
 	case PIPE_A:
-		return true;
+		break;
 	case PIPE_B:
-		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
-		    intel_crtc->fdi_lanes > 2) {
-			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
-				      intel_crtc->pipe, intel_crtc->fdi_lanes);
-			/* Clamp lanes to avoid programming the hw with bogus values. */
-			intel_crtc->fdi_lanes = 2;
-
-			return false;
-		}
-
-		if (intel_crtc->fdi_lanes > 2)
+		if (intel_crtc->config.fdi_lanes > 2)
 			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
 		else
 			cpt_enable_fdi_bc_bifurcation(dev);
 
-		return true;
+		break;
 	case PIPE_C:
-		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
-			if (intel_crtc->fdi_lanes > 2) {
-				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
-					      intel_crtc->pipe, intel_crtc->fdi_lanes);
-				/* Clamp lanes to avoid programming the hw with bogus values. */
-				intel_crtc->fdi_lanes = 2;
-
-				return false;
-			}
-		} else {
-			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
-			return false;
-		}
-
 		cpt_enable_fdi_bc_bifurcation(dev);
 
-		return true;
+		break;
 	default:
 		BUG();
 	}
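The per-pipe cases in ivybridge_update_fdi_bc_bifurcation() above boil down to one predicate: pipe B may keep FDI B/C unsplit only while it needs more than two lanes, and pipe C always forces the split. A sketch of that decision (hypothetical helper, mirroring the switch above):

	static bool needs_fdi_bc_bifurcation(enum pipe pipe, int fdi_lanes)
	{
		switch (pipe) {
		case PIPE_A:
			return false;
		case PIPE_B:
			return fdi_lanes <= 2;	/* >2 lanes needs all of FDI B */
		case PIPE_C:
			return true;	/* pipe C only ever gets the split half */
		default:
			BUG();
		}
	}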
@@ -5421,78 +5555,13 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 	return bps / (link_bw * 8) + 1;
 }
 
-void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
-				  struct intel_link_m_n *m_n)
+static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
-
-	I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-	I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
-	I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
-	I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
-}
-
-void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-				  struct intel_link_m_n *m_n)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
-	enum transcoder transcoder = crtc->config.cpu_transcoder;
-
-	if (INTEL_INFO(dev)->gen >= 5) {
-		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
-		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
-		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
-		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
-	} else {
-		I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
-		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
-		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
-	}
-}
-
-static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct intel_link_m_n m_n = {0};
-	int target_clock, lane, link_bw;
-
-	/* FDI is a binary signal running at ~2.7GHz, encoding
-	 * each output octet as 10 bits. The actual frequency
-	 * is stored as a divider into a 100MHz clock, and the
-	 * mode pixel clock is stored in units of 1KHz.
-	 * Hence the bw of each lane in terms of the mode signal
-	 * is:
-	 */
-	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
-
-	if (intel_crtc->config.pixel_target_clock)
-		target_clock = intel_crtc->config.pixel_target_clock;
-	else
-		target_clock = adjusted_mode->clock;
-
-	lane = ironlake_get_lanes_required(target_clock, link_bw,
-					   intel_crtc->config.pipe_bpp);
-
-	intel_crtc->fdi_lanes = lane;
-
-	if (intel_crtc->config.pixel_multiplier > 1)
-		link_bw *= intel_crtc->config.pixel_multiplier;
-	intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
-			       link_bw, &m_n);
-
-	intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
+	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
 }
 
 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
-				      intel_clock_t *clock, u32 *fp,
+				      u32 *fp,
 				      intel_clock_t *reduced_clock, u32 *fp2)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
@@ -5501,7 +5570,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	struct intel_encoder *intel_encoder;
 	uint32_t dpll;
 	int factor, num_connectors = 0;
-	bool is_lvds = false, is_sdvo = false, is_tv = false;
+	bool is_lvds = false, is_sdvo = false;
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 		switch (intel_encoder->type) {
@@ -5511,11 +5580,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 		case INTEL_OUTPUT_SDVO:
 		case INTEL_OUTPUT_HDMI:
 			is_sdvo = true;
-			if (intel_encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
 			break;
 		}
 
@@ -5526,13 +5590,13 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	factor = 21;
 	if (is_lvds) {
 		if ((intel_panel_use_ssc(dev_priv) &&
-		     dev_priv->lvds_ssc_freq == 100) ||
+		     dev_priv->vbt.lvds_ssc_freq == 100) ||
 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
 			factor = 25;
-	} else if (is_sdvo && is_tv)
+	} else if (intel_crtc->config.sdvo_tv_clock)
 		factor = 20;
 
-	if (clock->m < factor * clock->n)
+	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
 		*fp |= FP_CB_TUNE;
 
 	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -5544,23 +5608,21 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 		dpll |= DPLLB_MODE_LVDS;
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
-	if (is_sdvo) {
-		if (intel_crtc->config.pixel_multiplier > 1) {
-			dpll |= (intel_crtc->config.pixel_multiplier - 1)
-				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-		}
-		dpll |= DPLL_DVO_HIGH_SPEED;
-	}
-	if (intel_crtc->config.has_dp_encoder &&
-	    intel_crtc->config.has_pch_encoder)
+
+	dpll |= (intel_crtc->config.pixel_multiplier - 1)
+		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+	if (is_sdvo)
+		dpll |= DPLL_DVO_HIGH_SPEED;
+	if (intel_crtc->config.has_dp_encoder)
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
-	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	/* also FPA1 */
-	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
-	switch (clock->p2) {
+	switch (intel_crtc->config.dpll.p2) {
 	case 5:
 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 		break;
@@ -5575,18 +5637,12 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 		break;
 	}
 
-	if (is_sdvo && is_tv)
-		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (is_tv)
-		/* XXX: just matching BIOS for now */
-		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
-		dpll |= 3;
-	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
-	return dpll;
+	return dpll | DPLL_VCO_ENABLE;
 }
 
 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
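To put numbers on the return statement of ironlake_get_lanes_required() above: assuming a 2.7 GHz FDI link, i.e. link_bw = 270000 in the driver's kHz-based units, and ignoring any headroom applied when bps is computed, a 24 bpp 1920x1080@60 mode comes out at two lanes:

	/* Worked example, not driver code: target_clock = 148500 kHz, bpp = 24 */
	u32 bps = 148500 * 24;			/* = 3564000 */
	int lane = bps / (270000 * 8) + 1;	/* = 3564000 / 2160000 + 1 = 2 */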
@@ -5596,19 +5652,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	int num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0;
+	u32 dpll = 0, fp = 0, fp2 = 0;
 	bool ok, has_reduced_clock = false;
 	bool is_lvds = false;
 	struct intel_encoder *encoder;
+	struct intel_shared_dpll *pll;
 	int ret;
-	bool dither, fdi_config_ok;
 
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
@@ -5623,11 +5676,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
 	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
-	intel_crtc->config.cpu_transcoder = pipe;
-
-	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+	ok = ironlake_compute_clocks(crtc, &clock,
 				     &has_reduced_clock, &reduced_clock);
-	if (!ok) {
+	if (!ok && !intel_crtc->config.clock_set) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
@@ -5643,34 +5694,31 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	/* determine panel color depth */
-	dither = intel_crtc->config.dither;
-	if (is_lvds && dev_priv->lvds_dither)
-		dither = true;
-
-	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-	if (has_reduced_clock)
-		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-		      reduced_clock.m2;
-
-	dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
-				     has_reduced_clock ? &fp2 : NULL);
-
-	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
-	drm_mode_debug_printmodeline(mode);
-
 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 	if (intel_crtc->config.has_pch_encoder) {
-		struct intel_pch_pll *pll;
+		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+		if (has_reduced_clock)
+			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
+
+		dpll = ironlake_compute_dpll(intel_crtc,
+					     &fp, &reduced_clock,
+					     has_reduced_clock ? &fp2 : NULL);
+
+		intel_crtc->config.dpll_hw_state.dpll = dpll;
+		intel_crtc->config.dpll_hw_state.fp0 = fp;
+		if (has_reduced_clock)
+			intel_crtc->config.dpll_hw_state.fp1 = fp2;
+		else
+			intel_crtc->config.dpll_hw_state.fp1 = fp;
 
-		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+		pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
 		if (pll == NULL) {
-			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
-					 pipe);
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(pipe));
 			return -EINVAL;
 		}
 	} else
-		intel_put_pch_pll(intel_crtc);
+		intel_put_shared_dpll(intel_crtc);
 
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
@@ -5679,11 +5727,18 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	if (intel_crtc->pch_pll) {
-		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+	if (is_lvds && has_reduced_clock && i915_powersave)
+		intel_crtc->lowfreq_avail = true;
+	else
+		intel_crtc->lowfreq_avail = false;
+
+	if (intel_crtc->config.has_pch_encoder) {
+		pll = intel_crtc_to_shared_dpll(intel_crtc);
+
+		I915_WRITE(PCH_DPLL(pll->id), dpll);
 
 		/* Wait for the clocks to stabilize. */
-		POSTING_READ(intel_crtc->pch_pll->pll_reg);
+		POSTING_READ(PCH_DPLL(pll->id));
 		udelay(150);
 
 		/* The pixel multiplier can only be updated once the
@@ -5691,32 +5746,25 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		 *
 		 * So write it again.
 		 */
-		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-	}
+		I915_WRITE(PCH_DPLL(pll->id), dpll);
 
-	intel_crtc->lowfreq_avail = false;
-	if (intel_crtc->pch_pll) {
-		if (is_lvds && has_reduced_clock && i915_powersave) {
-			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
-			intel_crtc->lowfreq_avail = true;
-		} else {
-			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
-		}
+		if (has_reduced_clock)
+			I915_WRITE(PCH_FP1(pll->id), fp2);
+		else
+			I915_WRITE(PCH_FP1(pll->id), fp);
 	}
 
-	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+	intel_set_pipe_timings(intel_crtc);
 
-	/* Note, this also computes intel_crtc->fdi_lanes which is used below in
-	 * ironlake_check_fdi_lanes. */
-	intel_crtc->fdi_lanes = 0;
-	if (intel_crtc->config.has_pch_encoder)
-		ironlake_fdi_set_m_n(crtc);
-
-	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
+	if (intel_crtc->config.has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(intel_crtc,
+					     &intel_crtc->config.fdi_m_n);
+	}
 
-	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
+	if (IS_IVYBRIDGE(dev))
+		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
 
-	intel_wait_for_vblank(dev, pipe);
+	ironlake_set_pipeconf(crtc);
 
 	/* Set up the display plane register */
 	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
@@ -5726,9 +5774,46 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
 	intel_update_watermarks(dev);
 
-	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+	return ret;
+}
 
-	return fdi_config_ok ? ret : -EINVAL;
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+					struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder transcoder = pipe_config->cpu_transcoder;
+
+	pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
+	pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
+	pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+					& ~TU_SIZE_MASK;
+	pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+	pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+				    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void ironlake_get_pfit_config(struct intel_crtc *crtc,
+				     struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PF_CTL(crtc->pipe));
+
+	if (tmp & PF_ENABLE) {
+		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
+		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+
+		/* We currently do not free assignements of panel fitters on
+		 * ivb/hsw (since we don't use the higher upscaling modes which
+		 * differentiates them) so just WARN about this case for now. */
+		if (IS_GEN7(dev)) {
+			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
+				PF_PIPE_SEL_IVB(crtc->pipe));
+		}
+	}
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
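PIPE_DATA_M1, read back twice in ironlake_get_fdi_m_n_config() above, packs two fields: the TU size (stored minus one) above TU_SIZE_SHIFT and the gmch m value below it. A decode sketch reusing the masks from i915_reg.h:

	uint32_t data_m1 = I915_READ(PIPE_DATA_M1(transcoder));
	uint32_t gmch_m = data_m1 & ~TU_SIZE_MASK;
	uint32_t tu = ((data_m1 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;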
@@ -5738,42 +5823,67 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	pipe_config->cpu_transcoder = crtc->pipe;
+	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
 	tmp = I915_READ(PIPECONF(crtc->pipe));
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
-	if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
+	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+		struct intel_shared_dpll *pll;
+
 		pipe_config->has_pch_encoder = true;
 
+		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+
+		/* XXX: Can't properly read out the pch dpll pixel multiplier
+		 * since we don't have state tracking for pch clocks yet. */
+		pipe_config->pixel_multiplier = 1;
+
+		if (HAS_PCH_IBX(dev_priv->dev)) {
+			pipe_config->shared_dpll = crtc->pipe;
+		} else {
+			tmp = I915_READ(PCH_DPLL_SEL);
+			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
+				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
+			else
+				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
+		}
+
+		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+		WARN_ON(!pll->get_hw_state(dev_priv, pll,
+					   &pipe_config->dpll_hw_state));
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
+
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	ironlake_get_pfit_config(crtc, pipe_config);
+
 	return true;
 }
 
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool enable = false;
 	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-		if (crtc->pipe != PIPE_A && crtc->base.enabled)
-			enable = true;
-		/* XXX: Should check for edp transcoder here, but thanks to init
-		 * sequence that's not yet available. Just in case desktop eDP
-		 * on PORT D is possible on haswell, too. */
-	}
+		if (!crtc->base.enabled)
+			continue;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-			    base.head) {
-		if (encoder->type != INTEL_OUTPUT_EDP &&
-		    encoder->connectors_active)
+		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
 			enable = true;
 	}
 
-	/* Even the eDP panel fitter is outside the always-on well. */
-	if (dev_priv->pch_pf_size)
-		enable = true;
-
 	intel_set_power_well(dev, enable);
 }
 
@@ -5784,68 +5894,28 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
-	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	int num_connectors = 0;
-	bool is_cpu_edp = false;
-	struct intel_encoder *encoder;
 	int ret;
-	bool dither;
-
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		switch (encoder->type) {
-		case INTEL_OUTPUT_EDP:
-			if (!intel_encoder_is_pch_edp(&encoder->base))
-				is_cpu_edp = true;
-			break;
-		}
-
-		num_connectors++;
-	}
-
-	if (is_cpu_edp)
-		intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
-	else
-		intel_crtc->config.cpu_transcoder = pipe;
-
-	/* We are not sure yet this won't happen. */
-	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
-	     INTEL_PCH_TYPE(dev));
 
-	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
-	     num_connectors, pipe_name(pipe));
-
-	WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
-		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
-
-	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
-
-	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+	if (!intel_ddi_pll_mode_set(crtc))
 		return -EINVAL;
 
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	/* determine panel color depth */
-	dither = intel_crtc->config.dither;
-
-	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
-	drm_mode_debug_printmodeline(mode);
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
 	intel_crtc->lowfreq_avail = false;
 
-	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+	intel_set_pipe_timings(intel_crtc);
 
-	if (intel_crtc->config.has_pch_encoder)
-		ironlake_fdi_set_m_n(crtc);
+	if (intel_crtc->config.has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(intel_crtc,
+					     &intel_crtc->config.fdi_m_n);
+	}
 
-	haswell_set_pipeconf(crtc, adjusted_mode, dither);
+	haswell_set_pipeconf(crtc);
 
 	intel_set_pipe_csc(crtc);
 
@@ -5857,8 +5927,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 
 	intel_update_watermarks(dev);
 
-	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
-
 	return ret;
 }
 
@@ -5867,22 +5935,69 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5867{ 5935{
5868 struct drm_device *dev = crtc->base.dev; 5936 struct drm_device *dev = crtc->base.dev;
5869 struct drm_i915_private *dev_priv = dev->dev_private; 5937 struct drm_i915_private *dev_priv = dev->dev_private;
5938 enum intel_display_power_domain pfit_domain;
5870 uint32_t tmp; 5939 uint32_t tmp;
5871 5940
5872 tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder)); 5941 pipe_config->cpu_transcoder = crtc->pipe;
5942 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5943
5944 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
5945 if (tmp & TRANS_DDI_FUNC_ENABLE) {
5946 enum pipe trans_edp_pipe;
5947 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
5948 default:
5949 WARN(1, "unknown pipe linked to edp transcoder\n");
5950 case TRANS_DDI_EDP_INPUT_A_ONOFF:
5951 case TRANS_DDI_EDP_INPUT_A_ON:
5952 trans_edp_pipe = PIPE_A;
5953 break;
5954 case TRANS_DDI_EDP_INPUT_B_ONOFF:
5955 trans_edp_pipe = PIPE_B;
5956 break;
5957 case TRANS_DDI_EDP_INPUT_C_ONOFF:
5958 trans_edp_pipe = PIPE_C;
5959 break;
5960 }
5961
5962 if (trans_edp_pipe == crtc->pipe)
5963 pipe_config->cpu_transcoder = TRANSCODER_EDP;
5964 }
5965
5966 if (!intel_display_power_enabled(dev,
5967 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
5968 return false;
5969
5970 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
5873 if (!(tmp & PIPECONF_ENABLE)) 5971 if (!(tmp & PIPECONF_ENABLE))
5874 return false; 5972 return false;
5875 5973
5876 /* 5974 /*
5877 * aswell has only FDI/PCH transcoder A. It is which is connected to 5975 * Haswell has only FDI/PCH transcoder A, which is connected to
5878 * DDI E. So just check whether this pipe is wired to DDI E and whether 5976 * DDI E. So just check whether this pipe is wired to DDI E and whether
5879 * the PCH transcoder is on. 5977 * the PCH transcoder is on.
5880 */ 5978 */
5881 tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe)); 5979 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
5882 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) && 5980 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
5883 I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE) 5981 I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
5884 pipe_config->has_pch_encoder = true; 5982 pipe_config->has_pch_encoder = true;
5885 5983
5984 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
5985 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5986 FDI_DP_PORT_WIDTH_SHIFT) + 1;
5987
5988 ironlake_get_fdi_m_n_config(crtc, pipe_config);
5989 }
5990
5991 intel_get_pipe_timings(crtc, pipe_config);
5992
5993 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
5994 if (intel_display_power_enabled(dev, pfit_domain))
5995 ironlake_get_pfit_config(crtc, pipe_config);
5996
5997 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
5998 (I915_READ(IPS_CTL) & IPS_ENABLE);
5999
6000 pipe_config->pixel_multiplier = 1;
5886 6001
5887 return true; 6002 return true;
5888} 6003}
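For reference, the eDP-input decode added above as a standalone unit. The field encoding below is an assumed stand-in, not necessarily the i915_reg.h values:

    #include <stdint.h>
    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };

    /* Assumed field encoding; illustrative stand-ins only. */
    #define EDP_INPUT_MASK          (7u << 12)
    #define EDP_INPUT_A_ON          (0u << 12)
    #define EDP_INPUT_A_ONOFF       (4u << 12)
    #define EDP_INPUT_B_ONOFF       (5u << 12)
    #define EDP_INPUT_C_ONOFF       (6u << 12)

    static enum pipe decode_edp_pipe(uint32_t func_ctl)
    {
            switch (func_ctl & EDP_INPUT_MASK) {
            default:
                    /* Bogus programming: warn and fall back to pipe A,
                     * mirroring the WARN + fallthrough in the hunk. */
                    fprintf(stderr, "unknown pipe linked to edp transcoder\n");
                    /* fall through */
            case EDP_INPUT_A_ON:
            case EDP_INPUT_A_ONOFF:
                    return PIPE_A;
            case EDP_INPUT_B_ONOFF:
                    return PIPE_B;
            case EDP_INPUT_C_ONOFF:
                    return PIPE_C;
            }
    }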
@@ -6120,7 +6235,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
6120 eldv |= IBX_ELD_VALIDB << 4; 6235 eldv |= IBX_ELD_VALIDB << 4;
6121 eldv |= IBX_ELD_VALIDB << 8; 6236 eldv |= IBX_ELD_VALIDB << 8;
6122 } else { 6237 } else {
6123 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); 6238 DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
6124 eldv = IBX_ELD_VALIDB << ((i - 1) * 4); 6239 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6125 } 6240 }
6126 6241
@@ -6188,16 +6303,31 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
6188 struct drm_device *dev = crtc->dev; 6303 struct drm_device *dev = crtc->dev;
6189 struct drm_i915_private *dev_priv = dev->dev_private; 6304 struct drm_i915_private *dev_priv = dev->dev_private;
6190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6305 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6191 int palreg = PALETTE(intel_crtc->pipe); 6306 enum pipe pipe = intel_crtc->pipe;
6307 int palreg = PALETTE(pipe);
6192 int i; 6308 int i;
6309 bool reenable_ips = false;
6193 6310
6194 /* The clocks have to be on to load the palette. */ 6311 /* The clocks have to be on to load the palette. */
6195 if (!crtc->enabled || !intel_crtc->active) 6312 if (!crtc->enabled || !intel_crtc->active)
6196 return; 6313 return;
6197 6314
6315 if (!HAS_PCH_SPLIT(dev_priv->dev))
6316 assert_pll_enabled(dev_priv, pipe);
6317
6198 /* use legacy palette for Ironlake */ 6318 /* use legacy palette for Ironlake */
6199 if (HAS_PCH_SPLIT(dev)) 6319 if (HAS_PCH_SPLIT(dev))
6200 palreg = LGC_PALETTE(intel_crtc->pipe); 6320 palreg = LGC_PALETTE(pipe);
6321
6322 /* Workaround: Do not read or write the pipe palette/gamma data while
6323 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6324 */
6325 if (intel_crtc->config.ips_enabled &&
6326 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6327 GAMMA_MODE_MODE_SPLIT)) {
6328 hsw_disable_ips(intel_crtc);
6329 reenable_ips = true;
6330 }
6201 6331
6202 for (i = 0; i < 256; i++) { 6332 for (i = 0; i < 256; i++) {
6203 I915_WRITE(palreg + 4 * i, 6333 I915_WRITE(palreg + 4 * i,
@@ -6205,6 +6335,9 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
6205 (intel_crtc->lut_g[i] << 8) | 6335 (intel_crtc->lut_g[i] << 8) |
6206 intel_crtc->lut_b[i]); 6336 intel_crtc->lut_b[i]);
6207 } 6337 }
6338
6339 if (reenable_ips)
6340 hsw_enable_ips(intel_crtc);
6208} 6341}
6209 6342
6210static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 6343static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
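The reenable_ips dance added to intel_crtc_load_lut() above is the classic disable-around-access workaround: if an active feature makes a register range unsafe to touch, turn it off, do the access, and turn it back on only if it was on to begin with. A stripped-down sketch with stand-in types:

    #include <stdbool.h>

    struct crtc_state {
            bool ips_enabled;
            bool split_gamma;
            unsigned int lut[256];
    };

    static void ips_disable(struct crtc_state *s) { s->ips_enabled = false; }
    static void ips_enable(struct crtc_state *s)  { s->ips_enabled = true; }

    static void load_lut(struct crtc_state *s, const unsigned int *lut)
    {
            bool reenable_ips = false;

            /* The palette is unsafe to touch while IPS runs in split
             * gamma mode, so park IPS for the duration of the writes. */
            if (s->ips_enabled && s->split_gamma) {
                    ips_disable(s);
                    reenable_ips = true;
            }

            for (int i = 0; i < 256; i++)
                    s->lut[i] = lut[i];

            if (reenable_ips)
                    ips_enable(s);
    }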
@@ -6451,7 +6584,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6451 intel_crtc->cursor_width = width; 6584 intel_crtc->cursor_width = width;
6452 intel_crtc->cursor_height = height; 6585 intel_crtc->cursor_height = height;
6453 6586
6454 intel_crtc_update_cursor(crtc, true); 6587 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6455 6588
6456 return 0; 6589 return 0;
6457fail_unpin: 6590fail_unpin:
@@ -6470,7 +6603,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6470 intel_crtc->cursor_x = x; 6603 intel_crtc->cursor_x = x;
6471 intel_crtc->cursor_y = y; 6604 intel_crtc->cursor_y = y;
6472 6605
6473 intel_crtc_update_cursor(crtc, true); 6606 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6474 6607
6475 return 0; 6608 return 0;
6476} 6609}
@@ -6791,8 +6924,10 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6791 return 0; 6924 return 0;
6792 } 6925 }
6793 6926
6794 /* XXX: Handle the 100Mhz refclk */ 6927 if (IS_PINEVIEW(dev))
6795 intel_clock(dev, 96000, &clock); 6928 pineview_clock(96000, &clock);
6929 else
6930 i9xx_clock(96000, &clock);
6796 } else { 6931 } else {
6797 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 6932 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6798 6933
@@ -6804,9 +6939,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6804 if ((dpll & PLL_REF_INPUT_MASK) == 6939 if ((dpll & PLL_REF_INPUT_MASK) ==
6805 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 6940 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6806 /* XXX: might not be 66MHz */ 6941 /* XXX: might not be 66MHz */
6807 intel_clock(dev, 66000, &clock); 6942 i9xx_clock(66000, &clock);
6808 } else 6943 } else
6809 intel_clock(dev, 48000, &clock); 6944 i9xx_clock(48000, &clock);
6810 } else { 6945 } else {
6811 if (dpll & PLL_P1_DIVIDE_BY_TWO) 6946 if (dpll & PLL_P1_DIVIDE_BY_TWO)
6812 clock.p1 = 2; 6947 clock.p1 = 2;
@@ -6819,7 +6954,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6819 else 6954 else
6820 clock.p2 = 2; 6955 clock.p2 = 2;
6821 6956
6822 intel_clock(dev, 48000, &clock); 6957 i9xx_clock(48000, &clock);
6823 } 6958 }
6824 } 6959 }
6825 6960
@@ -6950,7 +7085,8 @@ void intel_mark_idle(struct drm_device *dev)
6950 } 7085 }
6951} 7086}
6952 7087
6953void intel_mark_fb_busy(struct drm_i915_gem_object *obj) 7088void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
7089 struct intel_ring_buffer *ring)
6954{ 7090{
6955 struct drm_device *dev = obj->base.dev; 7091 struct drm_device *dev = obj->base.dev;
6956 struct drm_crtc *crtc; 7092 struct drm_crtc *crtc;
@@ -6962,8 +7098,12 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6962 if (!crtc->fb) 7098 if (!crtc->fb)
6963 continue; 7099 continue;
6964 7100
6965 if (to_intel_framebuffer(crtc->fb)->obj == obj) 7101 if (to_intel_framebuffer(crtc->fb)->obj != obj)
6966 intel_increase_pllclock(crtc); 7102 continue;
7103
7104 intel_increase_pllclock(crtc);
7105 if (ring && intel_fbc_enabled(dev))
7106 ring->fbc_dirty = true;
6967 } 7107 }
6968} 7108}
6969 7109
@@ -6984,6 +7124,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
6984 kfree(work); 7124 kfree(work);
6985 } 7125 }
6986 7126
7127 intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
7128
6987 drm_crtc_cleanup(crtc); 7129 drm_crtc_cleanup(crtc);
6988 7130
6989 kfree(intel_crtc); 7131 kfree(intel_crtc);
@@ -7411,7 +7553,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7411 goto cleanup_pending; 7553 goto cleanup_pending;
7412 7554
7413 intel_disable_fbc(dev); 7555 intel_disable_fbc(dev);
7414 intel_mark_fb_busy(obj); 7556 intel_mark_fb_busy(obj, NULL);
7415 mutex_unlock(&dev->struct_mutex); 7557 mutex_unlock(&dev->struct_mutex);
7416 7558
7417 trace_i915_flip_request(intel_crtc->plane, obj); 7559 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7442,28 +7584,6 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
7442 .load_lut = intel_crtc_load_lut, 7584 .load_lut = intel_crtc_load_lut,
7443}; 7585};
7444 7586
7445bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
7446{
7447 struct intel_encoder *other_encoder;
7448 struct drm_crtc *crtc = &encoder->new_crtc->base;
7449
7450 if (WARN_ON(!crtc))
7451 return false;
7452
7453 list_for_each_entry(other_encoder,
7454 &crtc->dev->mode_config.encoder_list,
7455 base.head) {
7456
7457 if (&other_encoder->new_crtc->base != crtc ||
7458 encoder == other_encoder)
7459 continue;
7460 else
7461 return true;
7462 }
7463
7464 return false;
7465}
7466
7467static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, 7587static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
7468 struct drm_crtc *crtc) 7588 struct drm_crtc *crtc)
7469{ 7589{
@@ -7531,13 +7651,39 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
7531 } 7651 }
7532} 7652}
7533 7653
7654static void
7655connected_sink_compute_bpp(struct intel_connector * connector,
7656 struct intel_crtc_config *pipe_config)
7657{
7658 int bpp = pipe_config->pipe_bpp;
7659
7660 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
7661 connector->base.base.id,
7662 drm_get_connector_name(&connector->base));
7663
7664 /* Don't use an invalid EDID bpc value */
7665 if (connector->base.display_info.bpc &&
7666 connector->base.display_info.bpc * 3 < bpp) {
7667 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
7668 bpp, connector->base.display_info.bpc*3);
7669 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
7670 }
7671
7672 /* Clamp bpp to 8 on screens without EDID 1.4 */
7673 if (connector->base.display_info.bpc == 0 && bpp > 24) {
7674 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
7675 bpp);
7676 pipe_config->pipe_bpp = 24;
7677 }
7678}
7679
7534static int 7680static int
7535pipe_config_set_bpp(struct drm_crtc *crtc, 7681compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7536 struct drm_framebuffer *fb, 7682 struct drm_framebuffer *fb,
7537 struct intel_crtc_config *pipe_config) 7683 struct intel_crtc_config *pipe_config)
7538{ 7684{
7539 struct drm_device *dev = crtc->dev; 7685 struct drm_device *dev = crtc->base.dev;
7540 struct drm_connector *connector; 7686 struct intel_connector *connector;
7541 int bpp; 7687 int bpp;
7542 7688
7543 switch (fb->pixel_format) { 7689 switch (fb->pixel_format) {
@@ -7580,22 +7726,66 @@ pipe_config_set_bpp(struct drm_crtc *crtc,
7580 7726
7581 /* Clamp display bpp to EDID value */ 7727 /* Clamp display bpp to EDID value */
7582 list_for_each_entry(connector, &dev->mode_config.connector_list, 7728 list_for_each_entry(connector, &dev->mode_config.connector_list,
7583 head) { 7729 base.head) {
7584 if (connector->encoder && connector->encoder->crtc != crtc) 7730 if (!connector->new_encoder ||
7731 connector->new_encoder->new_crtc != crtc)
7585 continue; 7732 continue;
7586 7733
7587 /* Don't use an invalid EDID bpc value */ 7734 connected_sink_compute_bpp(connector, pipe_config);
7588 if (connector->display_info.bpc &&
7589 connector->display_info.bpc * 3 < bpp) {
7590 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
7591 bpp, connector->display_info.bpc*3);
7592 pipe_config->pipe_bpp = connector->display_info.bpc*3;
7593 }
7594 } 7735 }
7595 7736
7596 return bpp; 7737 return bpp;
7597} 7738}
7598 7739
7740static void intel_dump_pipe_config(struct intel_crtc *crtc,
7741 struct intel_crtc_config *pipe_config,
7742 const char *context)
7743{
7744 DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
7745 context, pipe_name(crtc->pipe));
7746
7747 DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
7748 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
7749 pipe_config->pipe_bpp, pipe_config->dither);
7750 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7751 pipe_config->has_pch_encoder,
7752 pipe_config->fdi_lanes,
7753 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
7754 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
7755 pipe_config->fdi_m_n.tu);
7756 DRM_DEBUG_KMS("requested mode:\n");
7757 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
7758 DRM_DEBUG_KMS("adjusted mode:\n");
7759 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
7760 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7761 pipe_config->gmch_pfit.control,
7762 pipe_config->gmch_pfit.pgm_ratios,
7763 pipe_config->gmch_pfit.lvds_border_bits);
7764 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
7765 pipe_config->pch_pfit.pos,
7766 pipe_config->pch_pfit.size);
7767 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
7768}
7769
7770static bool check_encoder_cloning(struct drm_crtc *crtc)
7771{
7772 int num_encoders = 0;
7773 bool uncloneable_encoders = false;
7774 struct intel_encoder *encoder;
7775
7776 list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
7777 base.head) {
7778 if (&encoder->new_crtc->base != crtc)
7779 continue;
7780
7781 num_encoders++;
7782 if (!encoder->cloneable)
7783 uncloneable_encoders = true;
7784 }
7785
7786 return !(num_encoders > 1 && uncloneable_encoders);
7787}
7788
7599static struct intel_crtc_config * 7789static struct intel_crtc_config *
7600intel_modeset_pipe_config(struct drm_crtc *crtc, 7790intel_modeset_pipe_config(struct drm_crtc *crtc,
7601 struct drm_framebuffer *fb, 7791 struct drm_framebuffer *fb,
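check_encoder_cloning() above replaces the per-encoder intel_encoder_check_is_cloned() with one global rule: a crtc may drive several encoders only if every one of them is marked cloneable. The same predicate, standalone, with simplified stand-in types:

    #include <stdbool.h>
    #include <stddef.h>

    struct crtc;

    struct encoder {
            const struct crtc *new_crtc;
            bool cloneable;
    };

    static bool cloning_ok(const struct crtc *crtc,
                           const struct encoder *encoders, size_t n)
    {
            size_t num_on_crtc = 0;
            bool uncloneable = false;

            for (size_t i = 0; i < n; i++) {
                    if (encoders[i].new_crtc != crtc)
                            continue;
                    num_on_crtc++;
                    if (!encoders[i].cloneable)
                            uncloneable = true;
            }

            /* One encoder is always fine; several only if all clone. */
            return !(num_on_crtc > 1 && uncloneable);
    }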
@@ -7605,7 +7795,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7605 struct drm_encoder_helper_funcs *encoder_funcs; 7795 struct drm_encoder_helper_funcs *encoder_funcs;
7606 struct intel_encoder *encoder; 7796 struct intel_encoder *encoder;
7607 struct intel_crtc_config *pipe_config; 7797 struct intel_crtc_config *pipe_config;
7608 int plane_bpp; 7798 int plane_bpp, ret = -EINVAL;
7799 bool retry = true;
7800
7801 if (!check_encoder_cloning(crtc)) {
7802 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
7803 return ERR_PTR(-EINVAL);
7804 }
7609 7805
7610 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 7806 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7611 if (!pipe_config) 7807 if (!pipe_config)
@@ -7613,11 +7809,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7613 7809
7614 drm_mode_copy(&pipe_config->adjusted_mode, mode); 7810 drm_mode_copy(&pipe_config->adjusted_mode, mode);
7615 drm_mode_copy(&pipe_config->requested_mode, mode); 7811 drm_mode_copy(&pipe_config->requested_mode, mode);
7616 7812 pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
7617 plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config); 7813 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7814
7815 /* Compute a starting value for pipe_config->pipe_bpp taking the source
7816 * plane pixel format and any sink constraints into account. Returns the
7817 * source plane bpp so that dithering can be selected on mismatches
7818 * after encoders and crtc also have had their say. */
7819 plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
7820 fb, pipe_config);
7618 if (plane_bpp < 0) 7821 if (plane_bpp < 0)
7619 goto fail; 7822 goto fail;
7620 7823
7824encoder_retry:
7825 /* Ensure the port clock defaults are reset when retrying. */
7826 pipe_config->port_clock = 0;
7827 pipe_config->pixel_multiplier = 1;
7828
7621 /* Pass our mode to the connectors and the CRTC to give them a chance to 7829 /* Pass our mode to the connectors and the CRTC to give them a chance to
7622 * adjust it according to limitations or connector properties, and also 7830 * adjust it according to limitations or connector properties, and also
7623 * a chance to reject the mode entirely. 7831 * a chance to reject the mode entirely.
@@ -7646,11 +7854,27 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7646 } 7854 }
7647 } 7855 }
7648 7856
7649 if (!(intel_crtc_compute_config(crtc, pipe_config))) { 7857 /* Set default port clock if not overwritten by the encoder. Needs to be
7858 * done afterwards in case the encoder adjusts the mode. */
7859 if (!pipe_config->port_clock)
7860 pipe_config->port_clock = pipe_config->adjusted_mode.clock;
7861
7862 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
7863 if (ret < 0) {
7650 DRM_DEBUG_KMS("CRTC fixup failed\n"); 7864 DRM_DEBUG_KMS("CRTC fixup failed\n");
7651 goto fail; 7865 goto fail;
7652 } 7866 }
7653 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 7867
7868 if (ret == RETRY) {
7869 if (WARN(!retry, "loop in pipe configuration computation\n")) {
7870 ret = -EINVAL;
7871 goto fail;
7872 }
7873
7874 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
7875 retry = false;
7876 goto encoder_retry;
7877 }
7654 7878
7655 pipe_config->dither = pipe_config->pipe_bpp != plane_bpp; 7879 pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
7656 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", 7880 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
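The encoder_retry label above implements a single bounded retry: the crtc fixup may return RETRY once, typically after tightening bandwidth assumptions, and a second RETRY is treated as an infinite loop. The protocol in isolation, with illustrative names:

    #include <stdio.h>

    #define RETRY 1

    static int compute_config(int attempt)
    {
            /* Pretend the first pass hits a bandwidth limit. */
            return attempt == 0 ? RETRY : 0;
    }

    static int pipe_config_with_retry(void)
    {
            int attempt = 0;
            int ret;

            for (;;) {
                    /* Each pass starts from clean defaults, as the patch
                     * resets port_clock and pixel_multiplier before
                     * re-running the encoders. */
                    ret = compute_config(attempt);
                    if (ret < 0)
                            return ret;
                    if (ret != RETRY)
                            return 0;
                    if (attempt++ > 0) {
                            fprintf(stderr, "loop in pipe configuration computation\n");
                            return -1;
                    }
            }
    }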
@@ -7659,7 +7883,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7659 return pipe_config; 7883 return pipe_config;
7660fail: 7884fail:
7661 kfree(pipe_config); 7885 kfree(pipe_config);
7662 return ERR_PTR(-EINVAL); 7886 return ERR_PTR(ret);
7663} 7887}
7664 7888
7665/* Computes which crtcs are affected and sets the relevant bits in the mask. For 7889/* Computes which crtcs are affected and sets the relevant bits in the mask. For
@@ -7755,6 +7979,9 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7755 */ 7979 */
7756 *modeset_pipes &= 1 << intel_crtc->pipe; 7980 *modeset_pipes &= 1 << intel_crtc->pipe;
7757 *prepare_pipes &= 1 << intel_crtc->pipe; 7981 *prepare_pipes &= 1 << intel_crtc->pipe;
7982
7983 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7984 *modeset_pipes, *prepare_pipes, *disable_pipes);
7758} 7985}
7759 7986
7760static bool intel_crtc_in_use(struct drm_crtc *crtc) 7987static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -7821,31 +8048,114 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7821 list_for_each_entry((intel_crtc), \ 8048 list_for_each_entry((intel_crtc), \
7822 &(dev)->mode_config.crtc_list, \ 8049 &(dev)->mode_config.crtc_list, \
7823 base.head) \ 8050 base.head) \
7824 if (mask & (1 <<(intel_crtc)->pipe)) \ 8051 if (mask & (1 <<(intel_crtc)->pipe))
7825 8052
7826static bool 8053static bool
7827intel_pipe_config_compare(struct intel_crtc_config *current_config, 8054intel_pipe_config_compare(struct drm_device *dev,
8055 struct intel_crtc_config *current_config,
7828 struct intel_crtc_config *pipe_config) 8056 struct intel_crtc_config *pipe_config)
7829{ 8057{
7830 if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) { 8058#define PIPE_CONF_CHECK_X(name) \
7831 DRM_ERROR("mismatch in has_pch_encoder " 8059 if (current_config->name != pipe_config->name) { \
7832 "(expected %i, found %i)\n", 8060 DRM_ERROR("mismatch in " #name " " \
7833 current_config->has_pch_encoder, 8061 "(expected 0x%08x, found 0x%08x)\n", \
7834 pipe_config->has_pch_encoder); 8062 current_config->name, \
7835 return false; 8063 pipe_config->name); \
7836 } 8064 return false; \
8065 }
8066
8067#define PIPE_CONF_CHECK_I(name) \
8068 if (current_config->name != pipe_config->name) { \
8069 DRM_ERROR("mismatch in " #name " " \
8070 "(expected %i, found %i)\n", \
8071 current_config->name, \
8072 pipe_config->name); \
8073 return false; \
8074 }
8075
8076#define PIPE_CONF_CHECK_FLAGS(name, mask) \
8077 if ((current_config->name ^ pipe_config->name) & (mask)) { \
8078 DRM_ERROR("mismatch in " #name " " \
8079 "(expected %i, found %i)\n", \
8080 current_config->name & (mask), \
8081 pipe_config->name & (mask)); \
8082 return false; \
8083 }
8084
8085#define PIPE_CONF_QUIRK(quirk) \
8086 ((current_config->quirks | pipe_config->quirks) & (quirk))
8087
8088 PIPE_CONF_CHECK_I(cpu_transcoder);
8089
8090 PIPE_CONF_CHECK_I(has_pch_encoder);
8091 PIPE_CONF_CHECK_I(fdi_lanes);
8092 PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
8093 PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
8094 PIPE_CONF_CHECK_I(fdi_m_n.link_m);
8095 PIPE_CONF_CHECK_I(fdi_m_n.link_n);
8096 PIPE_CONF_CHECK_I(fdi_m_n.tu);
8097
8098 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
8099 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
8100 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
8101 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
8102 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
8103 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
8104
8105 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
8106 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
8107 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
8108 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
8109 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
8110 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
8111
8112 if (!HAS_PCH_SPLIT(dev))
8113 PIPE_CONF_CHECK_I(pixel_multiplier);
8114
8115 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8116 DRM_MODE_FLAG_INTERLACE);
8117
8118 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8119 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8120 DRM_MODE_FLAG_PHSYNC);
8121 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8122 DRM_MODE_FLAG_NHSYNC);
8123 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8124 DRM_MODE_FLAG_PVSYNC);
8125 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8126 DRM_MODE_FLAG_NVSYNC);
8127 }
8128
8129 PIPE_CONF_CHECK_I(requested_mode.hdisplay);
8130 PIPE_CONF_CHECK_I(requested_mode.vdisplay);
8131
8132 PIPE_CONF_CHECK_I(gmch_pfit.control);
8133 /* pfit ratios are autocomputed by the hw on gen4+ */
8134 if (INTEL_INFO(dev)->gen < 4)
8135 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
8136 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
8137 PIPE_CONF_CHECK_I(pch_pfit.pos);
8138 PIPE_CONF_CHECK_I(pch_pfit.size);
8139
8140 PIPE_CONF_CHECK_I(ips_enabled);
8141
8142 PIPE_CONF_CHECK_I(shared_dpll);
8143 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8144 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8145 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8146
8147#undef PIPE_CONF_CHECK_X
8148#undef PIPE_CONF_CHECK_I
8149#undef PIPE_CONF_CHECK_FLAGS
8150#undef PIPE_CONF_QUIRK
7837 8151
7838 return true; 8152 return true;
7839} 8153}
7840 8154
7841void 8155static void
7842intel_modeset_check_state(struct drm_device *dev) 8156check_connector_state(struct drm_device *dev)
7843{ 8157{
7844 drm_i915_private_t *dev_priv = dev->dev_private;
7845 struct intel_crtc *crtc;
7846 struct intel_encoder *encoder;
7847 struct intel_connector *connector; 8158 struct intel_connector *connector;
7848 struct intel_crtc_config pipe_config;
7849 8159
7850 list_for_each_entry(connector, &dev->mode_config.connector_list, 8160 list_for_each_entry(connector, &dev->mode_config.connector_list,
7851 base.head) { 8161 base.head) {
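The PIPE_CONF_CHECK_* macros above keep the field access, the comparison and the error message in sync by token-pasting the member name. A self-contained miniature of the same technique:

    #include <stdbool.h>
    #include <stdio.h>

    struct config {
            int fdi_lanes;
            unsigned int dpll;
    };

    /* #name stringifies the member so the message can never drift
     * from the field actually being compared. */
    #define CHECK_I(cur, found, name) do { \
            if ((cur)->name != (found)->name) { \
                    fprintf(stderr, "mismatch in " #name \
                            " (expected %i, found %i)\n", \
                            (cur)->name, (found)->name); \
                    return false; \
            } \
    } while (0)

    #define CHECK_X(cur, found, name) do { \
            if ((cur)->name != (found)->name) { \
                    fprintf(stderr, "mismatch in " #name \
                            " (expected 0x%08x, found 0x%08x)\n", \
                            (cur)->name, (found)->name); \
                    return false; \
            } \
    } while (0)

    static bool configs_match(const struct config *cur,
                              const struct config *found)
    {
            CHECK_I(cur, found, fdi_lanes);
            CHECK_X(cur, found, dpll);
            return true;
    }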
@@ -7856,6 +8166,13 @@ intel_modeset_check_state(struct drm_device *dev)
7856 WARN(&connector->new_encoder->base != connector->base.encoder, 8166 WARN(&connector->new_encoder->base != connector->base.encoder,
7857 "connector's staged encoder doesn't match current encoder\n"); 8167 "connector's staged encoder doesn't match current encoder\n");
7858 } 8168 }
8169}
8170
8171static void
8172check_encoder_state(struct drm_device *dev)
8173{
8174 struct intel_encoder *encoder;
8175 struct intel_connector *connector;
7859 8176
7860 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 8177 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7861 base.head) { 8178 base.head) {
@@ -7907,12 +8224,23 @@ intel_modeset_check_state(struct drm_device *dev)
7907 tracked_pipe, pipe); 8224 tracked_pipe, pipe);
7908 8225
7909 } 8226 }
8227}
8228
8229static void
8230check_crtc_state(struct drm_device *dev)
8231{
8232 drm_i915_private_t *dev_priv = dev->dev_private;
8233 struct intel_crtc *crtc;
8234 struct intel_encoder *encoder;
8235 struct intel_crtc_config pipe_config;
7910 8236
7911 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 8237 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7912 base.head) { 8238 base.head) {
7913 bool enabled = false; 8239 bool enabled = false;
7914 bool active = false; 8240 bool active = false;
7915 8241
8242 memset(&pipe_config, 0, sizeof(pipe_config));
8243
7916 DRM_DEBUG_KMS("[CRTC:%d]\n", 8244 DRM_DEBUG_KMS("[CRTC:%d]\n",
7917 crtc->base.base.id); 8245 crtc->base.base.id);
7918 8246
@@ -7927,6 +8255,7 @@ intel_modeset_check_state(struct drm_device *dev)
7927 if (encoder->connectors_active) 8255 if (encoder->connectors_active)
7928 active = true; 8256 active = true;
7929 } 8257 }
8258
7930 WARN(active != crtc->active, 8259 WARN(active != crtc->active,
7931 "crtc's computed active state doesn't match tracked active state " 8260 "crtc's computed active state doesn't match tracked active state "
7932 "(expected %i, found %i)\n", active, crtc->active); 8261 "(expected %i, found %i)\n", active, crtc->active);
@@ -7934,7 +8263,6 @@ intel_modeset_check_state(struct drm_device *dev)
7934 "crtc's computed enabled state doesn't match tracked enabled state " 8263 "crtc's computed enabled state doesn't match tracked enabled state "
7935 "(expected %i, found %i)\n", enabled, crtc->base.enabled); 8264 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7936 8265
7937 memset(&pipe_config, 0, sizeof(pipe_config));
7938 active = dev_priv->display.get_pipe_config(crtc, 8266 active = dev_priv->display.get_pipe_config(crtc,
7939 &pipe_config); 8267 &pipe_config);
7940 8268
@@ -7942,16 +8270,86 @@ intel_modeset_check_state(struct drm_device *dev)
7942 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 8270 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
7943 active = crtc->active; 8271 active = crtc->active;
7944 8272
8273 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8274 base.head) {
8275 if (encoder->base.crtc != &crtc->base)
8276 continue;
8277 if (encoder->get_config)
8278 encoder->get_config(encoder, &pipe_config);
8279 }
8280
7945 WARN(crtc->active != active, 8281 WARN(crtc->active != active,
7946 "crtc active state doesn't match with hw state " 8282 "crtc active state doesn't match with hw state "
7947 "(expected %i, found %i)\n", crtc->active, active); 8283 "(expected %i, found %i)\n", crtc->active, active);
7948 8284
7949 WARN(active && 8285 if (active &&
7950 !intel_pipe_config_compare(&crtc->config, &pipe_config), 8286 !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
7951 "pipe state doesn't match!\n"); 8287 WARN(1, "pipe state doesn't match!\n");
8288 intel_dump_pipe_config(crtc, &pipe_config,
8289 "[hw state]");
8290 intel_dump_pipe_config(crtc, &crtc->config,
8291 "[sw state]");
8292 }
7952 } 8293 }
7953} 8294}
7954 8295
8296static void
8297check_shared_dpll_state(struct drm_device *dev)
8298{
8299 drm_i915_private_t *dev_priv = dev->dev_private;
8300 struct intel_crtc *crtc;
8301 struct intel_dpll_hw_state dpll_hw_state;
8302 int i;
8303
8304 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8305 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
8306 int enabled_crtcs = 0, active_crtcs = 0;
8307 bool active;
8308
8309 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8310
8311 DRM_DEBUG_KMS("%s\n", pll->name);
8312
8313 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
8314
8315 WARN(pll->active > pll->refcount,
8316 "more active pll users than references: %i vs %i\n",
8317 pll->active, pll->refcount);
8318 WARN(pll->active && !pll->on,
8319 "pll in active use but not on in sw tracking\n");
8320 WARN(pll->on != active,
8321 "pll on state mismatch (expected %i, found %i)\n",
8322 pll->on, active);
8323
8324 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
8325 base.head) {
8326 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
8327 enabled_crtcs++;
8328 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
8329 active_crtcs++;
8330 }
8331 WARN(pll->active != active_crtcs,
8332 "pll active crtcs mismatch (expected %i, found %i)\n",
8333 pll->active, active_crtcs);
8334 WARN(pll->refcount != enabled_crtcs,
8335 "pll enabled crtcs mismatch (expected %i, found %i)\n",
8336 pll->refcount, enabled_crtcs);
8337
8338 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
8339 sizeof(dpll_hw_state)),
8340 "pll hw state mismatch\n");
8341 }
8342}
8343
8344void
8345intel_modeset_check_state(struct drm_device *dev)
8346{
8347 check_connector_state(dev);
8348 check_encoder_state(dev);
8349 check_crtc_state(dev);
8350 check_shared_dpll_state(dev);
8351}
8352
7955static int __intel_set_mode(struct drm_crtc *crtc, 8353static int __intel_set_mode(struct drm_crtc *crtc,
7956 struct drm_display_mode *mode, 8354 struct drm_display_mode *mode,
7957 int x, int y, struct drm_framebuffer *fb) 8355 int x, int y, struct drm_framebuffer *fb)
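The WARNs in check_shared_dpll_state() above reduce to a few invariants between software tracking and hardware readout; spelled out as a standalone predicate with simplified fields:

    #include <stdbool.h>

    struct dpll_track {
            int active;     /* crtcs actively using the pll  */
            int refcount;   /* crtcs configured to use it    */
            bool on;        /* sw belief about the enable bit */
            bool hw_on;     /* enable bit read back from hw   */
    };

    static bool dpll_state_consistent(const struct dpll_track *s)
    {
            return s->active <= s->refcount &&   /* users <= references */
                   (!s->active || s->on) &&      /* in use implies on   */
                   s->on == s->hw_on;            /* sw matches hw       */
    }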
@@ -7988,11 +8386,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
7988 8386
7989 goto out; 8387 goto out;
7990 } 8388 }
8389 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
8390 "[modeset]");
7991 } 8391 }
7992 8392
7993 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7994 modeset_pipes, prepare_pipes, disable_pipes);
7995
7996 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 8393 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7997 intel_crtc_disable(&intel_crtc->base); 8394 intel_crtc_disable(&intel_crtc->base);
7998 8395
@@ -8005,12 +8402,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
8005 * to set it here already despite that we pass it down the callchain. 8402 * to set it here already despite that we pass it down the callchain.
8006 */ 8403 */
8007 if (modeset_pipes) { 8404 if (modeset_pipes) {
8008 enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
8009 crtc->mode = *mode; 8405 crtc->mode = *mode;
8010 /* mode_set/enable/disable functions rely on a correct pipe 8406 /* mode_set/enable/disable functions rely on a correct pipe
8011 * config. */ 8407 * config. */
8012 to_intel_crtc(crtc)->config = *pipe_config; 8408 to_intel_crtc(crtc)->config = *pipe_config;
8013 to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
8014 } 8409 }
8015 8410
8016 /* Only after disabling all output pipelines that will be changed can we 8411 /* Only after disabling all output pipelines that will be changed can we
@@ -8349,12 +8744,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8349 goto fail; 8744 goto fail;
8350 8745
8351 if (config->mode_changed) { 8746 if (config->mode_changed) {
8352 if (set->mode) {
8353 DRM_DEBUG_KMS("attempting to set mode from"
8354 " userspace\n");
8355 drm_mode_debug_printmodeline(set->mode);
8356 }
8357
8358 ret = intel_set_mode(set->crtc, set->mode, 8747 ret = intel_set_mode(set->crtc, set->mode,
8359 set->x, set->y, set->fb); 8748 set->x, set->y, set->fb);
8360 } else if (config->fb_changed) { 8749 } else if (config->fb_changed) {
@@ -8365,8 +8754,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8365 } 8754 }
8366 8755
8367 if (ret) { 8756 if (ret) {
8368 DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", 8757 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
8369 set->crtc->base.id, ret); 8758 set->crtc->base.id, ret);
8370fail: 8759fail:
8371 intel_set_config_restore_state(dev, config); 8760 intel_set_config_restore_state(dev, config);
8372 8761
@@ -8397,23 +8786,93 @@ static void intel_cpu_pll_init(struct drm_device *dev)
8397 intel_ddi_pll_init(dev); 8786 intel_ddi_pll_init(dev);
8398} 8787}
8399 8788
8400static void intel_pch_pll_init(struct drm_device *dev) 8789static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
8790 struct intel_shared_dpll *pll,
8791 struct intel_dpll_hw_state *hw_state)
8401{ 8792{
8402 drm_i915_private_t *dev_priv = dev->dev_private; 8793 uint32_t val;
8403 int i;
8404 8794
8405 if (dev_priv->num_pch_pll == 0) { 8795 val = I915_READ(PCH_DPLL(pll->id));
8406 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n"); 8796 hw_state->dpll = val;
8407 return; 8797 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
8798 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
8799
8800 return val & DPLL_VCO_ENABLE;
8801}
8802
8803static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
8804 struct intel_shared_dpll *pll)
8805{
8806 uint32_t reg, val;
8807
8808 /* PCH refclock must be enabled first */
8809 assert_pch_refclk_enabled(dev_priv);
8810
8811 reg = PCH_DPLL(pll->id);
8812 val = I915_READ(reg);
8813 val |= DPLL_VCO_ENABLE;
8814 I915_WRITE(reg, val);
8815 POSTING_READ(reg);
8816 udelay(200);
8817}
8818
8819static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
8820 struct intel_shared_dpll *pll)
8821{
8822 struct drm_device *dev = dev_priv->dev;
8823 struct intel_crtc *crtc;
8824 uint32_t reg, val;
8825
8826 /* Make sure no transcoder is still depending on us. */
8827 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
8828 if (intel_crtc_to_shared_dpll(crtc) == pll)
8829 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
8408 } 8830 }
8409 8831
8410 for (i = 0; i < dev_priv->num_pch_pll; i++) { 8832 reg = PCH_DPLL(pll->id);
8411 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i); 8833 val = I915_READ(reg);
8412 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i); 8834 val &= ~DPLL_VCO_ENABLE;
8413 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i); 8835 I915_WRITE(reg, val);
8836 POSTING_READ(reg);
8837 udelay(200);
8838}
8839
8840static char *ibx_pch_dpll_names[] = {
8841 "PCH DPLL A",
8842 "PCH DPLL B",
8843};
8844
8845static void ibx_pch_dpll_init(struct drm_device *dev)
8846{
8847 struct drm_i915_private *dev_priv = dev->dev_private;
8848 int i;
8849
8850 dev_priv->num_shared_dpll = 2;
8851
8852 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8853 dev_priv->shared_dplls[i].id = i;
8854 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
8855 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
8856 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
8857 dev_priv->shared_dplls[i].get_hw_state =
8858 ibx_pch_dpll_get_hw_state;
8414 } 8859 }
8415} 8860}
8416 8861
8862static void intel_shared_dpll_init(struct drm_device *dev)
8863{
8864 struct drm_i915_private *dev_priv = dev->dev_private;
8865
8866 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8867 ibx_pch_dpll_init(dev);
8868 else
8869 dev_priv->num_shared_dpll = 0;
8870
8871 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
8872 DRM_DEBUG_KMS("%i shared PLLs initialized\n",
8873 dev_priv->num_shared_dpll);
8874}
8875
8417static void intel_crtc_init(struct drm_device *dev, int pipe) 8876static void intel_crtc_init(struct drm_device *dev, int pipe)
8418{ 8877{
8419 drm_i915_private_t *dev_priv = dev->dev_private; 8878 drm_i915_private_t *dev_priv = dev->dev_private;
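ibx_pch_dpll_init() above converts the PCH PLL code to an ops-table: each shared DPLL carries its own enable/disable/get_hw_state callbacks, so the modeset core stays generation-agnostic. A stand-in sketch of the wiring:

    #include <stdbool.h>

    struct shared_dpll {
            int id;
            const char *name;
            void (*enable)(struct shared_dpll *pll);
            void (*disable)(struct shared_dpll *pll);
            bool (*get_hw_state)(struct shared_dpll *pll);
    };

    /* Stubbed generation-specific callbacks. */
    static void ibx_enable(struct shared_dpll *pll)  { (void)pll; }
    static void ibx_disable(struct shared_dpll *pll) { (void)pll; }
    static bool ibx_get_hw_state(struct shared_dpll *pll)
    { (void)pll; return false; }

    static const char *ibx_names[] = { "PCH DPLL A", "PCH DPLL B" };

    static void ibx_init(struct shared_dpll *plls, int *num)
    {
            *num = 2;
            for (int i = 0; i < *num; i++) {
                    plls[i].id = i;
                    plls[i].name = ibx_names[i];
                    plls[i].enable = ibx_enable;
                    plls[i].disable = ibx_disable;
                    plls[i].get_hw_state = ibx_get_hw_state;
            }
    }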
@@ -8436,7 +8895,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
8436 /* Swap pipes & planes for FBC on pre-965 */ 8895 /* Swap pipes & planes for FBC on pre-965 */
8437 intel_crtc->pipe = pipe; 8896 intel_crtc->pipe = pipe;
8438 intel_crtc->plane = pipe; 8897 intel_crtc->plane = pipe;
8439 intel_crtc->config.cpu_transcoder = pipe;
8440 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 8898 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
8441 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 8899 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8442 intel_crtc->plane = !pipe; 8900 intel_crtc->plane = !pipe;
@@ -8519,13 +8977,8 @@ static void intel_setup_outputs(struct drm_device *dev)
8519 struct drm_i915_private *dev_priv = dev->dev_private; 8977 struct drm_i915_private *dev_priv = dev->dev_private;
8520 struct intel_encoder *encoder; 8978 struct intel_encoder *encoder;
8521 bool dpd_is_edp = false; 8979 bool dpd_is_edp = false;
8522 bool has_lvds;
8523 8980
8524 has_lvds = intel_lvds_init(dev); 8981 intel_lvds_init(dev);
8525 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
8526 /* disable the panel fitter on everything but LVDS */
8527 I915_WRITE(PFIT_CONTROL, 0);
8528 }
8529 8982
8530 if (!IS_ULT(dev)) 8983 if (!IS_ULT(dev))
8531 intel_crt_init(dev); 8984 intel_crt_init(dev);
@@ -8598,10 +9051,8 @@ static void intel_setup_outputs(struct drm_device *dev)
8598 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 9051 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
8599 } 9052 }
8600 9053
8601 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 9054 if (!found && SUPPORTS_INTEGRATED_DP(dev))
8602 DRM_DEBUG_KMS("probing DP_B\n");
8603 intel_dp_init(dev, DP_B, PORT_B); 9055 intel_dp_init(dev, DP_B, PORT_B);
8604 }
8605 } 9056 }
8606 9057
8607 /* Before G4X SDVOC doesn't have its own detect register */ 9058 /* Before G4X SDVOC doesn't have its own detect register */
@@ -8617,17 +9068,13 @@ static void intel_setup_outputs(struct drm_device *dev)
8617 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 9068 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
8618 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); 9069 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
8619 } 9070 }
8620 if (SUPPORTS_INTEGRATED_DP(dev)) { 9071 if (SUPPORTS_INTEGRATED_DP(dev))
8621 DRM_DEBUG_KMS("probing DP_C\n");
8622 intel_dp_init(dev, DP_C, PORT_C); 9072 intel_dp_init(dev, DP_C, PORT_C);
8623 }
8624 } 9073 }
8625 9074
8626 if (SUPPORTS_INTEGRATED_DP(dev) && 9075 if (SUPPORTS_INTEGRATED_DP(dev) &&
8627 (I915_READ(DP_D) & DP_DETECTED)) { 9076 (I915_READ(DP_D) & DP_DETECTED))
8628 DRM_DEBUG_KMS("probing DP_D\n");
8629 intel_dp_init(dev, DP_D, PORT_D); 9077 intel_dp_init(dev, DP_D, PORT_D);
8630 }
8631 } else if (IS_GEN2(dev)) 9078 } else if (IS_GEN2(dev))
8632 intel_dvo_init(dev); 9079 intel_dvo_init(dev);
8633 9080
@@ -8675,6 +9122,7 @@ int intel_framebuffer_init(struct drm_device *dev,
8675 struct drm_mode_fb_cmd2 *mode_cmd, 9122 struct drm_mode_fb_cmd2 *mode_cmd,
8676 struct drm_i915_gem_object *obj) 9123 struct drm_i915_gem_object *obj)
8677{ 9124{
9125 int pitch_limit;
8678 int ret; 9126 int ret;
8679 9127
8680 if (obj->tiling_mode == I915_TILING_Y) { 9128 if (obj->tiling_mode == I915_TILING_Y) {
@@ -8688,10 +9136,26 @@ int intel_framebuffer_init(struct drm_device *dev,
8688 return -EINVAL; 9136 return -EINVAL;
8689 } 9137 }
8690 9138
8691 /* FIXME <= Gen4 stride limits are bit unclear */ 9139 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
8692 if (mode_cmd->pitches[0] > 32768) { 9140 pitch_limit = 32*1024;
8693 DRM_DEBUG("pitch (%d) must be at less than 32768\n", 9141 } else if (INTEL_INFO(dev)->gen >= 4) {
8694 mode_cmd->pitches[0]); 9142 if (obj->tiling_mode)
9143 pitch_limit = 16*1024;
9144 else
9145 pitch_limit = 32*1024;
9146 } else if (INTEL_INFO(dev)->gen >= 3) {
9147 if (obj->tiling_mode)
9148 pitch_limit = 8*1024;
9149 else
9150 pitch_limit = 16*1024;
9151 } else
9152 /* XXX DSPC is limited to 4k tiled */
9153 pitch_limit = 8*1024;
9154
9155 if (mode_cmd->pitches[0] > pitch_limit) {
9156 DRM_DEBUG("%s pitch (%d) must be less than %d\n",
9157 obj->tiling_mode ? "tiled" : "linear",
9158 mode_cmd->pitches[0], pitch_limit);
8695 return -EINVAL; 9159 return -EINVAL;
8696 } 9160 }
8697 9161
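The pitch check above replaces a flat 32768 limit with a per-generation ladder; the same decision as a standalone function, with the limits taken from the hunk and the gen/tiling inputs simplified:

    #include <stdbool.h>

    static int fb_pitch_limit(int gen, bool is_valleyview, bool tiled)
    {
            if (gen >= 5 && !is_valleyview)
                    return 32 * 1024;
            if (gen >= 4)
                    return tiled ? 16 * 1024 : 32 * 1024;
            if (gen >= 3)
                    return tiled ? 8 * 1024 : 16 * 1024;
            return 8 * 1024;        /* XXX DSPC is limited to 4k tiled */
    }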
@@ -8712,7 +9176,8 @@ int intel_framebuffer_init(struct drm_device *dev,
8712 case DRM_FORMAT_XRGB1555: 9176 case DRM_FORMAT_XRGB1555:
8713 case DRM_FORMAT_ARGB1555: 9177 case DRM_FORMAT_ARGB1555:
8714 if (INTEL_INFO(dev)->gen > 3) { 9178 if (INTEL_INFO(dev)->gen > 3) {
8715 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); 9179 DRM_DEBUG("unsupported pixel format: %s\n",
9180 drm_get_format_name(mode_cmd->pixel_format));
8716 return -EINVAL; 9181 return -EINVAL;
8717 } 9182 }
8718 break; 9183 break;
@@ -8723,7 +9188,8 @@ int intel_framebuffer_init(struct drm_device *dev,
8723 case DRM_FORMAT_XBGR2101010: 9188 case DRM_FORMAT_XBGR2101010:
8724 case DRM_FORMAT_ABGR2101010: 9189 case DRM_FORMAT_ABGR2101010:
8725 if (INTEL_INFO(dev)->gen < 4) { 9190 if (INTEL_INFO(dev)->gen < 4) {
8726 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); 9191 DRM_DEBUG("unsupported pixel format: %s\n",
9192 drm_get_format_name(mode_cmd->pixel_format));
8727 return -EINVAL; 9193 return -EINVAL;
8728 } 9194 }
8729 break; 9195 break;
@@ -8732,12 +9198,14 @@ int intel_framebuffer_init(struct drm_device *dev,
8732 case DRM_FORMAT_YVYU: 9198 case DRM_FORMAT_YVYU:
8733 case DRM_FORMAT_VYUY: 9199 case DRM_FORMAT_VYUY:
8734 if (INTEL_INFO(dev)->gen < 5) { 9200 if (INTEL_INFO(dev)->gen < 5) {
8735 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); 9201 DRM_DEBUG("unsupported pixel format: %s\n",
9202 drm_get_format_name(mode_cmd->pixel_format));
8736 return -EINVAL; 9203 return -EINVAL;
8737 } 9204 }
8738 break; 9205 break;
8739 default: 9206 default:
8740 DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); 9207 DRM_DEBUG("unsupported pixel format: %s\n",
9208 drm_get_format_name(mode_cmd->pixel_format));
8741 return -EINVAL; 9209 return -EINVAL;
8742 } 9210 }
8743 9211
@@ -8782,6 +9250,15 @@ static void intel_init_display(struct drm_device *dev)
8782{ 9250{
8783 struct drm_i915_private *dev_priv = dev->dev_private; 9251 struct drm_i915_private *dev_priv = dev->dev_private;
8784 9252
9253 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
9254 dev_priv->display.find_dpll = g4x_find_best_dpll;
9255 else if (IS_VALLEYVIEW(dev))
9256 dev_priv->display.find_dpll = vlv_find_best_dpll;
9257 else if (IS_PINEVIEW(dev))
9258 dev_priv->display.find_dpll = pnv_find_best_dpll;
9259 else
9260 dev_priv->display.find_dpll = i9xx_find_best_dpll;
9261
8785 if (HAS_DDI(dev)) { 9262 if (HAS_DDI(dev)) {
8786 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 9263 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
8787 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 9264 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
@@ -8796,6 +9273,13 @@ static void intel_init_display(struct drm_device *dev)
8796 dev_priv->display.crtc_disable = ironlake_crtc_disable; 9273 dev_priv->display.crtc_disable = ironlake_crtc_disable;
8797 dev_priv->display.off = ironlake_crtc_off; 9274 dev_priv->display.off = ironlake_crtc_off;
8798 dev_priv->display.update_plane = ironlake_update_plane; 9275 dev_priv->display.update_plane = ironlake_update_plane;
9276 } else if (IS_VALLEYVIEW(dev)) {
9277 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9278 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9279 dev_priv->display.crtc_enable = valleyview_crtc_enable;
9280 dev_priv->display.crtc_disable = i9xx_crtc_disable;
9281 dev_priv->display.off = i9xx_crtc_off;
9282 dev_priv->display.update_plane = i9xx_update_plane;
8799 } else { 9283 } else {
8800 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 9284 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
8801 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 9285 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
@@ -9037,6 +9521,11 @@ void intel_modeset_init_hw(struct drm_device *dev)
9037 mutex_unlock(&dev->struct_mutex); 9521 mutex_unlock(&dev->struct_mutex);
9038} 9522}
9039 9523
9524void intel_modeset_suspend_hw(struct drm_device *dev)
9525{
9526 intel_suspend_hw(dev);
9527}
9528
9040void intel_modeset_init(struct drm_device *dev) 9529void intel_modeset_init(struct drm_device *dev)
9041{ 9530{
9042 struct drm_i915_private *dev_priv = dev->dev_private; 9531 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9082,13 +9571,13 @@ void intel_modeset_init(struct drm_device *dev)
9082 for (j = 0; j < dev_priv->num_plane; j++) { 9571 for (j = 0; j < dev_priv->num_plane; j++) {
9083 ret = intel_plane_init(dev, i, j); 9572 ret = intel_plane_init(dev, i, j);
9084 if (ret) 9573 if (ret)
9085 DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n", 9574 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
9086 i, j, ret); 9575 pipe_name(i), sprite_name(i, j), ret);
9087 } 9576 }
9088 } 9577 }
9089 9578
9090 intel_cpu_pll_init(dev); 9579 intel_cpu_pll_init(dev);
9091 intel_pch_pll_init(dev); 9580 intel_shared_dpll_init(dev);
9092 9581
9093 /* Just disable it once at startup */ 9582 /* Just disable it once at startup */
9094 i915_disable_vga(dev); 9583 i915_disable_vga(dev);
@@ -9289,57 +9778,18 @@ void i915_redisable_vga(struct drm_device *dev)
9289 } 9778 }
9290} 9779}
9291 9780
9292/* Scan out the current hw modeset state, sanitizes it and maps it into the drm 9781static void intel_modeset_readout_hw_state(struct drm_device *dev)
9293 * and i915 state tracking structures. */
9294void intel_modeset_setup_hw_state(struct drm_device *dev,
9295 bool force_restore)
9296{ 9782{
9297 struct drm_i915_private *dev_priv = dev->dev_private; 9783 struct drm_i915_private *dev_priv = dev->dev_private;
9298 enum pipe pipe; 9784 enum pipe pipe;
9299 u32 tmp;
9300 struct drm_plane *plane;
9301 struct intel_crtc *crtc; 9785 struct intel_crtc *crtc;
9302 struct intel_encoder *encoder; 9786 struct intel_encoder *encoder;
9303 struct intel_connector *connector; 9787 struct intel_connector *connector;
9788 int i;
9304 9789
9305 if (HAS_DDI(dev)) {
9306 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9307
9308 if (tmp & TRANS_DDI_FUNC_ENABLE) {
9309 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9310 case TRANS_DDI_EDP_INPUT_A_ON:
9311 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9312 pipe = PIPE_A;
9313 break;
9314 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9315 pipe = PIPE_B;
9316 break;
9317 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9318 pipe = PIPE_C;
9319 break;
9320 default:
9321 /* A bogus value has been programmed, disable
9322 * the transcoder */
9323 WARN(1, "Bogus eDP source %08x\n", tmp);
9324 intel_ddi_disable_transcoder_func(dev_priv,
9325 TRANSCODER_EDP);
9326 goto setup_pipes;
9327 }
9328
9329 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9330 crtc->config.cpu_transcoder = TRANSCODER_EDP;
9331
9332 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
9333 pipe_name(pipe));
9334 }
9335 }
9336
9337setup_pipes:
9338 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 9790 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9339 base.head) { 9791 base.head) {
9340 enum transcoder tmp = crtc->config.cpu_transcoder;
9341 memset(&crtc->config, 0, sizeof(crtc->config)); 9792 memset(&crtc->config, 0, sizeof(crtc->config));
9342 crtc->config.cpu_transcoder = tmp;
9343 9793
9344 crtc->active = dev_priv->display.get_pipe_config(crtc, 9794 crtc->active = dev_priv->display.get_pipe_config(crtc,
9345 &crtc->config); 9795 &crtc->config);
@@ -9351,16 +9801,35 @@ setup_pipes:
9351 crtc->active ? "enabled" : "disabled"); 9801 crtc->active ? "enabled" : "disabled");
9352 } 9802 }
9353 9803
9804 /* FIXME: Smash this into the new shared dpll infrastructure. */
9354 if (HAS_DDI(dev)) 9805 if (HAS_DDI(dev))
9355 intel_ddi_setup_hw_pll_state(dev); 9806 intel_ddi_setup_hw_pll_state(dev);
9356 9807
9808 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9809 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
9810
9811 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
9812 pll->active = 0;
9813 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9814 base.head) {
9815 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
9816 pll->active++;
9817 }
9818 pll->refcount = pll->active;
9819
9820 DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
9821 pll->name, pll->refcount);
9822 }
9823
9357 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9824 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9358 base.head) { 9825 base.head) {
9359 pipe = 0; 9826 pipe = 0;
9360 9827
9361 if (encoder->get_hw_state(encoder, &pipe)) { 9828 if (encoder->get_hw_state(encoder, &pipe)) {
9362 encoder->base.crtc = 9829 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9363 dev_priv->pipe_to_crtc_mapping[pipe]; 9830 encoder->base.crtc = &crtc->base;
9831 if (encoder->get_config)
9832 encoder->get_config(encoder, &crtc->config);
9364 } else { 9833 } else {
9365 encoder->base.crtc = NULL; 9834 encoder->base.crtc = NULL;
9366 } 9835 }
@@ -9388,6 +9857,20 @@ setup_pipes:
9388 drm_get_connector_name(&connector->base), 9857 drm_get_connector_name(&connector->base),
9389 connector->base.encoder ? "enabled" : "disabled"); 9858 connector->base.encoder ? "enabled" : "disabled");
9390 } 9859 }
9860}
9861
9862/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
9863 * and i915 state tracking structures. */
9864void intel_modeset_setup_hw_state(struct drm_device *dev,
9865 bool force_restore)
9866{
9867 struct drm_i915_private *dev_priv = dev->dev_private;
9868 enum pipe pipe;
9869 struct drm_plane *plane;
9870 struct intel_crtc *crtc;
9871 struct intel_encoder *encoder;
9872
9873 intel_modeset_readout_hw_state(dev);
9391 9874
9392 /* HW state is read out, now we need to sanitize this mess. */ 9875 /* HW state is read out, now we need to sanitize this mess. */
9393 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9876 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9398,6 +9881,7 @@ setup_pipes:
9398 for_each_pipe(pipe) { 9881 for_each_pipe(pipe) {
9399 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9882 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9400 intel_sanitize_crtc(crtc); 9883 intel_sanitize_crtc(crtc);
9884 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
9401 } 9885 }
9402 9886
9403 if (force_restore) { 9887 if (force_restore) {
@@ -9440,12 +9924,23 @@ void intel_modeset_cleanup(struct drm_device *dev)
9440 struct drm_crtc *crtc; 9924 struct drm_crtc *crtc;
9441 struct intel_crtc *intel_crtc; 9925 struct intel_crtc *intel_crtc;
9442 9926
9927 /*
9928 * Disable interrupts and polling first to avoid creating havoc.
9929 * Too much stuff here (turning off rps, connectors, ...) would
9930 * experience fancy races otherwise.
9931 */
9932 drm_irq_uninstall(dev);
9933 cancel_work_sync(&dev_priv->hotplug_work);
9934 /*
9935 * Due to the hpd irq storm handling the hotplug work can re-arm the
9936 * poll handlers. Hence disable polling after hpd handling is shut down.
9937 */
9443 drm_kms_helper_poll_fini(dev); 9938 drm_kms_helper_poll_fini(dev);
9939
9444 mutex_lock(&dev->struct_mutex); 9940 mutex_lock(&dev->struct_mutex);
9445 9941
9446 intel_unregister_dsm_handler(); 9942 intel_unregister_dsm_handler();
9447 9943
9448
9449 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 9944 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9450 /* Skip inactive CRTCs */ 9945 /* Skip inactive CRTCs */
9451 if (!crtc->fb) 9946 if (!crtc->fb)
@@ -9461,17 +9956,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
9461 9956
9462 ironlake_teardown_rc6(dev); 9957 ironlake_teardown_rc6(dev);
9463 9958
9464 if (IS_VALLEYVIEW(dev))
9465 vlv_init_dpio(dev);
9466
9467 mutex_unlock(&dev->struct_mutex); 9959 mutex_unlock(&dev->struct_mutex);
9468 9960
9469 /* Disable the irq before mode object teardown, for the irq might
9470 * enqueue unpin/hotplug work. */
9471 drm_irq_uninstall(dev);
9472 cancel_work_sync(&dev_priv->hotplug_work);
9473 cancel_work_sync(&dev_priv->rps.work);
9474
9475 /* flush any delayed tasks or pending work */ 9961 /* flush any delayed tasks or pending work */
9476 flush_scheduled_work(); 9962 flush_scheduled_work();
9477 9963
@@ -9520,6 +10006,9 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9520#include <linux/seq_file.h> 10006#include <linux/seq_file.h>
9521 10007
9522struct intel_display_error_state { 10008struct intel_display_error_state {
10009
10010 u32 power_well_driver;
10011
9523 struct intel_cursor_error_state { 10012 struct intel_cursor_error_state {
9524 u32 control; 10013 u32 control;
9525 u32 position; 10014 u32 position;
@@ -9528,6 +10017,7 @@ struct intel_display_error_state {
9528 } cursor[I915_MAX_PIPES]; 10017 } cursor[I915_MAX_PIPES];
9529 10018
9530 struct intel_pipe_error_state { 10019 struct intel_pipe_error_state {
10020 enum transcoder cpu_transcoder;
9531 u32 conf; 10021 u32 conf;
9532 u32 source; 10022 u32 source;
9533 10023
@@ -9562,8 +10052,12 @@ intel_display_capture_error_state(struct drm_device *dev)
9562 if (error == NULL) 10052 if (error == NULL)
9563 return NULL; 10053 return NULL;
9564 10054
10055 if (HAS_POWER_WELL(dev))
10056 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10057
9565 for_each_pipe(i) { 10058 for_each_pipe(i) {
9566 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); 10059 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
10060 error->pipe[i].cpu_transcoder = cpu_transcoder;
9567 10061
9568 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 10062 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
9569 error->cursor[i].control = I915_READ(CURCNTR(i)); 10063 error->cursor[i].control = I915_READ(CURCNTR(i));
@@ -9598,46 +10092,60 @@ intel_display_capture_error_state(struct drm_device *dev)
 		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
+	/* In the code above we read the registers without checking if the power
+	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
+	 * prevent the next I915_WRITE from detecting it and printing an error
+	 * message. */
+	if (HAS_POWER_WELL(dev))
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
 	return error;
 }
 
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
 void
-intel_display_print_error_state(struct seq_file *m,
+intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 				struct drm_device *dev,
 				struct intel_display_error_state *error)
 {
 	int i;
 
-	seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+	if (HAS_POWER_WELL(dev))
+		err_printf(m, "PWR_WELL_CTL2: %08x\n",
+			   error->power_well_driver);
 	for_each_pipe(i) {
-		seq_printf(m, "Pipe [%d]:\n", i);
-		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
-		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
-
-		seq_printf(m, "Plane [%d]:\n", i);
-		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
-		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		err_printf(m, "Pipe [%d]:\n", i);
+		err_printf(m, "  CPU transcoder: %c\n",
+			   transcoder_name(error->pipe[i].cpu_transcoder));
+		err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
+		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
+		err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
+		err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
+		err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
+		err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
+		err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
+
+		err_printf(m, "Plane [%d]:\n", i);
+		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
 		if (INTEL_INFO(dev)->gen <= 3) {
-			seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
-			seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
 		}
 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-			seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
 		if (INTEL_INFO(dev)->gen >= 4) {
-			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
-			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
 		}
 
-		seq_printf(m, "Cursor [%d]:\n", i);
-		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
-		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
-		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+		err_printf(m, "Cursor [%d]:\n", i);
+		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
 	}
 }
 #endif
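The seq_printf() to err_printf() conversion above decouples the error-state dump from debugfs seq_files, so the same code can render into a captured buffer. A minimal sketch of that pattern, with a hypothetical err_buf type standing in for drm_i915_error_state_buf (whose real bookkeeping is more involved):

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-in for drm_i915_error_state_buf. */
struct err_buf {
	char *buf;
	size_t pos, size;
};

/* Append a formatted message; every call site goes through one macro. */
static void err_buf_printf(struct err_buf *e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (e->pos < e->size)
		e->pos += vsnprintf(e->buf + e->pos, e->size - e->pos, fmt, ap);
	va_end(ap);
}

#define err_printf(e, ...) err_buf_printf(e, __VA_ARGS__)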
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 70789b1b5642..b73971234013 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -52,30 +52,6 @@ static bool is_edp(struct intel_dp *intel_dp)
 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
-/**
- * is_pch_edp - is the port on the PCH and attached to an eDP panel?
- * @intel_dp: DP struct
- *
- * Returns true if the given DP struct corresponds to a PCH DP port attached
- * to an eDP panel, false otherwise.  Helpful for determining whether we
- * may need FDI resources for a given DP output or not.
- */
-static bool is_pch_edp(struct intel_dp *intel_dp)
-{
-	return intel_dp->is_pch_edp;
-}
-
-/**
- * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
- * @intel_dp: DP struct
- *
- * Returns true if the given DP struct corresponds to a CPU eDP port.
- */
-static bool is_cpu_edp(struct intel_dp *intel_dp)
-{
-	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
-}
-
 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -88,25 +64,6 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 }
 
-/**
- * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
- * @encoder: DRM encoder
- *
- * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
- * by intel_display.c.
- */
-bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
-{
-	struct intel_dp *intel_dp;
-
-	if (!encoder)
-		return false;
-
-	intel_dp = enc_to_intel_dp(encoder);
-
-	return is_pch_edp(intel_dp);
-}
-
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 static int
@@ -344,11 +301,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	 * Note that PCH attached eDP panels should use a 125MHz input
 	 * clock divider.
 	 */
-	if (is_cpu_edp(intel_dp)) {
+	if (IS_VALLEYVIEW(dev)) {
+		aux_clock_divider = 100;
+	} else if (intel_dig_port->port == PORT_A) {
 		if (HAS_DDI(dev))
-			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
-		else if (IS_VALLEYVIEW(dev))
-			aux_clock_divider = 100;
+			aux_clock_divider = DIV_ROUND_CLOSEST(
+				intel_ddi_get_cdclk_freq(dev_priv), 2000);
 		else if (IS_GEN6(dev) || IS_GEN7(dev))
 			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
 		else
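The DIV_ROUND_CLOSEST() change above derives the AUX clock divider from the CD clock, which intel_ddi_get_cdclk_freq() reports in kHz, rounding to the nearest divider for a roughly 2 MHz AUX clock instead of truncating with a shift. A sketch of the arithmetic, assuming the kernel's usual macro shape for positive operands:

/* Round-to-nearest integer division, as the kernel defines it for
 * positive values. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* e.g. a 337500 kHz CD clock: plain truncation gives 168, while
 * DIV_ROUND_CLOSEST gives 169, closer to the ideal 168.75. */
static int aux_clock_divider_for(int cdclk_khz)
{
	return DIV_ROUND_CLOSEST(cdclk_khz, 2000);
}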
@@ -660,6 +618,49 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
 	return ret;
 }
 
+static void
+intel_dp_set_clock(struct intel_encoder *encoder,
+		   struct intel_crtc_config *pipe_config, int link_bw)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_G4X(dev)) {
+		if (link_bw == DP_LINK_BW_1_62) {
+			pipe_config->dpll.p1 = 2;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.n = 2;
+			pipe_config->dpll.m1 = 23;
+			pipe_config->dpll.m2 = 8;
+		} else {
+			pipe_config->dpll.p1 = 1;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.n = 1;
+			pipe_config->dpll.m1 = 14;
+			pipe_config->dpll.m2 = 2;
+		}
+		pipe_config->clock_set = true;
+	} else if (IS_HASWELL(dev)) {
+		/* Haswell has special-purpose DP DDI clocks. */
+	} else if (HAS_PCH_SPLIT(dev)) {
+		if (link_bw == DP_LINK_BW_1_62) {
+			pipe_config->dpll.n = 1;
+			pipe_config->dpll.p1 = 2;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.m1 = 12;
+			pipe_config->dpll.m2 = 9;
+		} else {
+			pipe_config->dpll.n = 2;
+			pipe_config->dpll.p1 = 1;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.m1 = 14;
+			pipe_config->dpll.m2 = 8;
+		}
+		pipe_config->clock_set = true;
+	} else if (IS_VALLEYVIEW(dev)) {
+		/* FIXME: Need to figure out optimized DP clocks for vlv. */
+	}
+}
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
 			struct intel_crtc_config *pipe_config)
@@ -667,17 +668,18 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-	struct drm_display_mode *mode = &pipe_config->requested_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *intel_crtc = encoder->new_crtc;
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	int lane_count, clock;
 	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
 	int bpp, mode_rate;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
-	int target_clock, link_avail, link_clock;
+	int link_avail, link_clock;
 
-	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
 		pipe_config->has_pch_encoder = true;
 
 	pipe_config->has_dp_encoder = true;
@@ -685,12 +687,13 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
 				       adjusted_mode);
-		intel_pch_panel_fitting(dev,
-					intel_connector->panel.fitting_mode,
-					mode, adjusted_mode);
+		if (!HAS_PCH_SPLIT(dev))
+			intel_gmch_panel_fitting(intel_crtc, pipe_config,
+						 intel_connector->panel.fitting_mode);
+		else
+			intel_pch_panel_fitting(intel_crtc, pipe_config,
+						intel_connector->panel.fitting_mode);
 	}
-	/* We need to take the panel's fixed mode into account. */
-	target_clock = adjusted_mode->clock;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return false;
@@ -701,12 +704,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
-	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
-	if (is_edp(intel_dp) && dev_priv->edp.bpp)
-		bpp = min_t(int, bpp, dev_priv->edp.bpp);
+	bpp = pipe_config->pipe_bpp;
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
-		mode_rate = intel_dp_link_required(target_clock, bpp);
+		mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 
 		for (clock = 0; clock <= max_clock; clock++) {
 			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
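The bpp/clock/lane walk above looks for the cheapest configuration whose link capacity covers the mode. A sketch of the two sides of that comparison, with the 8b/10b overhead folded in the way the driver's helpers do (the exact helper bodies here are an assumption):

/* Data rate the mode needs: pixels per second times bits per pixel,
 * expressed in bytes (rounded up). */
static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8;
}

/* Data rate a link offers: symbol clock times lanes, with 8 data bits
 * carried per 10-bit symbol (8b/10b coding). */
static int link_available(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;
}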
@@ -741,20 +744,21 @@ found:
 
 	intel_dp->link_bw = bws[clock];
 	intel_dp->lane_count = lane_count;
-	adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 	pipe_config->pipe_bpp = bpp;
-	pipe_config->pixel_target_clock = target_clock;
+	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 
 	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
 		      intel_dp->link_bw, intel_dp->lane_count,
-		      adjusted_mode->clock, bpp);
+		      pipe_config->port_clock, bpp);
 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
 		      mode_rate, link_avail);
 
 	intel_link_compute_m_n(bpp, lane_count,
-			       target_clock, adjusted_mode->clock,
+			       adjusted_mode->clock, pipe_config->port_clock,
 			       &pipe_config->dp_m_n);
 
+	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+
 	return true;
 }
 
@@ -773,24 +777,28 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
 	}
 }
 
-static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = crtc->dev;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
-	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
 	dpa_ctl = I915_READ(DP_A);
 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
-	if (clock < 200000) {
+	if (crtc->config.port_clock == 162000) {
 		/* For a long time we've carried around a ILK-DevA w/a for the
 		 * 160MHz clock. If we're really unlucky, it's still required.
 		 */
 		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
+		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 	} else {
 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
+		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
 	}
 
 	I915_WRITE(DP_A, dpa_ctl);
@@ -806,8 +814,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
 
 	/*
 	 * There are four kinds of DP registers:
@@ -833,21 +841,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	/* Handle DP bits in common between all three register formats */
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
-	switch (intel_dp->lane_count) {
-	case 1:
-		intel_dp->DP |= DP_PORT_WIDTH_1;
-		break;
-	case 2:
-		intel_dp->DP |= DP_PORT_WIDTH_2;
-		break;
-	case 4:
-		intel_dp->DP |= DP_PORT_WIDTH_4;
-		break;
-	}
 	if (intel_dp->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-				 pipe_name(intel_crtc->pipe));
+				 pipe_name(crtc->pipe));
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 		intel_write_eld(encoder, adjusted_mode);
 	}
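Folding the lane-count switch into DP_PORT_WIDTH() works because the register encodes the width as lane count minus one, so the old _1/_2/_4 constants were 0, 1 and 3 in the same field. A plausible definition (the bit position is an assumption; the real macro lives in i915_reg.h):

/* Sketch: encode the DP port width field from a lane count of 1, 2 or 4.
 * The shift is assumed; the point is the (width - 1) encoding. */
#define DP_PORT_WIDTH(width)	(((width) - 1) << 19)

/* DP_PORT_WIDTH(1) == 0 << 19, DP_PORT_WIDTH(2) == 1 << 19,
 * DP_PORT_WIDTH(4) == 3 << 19 -- matching the old _1/_2/_4 values. */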
@@ -856,7 +854,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			intel_dp->DP |= DP_SYNC_HS_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -866,14 +864,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-		intel_dp->DP |= intel_crtc->pipe << 29;
-
-		/* don't miss out required setting for eDP */
-		if (adjusted_mode->clock < 200000)
-			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-		else
-			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		intel_dp->DP |= crtc->pipe << 29;
+	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
 			intel_dp->DP |= intel_dp->color_range;
 
@@ -886,22 +878,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-		if (intel_crtc->pipe == 1)
+		if (crtc->pipe == 1)
 			intel_dp->DP |= DP_PIPEB_SELECT;
-
-		if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
-			/* don't miss out required setting for eDP */
-			if (adjusted_mode->clock < 200000)
-				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-			else
-				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-		}
 	} else {
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 	}
 
-	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
-		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+	if (port == PORT_A && !IS_VALLEYVIEW(dev))
+		ironlake_set_pll_cpu_edp(intel_dp);
 }
 
 #define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1290,6 +1274,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 			    enum pipe *pipe)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp = I915_READ(intel_dp->output_reg);
@@ -1297,9 +1282,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 	if (!(tmp & DP_PORT_EN))
 		return false;
 
-	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
 		*pipe = PORT_TO_PIPE_CPT(tmp);
-	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
 		*pipe = PORT_TO_PIPE(tmp);
 	} else {
 		u32 trans_sel;
@@ -1335,9 +1320,48 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 	return true;
 }
 
+static void intel_dp_get_config(struct intel_encoder *encoder,
+				struct intel_crtc_config *pipe_config)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	u32 tmp, flags = 0;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
+		tmp = I915_READ(intel_dp->output_reg);
+		if (tmp & DP_SYNC_HS_HIGH)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+
+		if (tmp & DP_SYNC_VS_HIGH)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+	} else {
+		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+
+		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+	}
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 static void intel_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct drm_device *dev = encoder->base.dev;
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
@@ -1347,16 +1371,17 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 	ironlake_edp_panel_off(intel_dp);
 
 	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
-	if (!is_cpu_edp(intel_dp))
+	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
 		intel_dp_link_down(intel_dp);
 }
 
 static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct drm_device *dev = encoder->base.dev;
 
-	if (is_cpu_edp(intel_dp)) {
+	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
 		intel_dp_link_down(intel_dp);
 		if (!IS_VALLEYVIEW(dev))
 			ironlake_edp_pll_off(intel_dp);
@@ -1381,15 +1406,73 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
+
+	if (IS_VALLEYVIEW(dev)) {
+		struct intel_digital_port *dport =
+			enc_to_dig_port(&encoder->base);
+		int channel = vlv_dport_to_channel(dport);
+
+		vlv_wait_port_ready(dev_priv, channel);
+	}
 }
 
 static void intel_pre_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
+	if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
 		ironlake_edp_pll_on(intel_dp);
+
+	if (IS_VALLEYVIEW(dev)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(encoder->base.crtc);
+		int port = vlv_dport_to_channel(dport);
+		int pipe = intel_crtc->pipe;
+		u32 val;
+
+		val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+		val = 0;
+		if (pipe)
+			val |= (1<<21);
+		else
+			val &= ~(1<<21);
+		val |= 0x001000c4;
+		vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+
+		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+			       0x00760018);
+		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+			       0x00400888);
+	}
+}
+
+static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int port = vlv_dport_to_channel(dport);
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/* Program Tx lane resets to default */
+	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+		       DPIO_PCS_TX_LANE2_RESET |
+		       DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+		       DPIO_PCS_CLK_SOFT_RESET);
+
+	/* Fix up inter-pair skew failure */
+	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
 }
 
 /*
@@ -1451,10 +1534,13 @@ static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+	if (IS_VALLEYVIEW(dev))
+		return DP_TRAIN_VOLTAGE_SWING_1200;
+	else if (IS_GEN7(dev) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_800;
-	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+	else if (HAS_PCH_CPT(dev) && port != PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_1200;
 	else
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1464,6 +1550,7 @@ static uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 
 	if (HAS_DDI(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
@@ -1477,7 +1564,19 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 		default:
 			return DP_TRAIN_PRE_EMPHASIS_0;
 		}
-	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_9_5;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_GEN7(dev) && port == PORT_A) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_6;
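The new Valleyview branch follows the usual DP training trade-off: the higher the voltage swing, the less pre-emphasis headroom remains, from 9.5 dB at 400 mV down to none at 1200 mV. The same mapping as a table-driven sketch with hypothetical names (levels 0..3 correspond to 0/3.5/6/9.5 dB):

/* Max pre-emphasis level per voltage swing, as in the VLV branch above.
 * Indices follow the DP_TRAIN_VOLTAGE_SWING_* ordering 400/600/800/1200. */
static const unsigned char vlv_max_preemph_level[4] = {
	3,	/* 400 mV  -> up to 9.5 dB */
	2,	/* 600 mV  -> up to 6 dB   */
	1,	/* 800 mV  -> up to 3.5 dB */
	0,	/* 1200 mV -> none         */
};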
@@ -1502,6 +1601,101 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 	}
 }
 
+static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	unsigned long demph_reg_value, preemph_reg_value,
+		uniqtranscale_reg_value;
+	uint8_t train_set = intel_dp->train_set[0];
+	int port = vlv_dport_to_channel(dport);
+
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+	case DP_TRAIN_PRE_EMPHASIS_0:
+		preemph_reg_value = 0x0004000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x2B405555;
+			uniqtranscale_reg_value = 0x552AB83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x5548B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			demph_reg_value = 0x2B245555;
+			uniqtranscale_reg_value = 0x5560B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+			demph_reg_value = 0x2B405555;
+			uniqtranscale_reg_value = 0x5598DA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_3_5:
+		preemph_reg_value = 0x0002000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x5552B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			demph_reg_value = 0x2B404848;
+			uniqtranscale_reg_value = 0x5580B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_6:
+		preemph_reg_value = 0x0000000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x2B305555;
+			uniqtranscale_reg_value = 0x5570B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			demph_reg_value = 0x2B2B4040;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_9_5:
+		preemph_reg_value = 0x0006000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x1B405555;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+		       uniqtranscale_reg_value);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+
+	return 0;
+}
+
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
@@ -1669,6 +1863,7 @@ static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	uint32_t signal_levels, mask;
 	uint8_t train_set = intel_dp->train_set[0];
@@ -1676,10 +1871,13 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 	if (HAS_DDI(dev)) {
 		signal_levels = intel_hsw_signal_levels(train_set);
 		mask = DDI_BUF_EMP_MASK;
-	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev)) {
+		signal_levels = intel_vlv_signal_levels(intel_dp);
+		mask = 0;
+	} else if (IS_GEN7(dev) && port == PORT_A) {
 		signal_levels = intel_gen7_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+	} else if (IS_GEN6(dev) && port == PORT_A) {
 		signal_levels = intel_gen6_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
 	} else {
@@ -1729,8 +1927,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 		}
 		I915_WRITE(DP_TP_CTL(port), temp);
 
-	} else if (HAS_PCH_CPT(dev) &&
-		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
 		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1981,6 +2178,7 @@ static void
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
@@ -2010,7 +2208,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("\n");
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
 	} else {
@@ -2301,11 +2499,10 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 			return NULL;
 
 		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
-		edid = kmalloc(size, GFP_KERNEL);
+		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
 		if (!edid)
 			return NULL;
 
-		memcpy(edid, intel_connector->edid, size);
 		return edid;
 	}
 
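The kmemdup() conversion above replaces a kmalloc()/memcpy() pair with one call, so the copy length can never drift from the allocation size. A userspace sketch of the same idiom:

#include <stdlib.h>
#include <string.h>

/* Allocate-and-copy in one step, like the kernel's kmemdup(). */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}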
@@ -2499,15 +2696,16 @@ done:
 }
 
 static void
-intel_dp_destroy(struct drm_connector *connector)
+intel_dp_connector_destroy(struct drm_connector *connector)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 
 	if (!IS_ERR_OR_NULL(intel_connector->edid))
 		kfree(intel_connector->edid);
 
-	if (is_edp(intel_dp))
+	/* Can't call is_edp() since the encoder may have been destroyed
+	 * already. */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_fini(&intel_connector->panel);
 
 	drm_sysfs_connector_remove(connector);
@@ -2541,7 +2739,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.detect = intel_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_set_property,
-	.destroy = intel_dp_destroy,
+	.destroy = intel_dp_connector_destroy,
 };
 
 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -2588,11 +2786,11 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 	struct child_device_config *p_child;
 	int i;
 
-	if (!dev_priv->child_dev_num)
+	if (!dev_priv->vbt.child_dev_num)
 		return false;
 
-	for (i = 0; i < dev_priv->child_dev_num; i++) {
-		p_child = dev_priv->child_dev + i;
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
 
 		if (p_child->dvo_port == PORT_IDPD &&
 		    p_child->device_type == DEVICE_TYPE_eDP)
@@ -2670,7 +2868,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
-	vbt = dev_priv->edp.pps;
+	vbt = dev_priv->vbt.edp_pps;
 
 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
 	 * our hw here, which are all in 100usec. */
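The power sequencer registers count in 100 µs ticks, hence the "clunky units" comment, while panel delays are normally quoted in milliseconds. A sketch of the conversion, assuming millisecond inputs (helper names hypothetical):

/* Panel delays are usually quoted in ms; the PP_* registers count
 * 100 us ticks. */
static inline unsigned int pps_ms_to_reg(unsigned int ms)
{
	return ms * 10;		/* 1 ms == ten 100 us units */
}

static inline unsigned int pps_reg_to_ms(unsigned int reg)
{
	return (reg + 9) / 10;	/* round up when going back to ms */
}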
@@ -2738,9 +2936,6 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		pp_div_reg = PIPEA_PP_DIVISOR;
 	}
 
-	if (IS_VALLEYVIEW(dev))
-		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
-
 	/* And finally store the new values in the power sequencer. */
 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
 		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
@@ -2754,8 +2949,10 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-		if (is_cpu_edp(intel_dp))
+	if (IS_VALLEYVIEW(dev)) {
+		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (dp_to_dig_port(intel_dp)->port == PORT_A)
 			port_sel = PANEL_POWER_PORT_DP_A;
 		else
 			port_sel = PANEL_POWER_PORT_DP_D;
@@ -2773,7 +2970,85 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		      I915_READ(pp_div_reg));
 }
 
-void
+static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+				     struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *fixed_mode = NULL;
+	struct edp_power_seq power_seq = { 0 };
+	bool has_dpcd;
+	struct drm_display_mode *scan;
+	struct edid *edid;
+
+	if (!is_edp(intel_dp))
+		return true;
+
+	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+
+	/* Cache DPCD and EDID for edp. */
+	ironlake_edp_panel_vdd_on(intel_dp);
+	has_dpcd = intel_dp_get_dpcd(intel_dp);
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+
+	if (has_dpcd) {
+		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+			dev_priv->no_aux_handshake =
+				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+	} else {
+		/* if this fails, presume the device is a ghost */
+		DRM_INFO("failed to retrieve link info, disabling eDP\n");
+		return false;
+	}
+
+	/* We now know it's not a ghost, init power sequence regs. */
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+						      &power_seq);
+
+	ironlake_edp_panel_vdd_on(intel_dp);
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		if (drm_add_edid_modes(connector, edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								edid);
+			drm_edid_to_eld(connector, edid);
+		} else {
+			kfree(edid);
+			edid = ERR_PTR(-EINVAL);
+		}
+	} else {
+		edid = ERR_PTR(-ENOENT);
+	}
+	intel_connector->edid = edid;
+
+	/* prefer fixed mode from EDID if available */
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			break;
+		}
+	}
+
+	/* fallback to VBT if available for eDP */
+	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
+		fixed_mode = drm_mode_duplicate(dev,
+					dev_priv->vbt.lfp_lvds_vbt_mode);
+		if (fixed_mode)
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+	intel_panel_setup_backlight(connector);
+
+	return true;
+}
+
+bool
 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			struct intel_connector *intel_connector)
 {
@@ -2782,38 +3057,47 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_display_mode *fixed_mode = NULL;
-	struct edp_power_seq power_seq = { 0 };
 	enum port port = intel_dig_port->port;
 	const char *name = NULL;
-	int type;
+	int type, error;
 
 	/* Preserve the current hw state. */
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
-	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
-		if (intel_dpd_is_edp(dev))
-			intel_dp->is_pch_edp = true;
-
+	type = DRM_MODE_CONNECTOR_DisplayPort;
 	/*
 	 * FIXME : We need to initialize built-in panels before external panels.
 	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
 	 */
-	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
-		type = DRM_MODE_CONNECTOR_eDP;
-		intel_encoder->type = INTEL_OUTPUT_EDP;
-	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
-		type = DRM_MODE_CONNECTOR_eDP;
-		intel_encoder->type = INTEL_OUTPUT_EDP;
-	} else {
-		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
-		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
-		 * rewrite it.
-		 */
-		type = DRM_MODE_CONNECTOR_DisplayPort;
+	switch (port) {
+	case PORT_A:
+		type = DRM_MODE_CONNECTOR_eDP;
+		break;
+	case PORT_C:
+		if (IS_VALLEYVIEW(dev))
+			type = DRM_MODE_CONNECTOR_eDP;
+		break;
+	case PORT_D:
+		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
+			type = DRM_MODE_CONNECTOR_eDP;
+		break;
+	default:	/* silence GCC warning */
+		break;
 	}
 
+	/*
+	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+	 * for DP the encoder type can be set by the caller to
+	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+	 */
+	if (type == DRM_MODE_CONNECTOR_eDP)
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
+		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+		      port_name(port));
+
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
@@ -2873,74 +3157,21 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		BUG();
 	}
 
-	if (is_edp(intel_dp))
-		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-
-	intel_dp_i2c_init(intel_dp, intel_connector, name);
-
-	/* Cache DPCD and EDID for edp. */
-	if (is_edp(intel_dp)) {
-		bool ret;
-		struct drm_display_mode *scan;
-		struct edid *edid;
-
-		ironlake_edp_panel_vdd_on(intel_dp);
-		ret = intel_dp_get_dpcd(intel_dp);
-		ironlake_edp_panel_vdd_off(intel_dp, false);
+	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
+	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
+	     error, port_name(port));
 
-		if (ret) {
-			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
-				dev_priv->no_aux_handshake =
-					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
-					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-		} else {
-			/* if this fails, presume the device is a ghost */
-			DRM_INFO("failed to retrieve link info, disabling eDP\n");
-			intel_dp_encoder_destroy(&intel_encoder->base);
-			intel_dp_destroy(connector);
-			return;
-		}
-
-		/* We now know it's not a ghost, init power sequence regs. */
-		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-							      &power_seq);
-
-		ironlake_edp_panel_vdd_on(intel_dp);
-		edid = drm_get_edid(connector, &intel_dp->adapter);
-		if (edid) {
-			if (drm_add_edid_modes(connector, edid)) {
-				drm_mode_connector_update_edid_property(connector, edid);
-				drm_edid_to_eld(connector, edid);
-			} else {
-				kfree(edid);
-				edid = ERR_PTR(-EINVAL);
-			}
-		} else {
-			edid = ERR_PTR(-ENOENT);
+	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+		i2c_del_adapter(&intel_dp->adapter);
+		if (is_edp(intel_dp)) {
+			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+			mutex_lock(&dev->mode_config.mutex);
+			ironlake_panel_vdd_off_sync(intel_dp);
+			mutex_unlock(&dev->mode_config.mutex);
 		}
-		intel_connector->edid = edid;
-
-		/* prefer fixed mode from EDID if available */
-		list_for_each_entry(scan, &connector->probed_modes, head) {
-			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
-				fixed_mode = drm_mode_duplicate(dev, scan);
-				break;
-			}
-		}
-
-		/* fallback to VBT if available for eDP */
-		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
-			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-			if (fixed_mode)
-				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
-		}
-
-		ironlake_edp_panel_vdd_off(intel_dp, false);
-	}
-
-	if (is_edp(intel_dp)) {
-		intel_panel_init(&intel_connector->panel, fixed_mode);
-		intel_panel_setup_backlight(connector);
+		drm_sysfs_connector_remove(connector);
+		drm_connector_cleanup(connector);
+		return false;
 	}
 
 	intel_dp_add_properties(intel_dp, connector);
@@ -2953,6 +3184,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
+
+	return true;
 }
 
 void
@@ -2986,6 +3219,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	intel_encoder->disable = intel_disable_dp;
 	intel_encoder->post_disable = intel_post_disable_dp;
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+	intel_encoder->get_config = intel_dp_get_config;
+	if (IS_VALLEYVIEW(dev))
+		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
 
 	intel_dig_port->port = port;
 	intel_dig_port->dp.output_reg = output_reg;
@@ -2995,5 +3231,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	intel_encoder->cloneable = false;
 	intel_encoder->hot_plug = intel_dp_hot_plug;
 
-	intel_dp_init_connector(intel_dig_port, intel_connector);
+	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
+		drm_encoder_cleanup(encoder);
+		kfree(intel_dig_port);
+		kfree(intel_connector);
+	}
 }
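With intel_dp_init_connector() now returning bool, intel_dp_init() unwinds its own allocations on failure, mirroring the connector-side teardown added earlier in this patch. The general shape of that init-with-rollback pattern, sketched with hypothetical types:

#include <stdlib.h>

/* Hypothetical stand-ins for the encoder/connector objects. */
struct enc { int id; };
struct conn { int id; };

static int conn_init(struct conn *c) { c->id = 1; return 0; } /* 0 = ok */

/* On failure, free exactly what this function allocated, in reverse
 * order; on success, ownership of both objects passes to the caller. */
static int port_init(struct enc **out_e, struct conn **out_c)
{
	struct enc *e = malloc(sizeof(*e));
	struct conn *c = malloc(sizeof(*c));

	if (!e || !c || conn_init(c)) {
		free(c);
		free(e);
		return -1;
	}
	*out_e = e;
	*out_c = c;
	return 0;
}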
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 624a9e6b8d71..c8c9b6f48230 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -120,7 +120,6 @@ struct intel_encoder {
 	struct intel_crtc *new_crtc;
 
 	int type;
-	bool needs_tv_clock;
 	/*
 	 * Intel hw has only one MUX where encoders could be clone, hence a
 	 * simple flag is enough to compute the possible_clones mask.
@@ -140,6 +139,12 @@ struct intel_encoder {
 	 * the encoder is active. If the encoder is enabled it also set the pipe
 	 * it is connected to in the pipe parameter. */
 	bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+	/* Reconstructs the equivalent mode flags for the current hardware
+	 * state. This must be called _after_ display->get_pipe_config has
+	 * pre-filled the pipe config. Note that intel_encoder->base.crtc must
+	 * be set correctly before calling this function. */
+	void (*get_config)(struct intel_encoder *,
+			   struct intel_crtc_config *pipe_config);
 	int crtc_mask;
 	enum hpd_pin hpd_pin;
 };
@@ -177,7 +182,30 @@ struct intel_connector {
 	u8 polled;
 };
 
+typedef struct dpll {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int dot;
+	int vco;
+	int m;
+	int p;
+} intel_clock_t;
+
 struct intel_crtc_config {
+	/**
+	 * quirks - bitfield with hw state readout quirks
+	 *
+	 * For various reasons the hw state readout code might not be able to
+	 * completely faithfully read out the current state. These cases are
+	 * tracked with quirk flags so that fastboot and state checker can act
+	 * accordingly.
+	 */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+	unsigned long quirks;
+
 	struct drm_display_mode requested_mode;
 	struct drm_display_mode adjusted_mode;
 	/* This flag must be set by the encoder's compute_config callback if it
@@ -201,29 +229,67 @@ struct intel_crtc_config {
 	/* DP has a bunch of special case unfortunately, so mark the pipe
 	 * accordingly. */
 	bool has_dp_encoder;
+
+	/*
+	 * Enable dithering, used when the selected pipe bpp doesn't match the
+	 * plane bpp.
+	 */
 	bool dither;
 
 	/* Controls for the clock computation, to override various stages. */
 	bool clock_set;
 
+	/* SDVO TV has a bunch of special case. To make multifunction encoders
+	 * work correctly, we need to track this at runtime.*/
+	bool sdvo_tv_clock;
+
+	/*
+	 * crtc bandwidth limit, don't increase pipe bpp or clock if not really
+	 * required. This is set in the 2nd loop of calling encoder's
+	 * ->compute_config if the first pick doesn't work out.
+	 */
+	bool bw_constrained;
+
 	/* Settings for the intel dpll used on pretty much everything but
 	 * haswell. */
-	struct dpll {
-		unsigned n;
-		unsigned m1, m2;
-		unsigned p1, p2;
-	} dpll;
+	struct dpll dpll;
+
+	/* Selected dpll when shared or DPLL_ID_PRIVATE. */
+	enum intel_dpll_id shared_dpll;
+
+	/* Actual register state of the dpll, for shared dpll cross-checking. */
+	struct intel_dpll_hw_state dpll_hw_state;
 
 	int pipe_bpp;
 	struct intel_link_m_n dp_m_n;
-	/**
-	 * This is currently used by DP and HDMI encoders since those can have a
-	 * target pixel clock != the port link clock (which is currently stored
-	 * in adjusted_mode->clock).
-	 */
-	int pixel_target_clock;
+
+	/*
+	 * Frequence the dpll for the port should run at. Differs from the
+	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+	 */
+	int port_clock;
+
 	/* Used by SDVO (and if we ever fix it, HDMI). */
 	unsigned pixel_multiplier;
+
+	/* Panel fitter controls for gen2-gen4 + VLV */
+	struct {
+		u32 control;
+		u32 pgm_ratios;
+		u32 lvds_border_bits;
+	} gmch_pfit;
+
+	/* Panel fitter placement and size for Ironlake+ */
+	struct {
+		u32 pos;
+		u32 size;
+	} pch_pfit;
+
+	/* FDI configuration, only valid if has_pch_encoder is set. */
+	int fdi_lanes;
+	struct intel_link_m_n fdi_m_n;
+
+	bool ips_enabled;
 };
 
 struct intel_crtc {
@@ -242,7 +308,6 @@ struct intel_crtc {
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
-	int fdi_lanes;
 
 	atomic_t unpin_work_count;
 
@@ -259,12 +324,14 @@ struct intel_crtc {
 
 	struct intel_crtc_config config;
 
-	/* We can share PLLs across outputs if the timings match */
-	struct intel_pch_pll *pch_pll;
 	uint32_t ddi_pll_sel;
 
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
+
+	/* Access to these should be protected by dev_priv->irq_lock. */
+	bool cpu_fifo_underrun_disabled;
+	bool pch_fifo_underrun_disabled;
 };
 
 struct intel_plane {
@@ -279,6 +346,18 @@ struct intel_plane {
 	unsigned int crtc_w, crtc_h;
 	uint32_t src_x, src_y;
 	uint32_t src_w, src_h;
+
+	/* Since we need to change the watermarks before/after
+	 * enabling/disabling the planes, we need to store the parameters here
+	 * as the other pieces of the struct may not reflect the values we want
+	 * for the watermark calculations. Currently only Haswell uses this.
+	 */
+	struct {
+		bool enable;
+		uint8_t bytes_per_pixel;
+		uint32_t horiz_pixels;
+	} wm;
+
 	void (*update_plane)(struct drm_plane *plane,
 			     struct drm_framebuffer *fb,
 			     struct drm_i915_gem_object *obj,
@@ -411,7 +490,6 @@ struct intel_dp {
411 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 490 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
412 struct i2c_adapter adapter; 491 struct i2c_adapter adapter;
413 struct i2c_algo_dp_aux_data algo; 492 struct i2c_algo_dp_aux_data algo;
414 bool is_pch_edp;
415 uint8_t train_set[4]; 493 uint8_t train_set[4];
416 int panel_power_up_delay; 494 int panel_power_up_delay;
417 int panel_power_down_delay; 495 int panel_power_down_delay;
@@ -431,6 +509,19 @@ struct intel_digital_port {
431 struct intel_hdmi hdmi; 509 struct intel_hdmi hdmi;
432}; 510};
433 511
512static inline int
513vlv_dport_to_channel(struct intel_digital_port *dport)
514{
515 switch (dport->port) {
516 case PORT_B:
517 return 0;
518 case PORT_C:
519 return 1;
520 default:
521 BUG();
522 }
523}
524
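A minimal usage sketch for the new helper; the caller shown here is hypothetical, while the PORT_B/PORT_C mapping and vlv_wait_port_ready() both come from this patch. Note the BUG() means callers may only hand in Valleyview digital ports:

/* Hypothetical caller: resolve the DPIO channel for a port and
 * poll until its lanes report ready. */
static void example_vlv_port_setup(struct drm_i915_private *dev_priv,
				   struct intel_digital_port *dport)
{
	int channel = vlv_dport_to_channel(dport); /* PORT_B -> 0, PORT_C -> 1 */

	vlv_wait_port_ready(dev_priv, channel);
}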
434static inline struct drm_crtc * 525static inline struct drm_crtc *
435intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 526intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
436{ 527{
@@ -474,6 +565,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
474extern void intel_attach_force_audio_property(struct drm_connector *connector); 565extern void intel_attach_force_audio_property(struct drm_connector *connector);
475extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 566extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
476 567
568extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
477extern void intel_crt_init(struct drm_device *dev); 569extern void intel_crt_init(struct drm_device *dev);
478extern void intel_hdmi_init(struct drm_device *dev, 570extern void intel_hdmi_init(struct drm_device *dev,
479 int hdmi_reg, enum port port); 571 int hdmi_reg, enum port port);
@@ -488,13 +580,14 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
488extern void intel_dvo_init(struct drm_device *dev); 580extern void intel_dvo_init(struct drm_device *dev);
489extern void intel_tv_init(struct drm_device *dev); 581extern void intel_tv_init(struct drm_device *dev);
490extern void intel_mark_busy(struct drm_device *dev); 582extern void intel_mark_busy(struct drm_device *dev);
491extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); 583extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
584 struct intel_ring_buffer *ring);
492extern void intel_mark_idle(struct drm_device *dev); 585extern void intel_mark_idle(struct drm_device *dev);
493extern bool intel_lvds_init(struct drm_device *dev); 586extern void intel_lvds_init(struct drm_device *dev);
494extern bool intel_is_dual_link_lvds(struct drm_device *dev); 587extern bool intel_is_dual_link_lvds(struct drm_device *dev);
495extern void intel_dp_init(struct drm_device *dev, int output_reg, 588extern void intel_dp_init(struct drm_device *dev, int output_reg,
496 enum port port); 589 enum port port);
497extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 590extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
498 struct intel_connector *intel_connector); 591 struct intel_connector *intel_connector);
499extern void intel_dp_init_link_config(struct intel_dp *intel_dp); 592extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
500extern void intel_dp_start_link_train(struct intel_dp *intel_dp); 593extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
@@ -512,7 +605,6 @@ extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
512extern void ironlake_edp_panel_off(struct intel_dp *intel_dp); 605extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
513extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); 606extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
514extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 607extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
515extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
516extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); 608extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
517extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, 609extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
518 enum plane plane); 610 enum plane plane);
@@ -524,12 +616,14 @@ extern void intel_panel_fini(struct intel_panel *panel);
524 616
525extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 617extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
526 struct drm_display_mode *adjusted_mode); 618 struct drm_display_mode *adjusted_mode);
527extern void intel_pch_panel_fitting(struct drm_device *dev, 619extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
528 int fitting_mode, 620 struct intel_crtc_config *pipe_config,
529 const struct drm_display_mode *mode, 621 int fitting_mode);
530 struct drm_display_mode *adjusted_mode); 622extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
531extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 623 struct intel_crtc_config *pipe_config,
532extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 624 int fitting_mode);
625extern void intel_panel_set_backlight(struct drm_device *dev,
626 u32 level, u32 max);
533extern int intel_panel_setup_backlight(struct drm_connector *connector); 627extern int intel_panel_setup_backlight(struct drm_connector *connector);
534extern void intel_panel_enable_backlight(struct drm_device *dev, 628extern void intel_panel_enable_backlight(struct drm_device *dev,
535 enum pipe pipe); 629 enum pipe pipe);
@@ -553,11 +647,11 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
553extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 647extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
554extern void intel_encoder_destroy(struct drm_encoder *encoder); 648extern void intel_encoder_destroy(struct drm_encoder *encoder);
555extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); 649extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
556extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
557extern void intel_connector_dpms(struct drm_connector *, int mode); 650extern void intel_connector_dpms(struct drm_connector *, int mode);
558extern bool intel_connector_get_hw_state(struct intel_connector *connector); 651extern bool intel_connector_get_hw_state(struct intel_connector *connector);
559extern void intel_modeset_check_state(struct drm_device *dev); 652extern void intel_modeset_check_state(struct drm_device *dev);
560extern void intel_plane_restore(struct drm_plane *plane); 653extern void intel_plane_restore(struct drm_plane *plane);
654extern void intel_plane_disable(struct drm_plane *plane);
561 655
562 656
563static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 657static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -565,19 +659,17 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
565 return to_intel_connector(connector)->encoder; 659 return to_intel_connector(connector)->encoder;
566} 660}
567 661
568static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
569{
570 struct intel_digital_port *intel_dig_port =
571 container_of(encoder, struct intel_digital_port, base.base);
572 return &intel_dig_port->dp;
573}
574
575static inline struct intel_digital_port * 662static inline struct intel_digital_port *
576enc_to_dig_port(struct drm_encoder *encoder) 663enc_to_dig_port(struct drm_encoder *encoder)
577{ 664{
578 return container_of(encoder, struct intel_digital_port, base.base); 665 return container_of(encoder, struct intel_digital_port, base.base);
579} 666}
580 667
668static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
669{
670 return &enc_to_dig_port(encoder)->dp;
671}
672
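The reordering above exists so enc_to_intel_dp() can reuse enc_to_dig_port() instead of open-coding the container_of(). A sketch of the same pattern applied to the other member of struct intel_digital_port (illustration only, not part of the patch):

static inline struct intel_hdmi *example_enc_to_intel_hdmi(struct drm_encoder *encoder)
{
	/* container_of() recovers the enclosing struct from a member
	 * pointer, so this stays correct even if base.base is not the
	 * first member; a plain cast would silently break in that case. */
	return &enc_to_dig_port(encoder)->hdmi;
}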
581static inline struct intel_digital_port * 673static inline struct intel_digital_port *
582dp_to_dig_port(struct intel_dp *intel_dp) 674dp_to_dig_port(struct intel_dp *intel_dp)
583{ 675{
@@ -607,6 +699,7 @@ intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
607extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 699extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
608extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 700extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
609extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 701extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
702extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
610 703
611struct intel_load_detect_pipe { 704struct intel_load_detect_pipe {
612 struct drm_framebuffer *release_fb; 705 struct drm_framebuffer *release_fb;
@@ -660,13 +753,9 @@ extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
660#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 753#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
661 754
662extern void intel_init_clock_gating(struct drm_device *dev); 755extern void intel_init_clock_gating(struct drm_device *dev);
756extern void intel_suspend_hw(struct drm_device *dev);
663extern void intel_write_eld(struct drm_encoder *encoder, 757extern void intel_write_eld(struct drm_encoder *encoder,
664 struct drm_display_mode *mode); 758 struct drm_display_mode *mode);
665extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
666extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
667 struct intel_link_m_n *m_n);
668extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
669 struct intel_link_m_n *m_n);
670extern void intel_prepare_ddi(struct drm_device *dev); 759extern void intel_prepare_ddi(struct drm_device *dev);
671extern void hsw_fdi_link_train(struct drm_crtc *crtc); 760extern void hsw_fdi_link_train(struct drm_crtc *crtc);
672extern void intel_ddi_init(struct drm_device *dev, enum port port); 761extern void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -675,9 +764,7 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
675extern void intel_update_watermarks(struct drm_device *dev); 764extern void intel_update_watermarks(struct drm_device *dev);
676extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 765extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
677 uint32_t sprite_width, 766 uint32_t sprite_width,
678 int pixel_size); 767 int pixel_size, bool enable);
679extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
680 struct drm_display_mode *mode);
681 768
682extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 769extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
683 unsigned int tiling_mode, 770 unsigned int tiling_mode,
@@ -689,8 +776,6 @@ extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
689extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 776extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
690 struct drm_file *file_priv); 777 struct drm_file *file_priv);
691 778
692extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
693
694/* Power-related functions, located in intel_pm.c */ 779/* Power-related functions, located in intel_pm.c */
695extern void intel_init_pm(struct drm_device *dev); 780extern void intel_init_pm(struct drm_device *dev);
696/* FBC */ 781/* FBC */
@@ -701,7 +786,12 @@ extern void intel_update_fbc(struct drm_device *dev);
701extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 786extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
702extern void intel_gpu_ips_teardown(void); 787extern void intel_gpu_ips_teardown(void);
703 788
704extern bool intel_using_power_well(struct drm_device *dev); 789/* Power well */
790extern int i915_init_power_well(struct drm_device *dev);
791extern void i915_remove_power_well(struct drm_device *dev);
792
793extern bool intel_display_power_enabled(struct drm_device *dev,
794 enum intel_display_power_domain domain);
705extern void intel_init_power_well(struct drm_device *dev); 795extern void intel_init_power_well(struct drm_device *dev);
706extern void intel_set_power_well(struct drm_device *dev, bool enable); 796extern void intel_set_power_well(struct drm_device *dev, bool enable);
707extern void intel_enable_gt_powersave(struct drm_device *dev); 797extern void intel_enable_gt_powersave(struct drm_device *dev);
@@ -719,7 +809,7 @@ extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
719extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); 809extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
720extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); 810extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
721extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev); 811extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
722extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); 812extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
723extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); 813extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
724extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); 814extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
725extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); 815extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@@ -728,5 +818,11 @@ intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
728extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); 818extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
729 819
730extern void intel_display_handle_reset(struct drm_device *dev); 820extern void intel_display_handle_reset(struct drm_device *dev);
821extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
822 enum pipe pipe,
823 bool enable);
824extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
825 enum transcoder pch_transcoder,
826 bool enable);
731 827
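A sketch of how these two prototypes pair with the new cpu_fifo_underrun_disabled/pch_fifo_underrun_disabled flags in struct intel_crtc above. The call site is hypothetical, and the bool return is assumed to report the previous reporting state:

/* Hypothetical modeset sequence: mask underrun reporting while the
 * pipe is being reconfigured, then restore the old setting. */
static void example_reconfigure_pipe(struct drm_device *dev, enum pipe pipe)
{
	bool was_enabled;

	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
	/* ... program new pipe timings; transient underruns are expected ... */
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
}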
732#endif /* __INTEL_DRV_H__ */ 828#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index cc70b16d5d42..eb2020eb2b7e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -54,6 +54,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
54 .dev_ops = &ch7xxx_ops, 54 .dev_ops = &ch7xxx_ops,
55 }, 55 },
56 { 56 {
57 .type = INTEL_DVO_CHIP_TMDS,
58 .name = "ch7xxx",
59 .dvo_reg = DVOC,
60 .slave_addr = 0x75, /* For some ch7010 */
61 .dev_ops = &ch7xxx_ops,
62 },
63 {
57 .type = INTEL_DVO_CHIP_LVDS, 64 .type = INTEL_DVO_CHIP_LVDS,
58 .name = "ivch", 65 .name = "ivch",
59 .dvo_reg = DVOA, 66 .dvo_reg = DVOA,
@@ -129,6 +136,26 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
129 return true; 136 return true;
130} 137}
131 138
139static void intel_dvo_get_config(struct intel_encoder *encoder,
140 struct intel_crtc_config *pipe_config)
141{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
144 u32 tmp, flags = 0;
145
146 tmp = I915_READ(intel_dvo->dev.dvo_reg);
147 if (tmp & DVO_HSYNC_ACTIVE_HIGH)
148 flags |= DRM_MODE_FLAG_PHSYNC;
149 else
150 flags |= DRM_MODE_FLAG_NHSYNC;
151 if (tmp & DVO_VSYNC_ACTIVE_HIGH)
152 flags |= DRM_MODE_FLAG_PVSYNC;
153 else
154 flags |= DRM_MODE_FLAG_NVSYNC;
155
156 pipe_config->adjusted_mode.flags |= flags;
157}
158
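The new ->get_config callbacks in this series all follow the same shape, so the modeset state checker can compare read-back hardware state against the computed pipe_config. Condensed from the DVO version above (same bit and flag names; the wrapper function is just for illustration):

static void example_decode_sync_flags(struct intel_crtc_config *pipe_config,
				      u32 tmp)
{
	u32 flags = 0;

	/* Translate the port register's sync-polarity bits back into
	 * DRM mode flags; exactly one H and one V flag ends up set. */
	flags |= (tmp & DVO_HSYNC_ACTIVE_HIGH) ? DRM_MODE_FLAG_PHSYNC
					       : DRM_MODE_FLAG_NHSYNC;
	flags |= (tmp & DVO_VSYNC_ACTIVE_HIGH) ? DRM_MODE_FLAG_PVSYNC
					       : DRM_MODE_FLAG_NVSYNC;

	pipe_config->adjusted_mode.flags |= flags;
}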
132static void intel_disable_dvo(struct intel_encoder *encoder) 159static void intel_disable_dvo(struct intel_encoder *encoder)
133{ 160{
134 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 161 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -153,6 +180,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
153 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 180 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
154} 181}
155 182
183/* Special dpms function to support cloning between dvo/sdvo/crt. */
156static void intel_dvo_dpms(struct drm_connector *connector, int mode) 184static void intel_dvo_dpms(struct drm_connector *connector, int mode)
157{ 185{
158 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 186 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@@ -174,6 +202,8 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
174 return; 202 return;
175 } 203 }
176 204
205 /* We call connector dpms manually below in case pipe dpms doesn't
206 * change due to cloning. */
177 if (mode == DRM_MODE_DPMS_ON) { 207 if (mode == DRM_MODE_DPMS_ON) {
178 intel_dvo->base.connectors_active = true; 208 intel_dvo->base.connectors_active = true;
179 209
@@ -440,6 +470,7 @@ void intel_dvo_init(struct drm_device *dev)
440 intel_encoder->disable = intel_disable_dvo; 470 intel_encoder->disable = intel_disable_dvo;
441 intel_encoder->enable = intel_enable_dvo; 471 intel_encoder->enable = intel_enable_dvo;
442 intel_encoder->get_hw_state = intel_dvo_get_hw_state; 472 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
473 intel_encoder->get_config = intel_dvo_get_config;
443 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 474 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
444 475
445 /* Now, try to find a controller */ 476 /* Now, try to find a controller */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6b7c3ca2c035..dff669e2387f 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -60,8 +60,9 @@ static struct fb_ops intelfb_ops = {
60static int intelfb_create(struct drm_fb_helper *helper, 60static int intelfb_create(struct drm_fb_helper *helper,
61 struct drm_fb_helper_surface_size *sizes) 61 struct drm_fb_helper_surface_size *sizes)
62{ 62{
63 struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; 63 struct intel_fbdev *ifbdev =
64 struct drm_device *dev = ifbdev->helper.dev; 64 container_of(helper, struct intel_fbdev, helper);
65 struct drm_device *dev = helper->dev;
65 struct drm_i915_private *dev_priv = dev->dev_private; 66 struct drm_i915_private *dev_priv = dev->dev_private;
66 struct fb_info *info; 67 struct fb_info *info;
67 struct drm_framebuffer *fb; 68 struct drm_framebuffer *fb;
@@ -108,7 +109,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
108 goto out_unpin; 109 goto out_unpin;
109 } 110 }
110 111
111 info->par = ifbdev; 112 info->par = helper;
112 113
113 ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); 114 ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
114 if (ret) 115 if (ret)
@@ -217,7 +218,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
217int intel_fbdev_init(struct drm_device *dev) 218int intel_fbdev_init(struct drm_device *dev)
218{ 219{
219 struct intel_fbdev *ifbdev; 220 struct intel_fbdev *ifbdev;
220 drm_i915_private_t *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
221 int ret; 222 int ret;
222 223
223 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); 224 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -242,7 +243,7 @@ int intel_fbdev_init(struct drm_device *dev)
242 243
243void intel_fbdev_initial_config(struct drm_device *dev) 244void intel_fbdev_initial_config(struct drm_device *dev)
244{ 245{
245 drm_i915_private_t *dev_priv = dev->dev_private; 246 struct drm_i915_private *dev_priv = dev->dev_private;
246 247
247 /* Due to peculiar init order wrt hpd handling this is separate. */ 248 /* Due to peculiar init order wrt hpd handling this is separate. */
248 drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); 249 drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
@@ -250,7 +251,7 @@ void intel_fbdev_initial_config(struct drm_device *dev)
250 251
251void intel_fbdev_fini(struct drm_device *dev) 252void intel_fbdev_fini(struct drm_device *dev)
252{ 253{
253 drm_i915_private_t *dev_priv = dev->dev_private; 254 struct drm_i915_private *dev_priv = dev->dev_private;
254 if (!dev_priv->fbdev) 255 if (!dev_priv->fbdev)
255 return; 256 return;
256 257
@@ -261,7 +262,7 @@ void intel_fbdev_fini(struct drm_device *dev)
261 262
262void intel_fbdev_set_suspend(struct drm_device *dev, int state) 263void intel_fbdev_set_suspend(struct drm_device *dev, int state)
263{ 264{
264 drm_i915_private_t *dev_priv = dev->dev_private; 265 struct drm_i915_private *dev_priv = dev->dev_private;
265 struct intel_fbdev *ifbdev = dev_priv->fbdev; 266 struct intel_fbdev *ifbdev = dev_priv->fbdev;
266 struct fb_info *info; 267 struct fb_info *info;
267 268
@@ -274,7 +275,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
274 * been restored from swap. If the object is stolen however, it will be 275 * been restored from swap. If the object is stolen however, it will be
275 * full of whatever garbage was left in there. 276 * full of whatever garbage was left in there.
276 */ 277 */
277 if (!state && ifbdev->ifb.obj->stolen) 278 if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen)
278 memset_io(info->screen_base, 0, info->screen_size); 279 memset_io(info->screen_base, 0, info->screen_size);
279 280
280 fb_set_suspend(info, state); 281 fb_set_suspend(info, state);
@@ -284,16 +285,14 @@ MODULE_LICENSE("GPL and additional rights");
284 285
285void intel_fb_output_poll_changed(struct drm_device *dev) 286void intel_fb_output_poll_changed(struct drm_device *dev)
286{ 287{
287 drm_i915_private_t *dev_priv = dev->dev_private; 288 struct drm_i915_private *dev_priv = dev->dev_private;
288 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 289 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
289} 290}
290 291
291void intel_fb_restore_mode(struct drm_device *dev) 292void intel_fb_restore_mode(struct drm_device *dev)
292{ 293{
293 int ret; 294 int ret;
294 drm_i915_private_t *dev_priv = dev->dev_private; 295 struct drm_i915_private *dev_priv = dev->dev_private;
295 struct drm_mode_config *config = &dev->mode_config;
296 struct drm_plane *plane;
297 296
298 if (INTEL_INFO(dev)->num_pipes == 0) 297 if (INTEL_INFO(dev)->num_pipes == 0)
299 return; 298 return;
@@ -304,10 +303,5 @@ void intel_fb_restore_mode(struct drm_device *dev)
304 if (ret) 303 if (ret)
305 DRM_DEBUG("failed to restore crtc mode\n"); 304 DRM_DEBUG("failed to restore crtc mode\n");
306 305
307 /* Be sure to shut off any planes that may be active */
308 list_for_each_entry(plane, &config->plane_list, head)
309 if (plane->enabled)
310 plane->funcs->disable_plane(plane);
311
312 drm_modeset_unlock_all(dev); 306 drm_modeset_unlock_all(dev);
313} 307}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a9057930f2b2..98df2a0c85bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -602,7 +602,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
602 u32 hdmi_val; 602 u32 hdmi_val;
603 603
604 hdmi_val = SDVO_ENCODING_HDMI; 604 hdmi_val = SDVO_ENCODING_HDMI;
605 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 605 if (!HAS_PCH_SPLIT(dev))
606 hdmi_val |= intel_hdmi->color_range; 606 hdmi_val |= intel_hdmi->color_range;
607 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 607 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
608 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; 608 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -658,6 +658,28 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
658 return true; 658 return true;
659} 659}
660 660
661static void intel_hdmi_get_config(struct intel_encoder *encoder,
662 struct intel_crtc_config *pipe_config)
663{
664 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
665 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
666 u32 tmp, flags = 0;
667
668 tmp = I915_READ(intel_hdmi->hdmi_reg);
669
670 if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
671 flags |= DRM_MODE_FLAG_PHSYNC;
672 else
673 flags |= DRM_MODE_FLAG_NHSYNC;
674
675 if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
676 flags |= DRM_MODE_FLAG_PVSYNC;
677 else
678 flags |= DRM_MODE_FLAG_NVSYNC;
679
680 pipe_config->adjusted_mode.flags |= flags;
681}
682
661static void intel_enable_hdmi(struct intel_encoder *encoder) 683static void intel_enable_hdmi(struct intel_encoder *encoder)
662{ 684{
663 struct drm_device *dev = encoder->base.dev; 685 struct drm_device *dev = encoder->base.dev;
@@ -697,6 +719,14 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
697 I915_WRITE(intel_hdmi->hdmi_reg, temp); 719 I915_WRITE(intel_hdmi->hdmi_reg, temp);
698 POSTING_READ(intel_hdmi->hdmi_reg); 720 POSTING_READ(intel_hdmi->hdmi_reg);
699 } 721 }
722
723 if (IS_VALLEYVIEW(dev)) {
724 struct intel_digital_port *dport =
725 enc_to_dig_port(&encoder->base);
726 int channel = vlv_dport_to_channel(dport);
727
728 vlv_wait_port_ready(dev_priv, channel);
729 }
700} 730}
701 731
702static void intel_disable_hdmi(struct intel_encoder *encoder) 732static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -775,6 +805,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
775 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 805 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
776 struct drm_device *dev = encoder->base.dev; 806 struct drm_device *dev = encoder->base.dev;
777 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 807 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
808 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
809 int desired_bpp;
778 810
779 if (intel_hdmi->color_range_auto) { 811 if (intel_hdmi->color_range_auto) {
780 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 812 /* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -794,14 +826,29 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
794 /* 826 /*
795 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 827 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
796 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi 828 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
797 * outputs. 829 * outputs. We also need to check that the higher clock still fits
830 * within limits.
798 */ 831 */
799 if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) { 832 if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
800 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); 833 && HAS_PCH_SPLIT(dev)) {
801 pipe_config->pipe_bpp = 12*3; 834 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
835 desired_bpp = 12*3;
836
837 /* Need to adjust the port link by 1.5x for 12bpc. */
838 pipe_config->port_clock = clock_12bpc;
802 } else { 839 } else {
803 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); 840 DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
804 pipe_config->pipe_bpp = 8*3; 841 desired_bpp = 8*3;
842 }
843
844 if (!pipe_config->bw_constrained) {
845 DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
846 pipe_config->pipe_bpp = desired_bpp;
847 }
848
849 if (adjusted_mode->clock > 225000) {
850 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
851 return false;
805 } 852 }
806 853
807 return true; 854 return true;
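Worked numbers for the new 12bpc clock check; the 1080p figure is an illustrative assumption, while the 3/2 factor and the 225 MHz ceiling are from the code above:

/* requested_mode.clock is in kHz; 12bpc carries 3/2 the 8bpc rate.
 * For a 148500 kHz (1080p60) mode:
 *   clock_12bpc = 148500 * 3 / 2 = 222750
 * 222750 <= 225000, so the mode may still be upgraded to 12bpc.
 * Any mode whose adjusted clock exceeds 225000 kHz is rejected
 * outright by the final check, regardless of bpc. */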
@@ -955,6 +1002,97 @@ done:
955 return 0; 1002 return 0;
956} 1003}
957 1004
1005static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1006{
1007 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1008 struct drm_device *dev = encoder->base.dev;
1009 struct drm_i915_private *dev_priv = dev->dev_private;
1010 struct intel_crtc *intel_crtc =
1011 to_intel_crtc(encoder->base.crtc);
1012 int port = vlv_dport_to_channel(dport);
1013 int pipe = intel_crtc->pipe;
1014 u32 val;
1015
1016 if (!IS_VALLEYVIEW(dev))
1017 return;
1018
1019 /* Enable clock channels for this port */
1020 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1021 val = 0;
1022 if (pipe)
1023 val |= (1<<21);
1024 else
1025 val &= ~(1<<21);
1026 val |= 0x001000c4;
1027 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1028
1029 /* HDMI 1.0V-2dB */
1030 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
1031 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
1032 0x2b245f5f);
1033 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
1034 0x5578b83a);
1035 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
1036 0x0c782040);
1037 vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
1038 0x2b247878);
1039 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1040 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
1041 0x00002000);
1042 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
1043 DPIO_TX_OCALINIT_EN);
1044
1045 /* Program lane clock */
1046 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
1047 0x00760018);
1048 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
1049 0x00400888);
1050}
1051
1052static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1053{
1054 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1055 struct drm_device *dev = encoder->base.dev;
1056 struct drm_i915_private *dev_priv = dev->dev_private;
1057 int port = vlv_dport_to_channel(dport);
1058
1059 if (!IS_VALLEYVIEW(dev))
1060 return;
1061
1062 /* Program Tx lane resets to default */
1063 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
1064 DPIO_PCS_TX_LANE2_RESET |
1065 DPIO_PCS_TX_LANE1_RESET);
1066 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
1067 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1068 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1069 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1070 DPIO_PCS_CLK_SOFT_RESET);
1071
1072 /* Fix up inter-pair skew failure */
1073 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1074 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1075 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
1076
1077 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
1078 0x00002000);
1079 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
1080 DPIO_TX_OCALINIT_EN);
1081}
1082
1083static void intel_hdmi_post_disable(struct intel_encoder *encoder)
1084{
1085 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1086 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1087 int port = vlv_dport_to_channel(dport);
1088
1089 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1090 mutex_lock(&dev_priv->dpio_lock);
1091 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
1092 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
1093 mutex_unlock(&dev_priv->dpio_lock);
1094}
1095
958static void intel_hdmi_destroy(struct drm_connector *connector) 1096static void intel_hdmi_destroy(struct drm_connector *connector)
959{ 1097{
960 drm_sysfs_connector_remove(connector); 1098 drm_sysfs_connector_remove(connector);
@@ -1094,6 +1232,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1094 intel_encoder->enable = intel_enable_hdmi; 1232 intel_encoder->enable = intel_enable_hdmi;
1095 intel_encoder->disable = intel_disable_hdmi; 1233 intel_encoder->disable = intel_disable_hdmi;
1096 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1234 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1235 intel_encoder->get_config = intel_hdmi_get_config;
1236 if (IS_VALLEYVIEW(dev)) {
1237 intel_encoder->pre_enable = intel_hdmi_pre_enable;
1238 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
1239 intel_encoder->post_disable = intel_hdmi_post_disable;
1240 }
1097 1241
1098 intel_encoder->type = INTEL_OUTPUT_HDMI; 1242 intel_encoder->type = INTEL_OUTPUT_HDMI;
1099 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 1243 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 29412cc89c7a..2abb2d3c727b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -49,8 +49,6 @@ struct intel_lvds_connector {
49struct intel_lvds_encoder { 49struct intel_lvds_encoder {
50 struct intel_encoder base; 50 struct intel_encoder base;
51 51
52 u32 pfit_control;
53 u32 pfit_pgm_ratios;
54 bool is_dual_link; 52 bool is_dual_link;
55 u32 reg; 53 u32 reg;
56 54
@@ -88,6 +86,31 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
88 return true; 86 return true;
89} 87}
90 88
89static void intel_lvds_get_config(struct intel_encoder *encoder,
90 struct intel_crtc_config *pipe_config)
91{
92 struct drm_device *dev = encoder->base.dev;
93 struct drm_i915_private *dev_priv = dev->dev_private;
94 u32 lvds_reg, tmp, flags = 0;
95
96 if (HAS_PCH_SPLIT(dev))
97 lvds_reg = PCH_LVDS;
98 else
99 lvds_reg = LVDS;
100
101 tmp = I915_READ(lvds_reg);
102 if (tmp & LVDS_HSYNC_POLARITY)
103 flags |= DRM_MODE_FLAG_NHSYNC;
104 else
105 flags |= DRM_MODE_FLAG_PHSYNC;
106 if (tmp & LVDS_VSYNC_POLARITY)
107 flags |= DRM_MODE_FLAG_NVSYNC;
108 else
109 flags |= DRM_MODE_FLAG_PVSYNC;
110
111 pipe_config->adjusted_mode.flags |= flags;
112}
113
91/* The LVDS pin pair needs to be on before the DPLLs are enabled. 114/* The LVDS pin pair needs to be on before the DPLLs are enabled.
92 * This is an exception to the general rule that mode_set doesn't turn 115 * This is an exception to the general rule that mode_set doesn't turn
93 * things on. 116 * things on.
@@ -118,7 +141,8 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
118 } 141 }
119 142
120 /* set the corresponding LVDS_BORDER bit */ 143 /* set the corresponding LVDS_BORDER bit */
121 temp |= dev_priv->lvds_border_bits; 144 temp &= ~LVDS_BORDER_ENABLE;
145 temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
122 /* Set the B0-B3 data pairs corresponding to whether we're going to 146 /* Set the B0-B3 data pairs corresponding to whether we're going to
123 * set the DPLLs for dual-channel mode or not. 147 * set the DPLLs for dual-channel mode or not.
124 */ 148 */
@@ -136,7 +160,10 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
136 * special lvds dither control bit on pch-split platforms, dithering is 160 * special lvds dither control bit on pch-split platforms, dithering is
137 * only controlled through the PIPECONF reg. */ 161 * only controlled through the PIPECONF reg. */
138 if (INTEL_INFO(dev)->gen == 4) { 162 if (INTEL_INFO(dev)->gen == 4) {
139 if (dev_priv->lvds_dither) 163 /* Bspec wording suggests that LVDS port dithering only exists
164 * for 18bpp panels. */
165 if (intel_crtc->config.dither &&
166 intel_crtc->config.pipe_bpp == 18)
140 temp |= LVDS_ENABLE_DITHER; 167 temp |= LVDS_ENABLE_DITHER;
141 else 168 else
142 temp &= ~LVDS_ENABLE_DITHER; 169 temp &= ~LVDS_ENABLE_DITHER;
@@ -150,29 +177,6 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
150 I915_WRITE(lvds_encoder->reg, temp); 177 I915_WRITE(lvds_encoder->reg, temp);
151} 178}
152 179
153static void intel_pre_enable_lvds(struct intel_encoder *encoder)
154{
155 struct drm_device *dev = encoder->base.dev;
156 struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
157 struct drm_i915_private *dev_priv = dev->dev_private;
158
159 if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
160 return;
161
162 /*
163 * Enable automatic panel scaling so that non-native modes
164 * fill the screen. The panel fitter should only be
165 * adjusted whilst the pipe is disabled, according to
166 * register description and PRM.
167 */
168 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
169 enc->pfit_control,
170 enc->pfit_pgm_ratios);
171
172 I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
173 I915_WRITE(PFIT_CONTROL, enc->pfit_control);
174}
175
176/** 180/**
177 * Sets the power state for the panel. 181 * Sets the power state for the panel.
178 */ 182 */
@@ -241,62 +245,6 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
241 return MODE_OK; 245 return MODE_OK;
242} 246}
243 247
244static void
245centre_horizontally(struct drm_display_mode *mode,
246 int width)
247{
248 u32 border, sync_pos, blank_width, sync_width;
249
250 /* keep the hsync and hblank widths constant */
251 sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
252 blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
253 sync_pos = (blank_width - sync_width + 1) / 2;
254
255 border = (mode->hdisplay - width + 1) / 2;
256 border += border & 1; /* make the border even */
257
258 mode->crtc_hdisplay = width;
259 mode->crtc_hblank_start = width + border;
260 mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
261
262 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
263 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
264}
265
266static void
267centre_vertically(struct drm_display_mode *mode,
268 int height)
269{
270 u32 border, sync_pos, blank_width, sync_width;
271
272 /* keep the vsync and vblank widths constant */
273 sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
274 blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
275 sync_pos = (blank_width - sync_width + 1) / 2;
276
277 border = (mode->vdisplay - height + 1) / 2;
278
279 mode->crtc_vdisplay = height;
280 mode->crtc_vblank_start = height + border;
281 mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
282
283 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
284 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
285}
286
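The centring helpers removed below keep the sync and blank widths of the panel's native timing and only shrink the active region, splitting the slack evenly into borders. A worked example of the arithmetic being deleted (illustrative numbers):

/* centre_horizontally(adjusted_mode, 1024) on a 1280-wide panel:
 *   border            = (1280 - 1024 + 1) / 2 = 128 (already even)
 *   crtc_hdisplay     = 1024
 *   crtc_hblank_start = 1024 + 128 = 1152
 * hsync/hblank widths are carried over unchanged, so the panel
 * still receives its native timing with the image centred. */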
287static inline u32 panel_fitter_scaling(u32 source, u32 target)
288{
289 /*
290 * Floating point operation is not supported. So the FACTOR
291 * is defined, which can avoid the floating point computation
292 * when calculating the panel ratio.
293 */
294#define ACCURACY 12
295#define FACTOR (1 << ACCURACY)
296 u32 ratio = source * FACTOR / target;
297 return (FACTOR * ratio + FACTOR/2) / FACTOR;
298}
299
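panel_fitter_scaling() disappears along with the rest of this block, presumably folded into the intel_gmch_panel_fitting() helper declared earlier in this diff. Its fixed-point trick, with numbers:

/* ACCURACY = 12, FACTOR = 4096: ratios are 4.12 fixed point.
 * Scaling a 768-line mode onto a 1024-line panel gives
 *   ratio = 768 * 4096 / 1024 = 3072, i.e. 0.75,
 * the value shifted into PFIT_PGM_RATIOS; FACTOR/2 is added
 * before the final divide so the result rounds instead of
 * truncating. */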
300static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, 248static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
301 struct intel_crtc_config *pipe_config) 249 struct intel_crtc_config *pipe_config)
302{ 250{
@@ -307,11 +255,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
307 struct intel_connector *intel_connector = 255 struct intel_connector *intel_connector =
308 &lvds_encoder->attached_connector->base; 256 &lvds_encoder->attached_connector->base;
309 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 257 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
310 struct drm_display_mode *mode = &pipe_config->requested_mode;
311 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; 258 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
312 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
313 unsigned int lvds_bpp; 259 unsigned int lvds_bpp;
314 int pipe;
315 260
316 /* Should never happen!! */ 261 /* Should never happen!! */
317 if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { 262 if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -319,20 +264,18 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
319 return false; 264 return false;
320 } 265 }
321 266
322 if (intel_encoder_check_is_cloned(&lvds_encoder->base))
323 return false;
324
325 if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == 267 if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
326 LVDS_A3_POWER_UP) 268 LVDS_A3_POWER_UP)
327 lvds_bpp = 8*3; 269 lvds_bpp = 8*3;
328 else 270 else
329 lvds_bpp = 6*3; 271 lvds_bpp = 6*3;
330 272
331 if (lvds_bpp != pipe_config->pipe_bpp) { 273 if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
332 DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n", 274 DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
333 pipe_config->pipe_bpp, lvds_bpp); 275 pipe_config->pipe_bpp, lvds_bpp);
334 pipe_config->pipe_bpp = lvds_bpp; 276 pipe_config->pipe_bpp = lvds_bpp;
335 } 277 }
278
336 /* 279 /*
337 * We have timings from the BIOS for the panel, put them in 280 * We have timings from the BIOS for the panel, put them in
338 * to the adjusted mode. The CRTC will be set up for this mode, 281 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -345,139 +288,17 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
345 if (HAS_PCH_SPLIT(dev)) { 288 if (HAS_PCH_SPLIT(dev)) {
346 pipe_config->has_pch_encoder = true; 289 pipe_config->has_pch_encoder = true;
347 290
348 intel_pch_panel_fitting(dev, 291 intel_pch_panel_fitting(intel_crtc, pipe_config,
349 intel_connector->panel.fitting_mode, 292 intel_connector->panel.fitting_mode);
350 mode, adjusted_mode);
351 return true; 293 return true;
294 } else {
295 intel_gmch_panel_fitting(intel_crtc, pipe_config,
296 intel_connector->panel.fitting_mode);
352 } 297 }
353 298
354 /* Native modes don't need fitting */
355 if (adjusted_mode->hdisplay == mode->hdisplay &&
356 adjusted_mode->vdisplay == mode->vdisplay)
357 goto out;
358
359 /* 965+ wants fuzzy fitting */
360 if (INTEL_INFO(dev)->gen >= 4)
361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
362 PFIT_FILTER_FUZZY);
363
364 /*
365 * Enable automatic panel scaling for non-native modes so that they fill
366 * the screen. Should be enabled before the pipe is enabled, according
367 * to register description and PRM.
368 * Change the value here to see the borders for debugging
369 */
370 for_each_pipe(pipe)
371 I915_WRITE(BCLRPAT(pipe), 0);
372
373 drm_mode_set_crtcinfo(adjusted_mode, 0); 299 drm_mode_set_crtcinfo(adjusted_mode, 0);
374 pipe_config->timings_set = true; 300 pipe_config->timings_set = true;
375 301
376 switch (intel_connector->panel.fitting_mode) {
377 case DRM_MODE_SCALE_CENTER:
378 /*
379 * For centered modes, we have to calculate border widths &
380 * heights and modify the values programmed into the CRTC.
381 */
382 centre_horizontally(adjusted_mode, mode->hdisplay);
383 centre_vertically(adjusted_mode, mode->vdisplay);
384 border = LVDS_BORDER_ENABLE;
385 break;
386
387 case DRM_MODE_SCALE_ASPECT:
388 /* Scale but preserve the aspect ratio */
389 if (INTEL_INFO(dev)->gen >= 4) {
390 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
391 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
392
393 /* 965+ is easy, it does everything in hw */
394 if (scaled_width > scaled_height)
395 pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
396 else if (scaled_width < scaled_height)
397 pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
398 else if (adjusted_mode->hdisplay != mode->hdisplay)
399 pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
400 } else {
401 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
402 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
403 /*
404 * For earlier chips we have to calculate the scaling
405 * ratio by hand and program it into the
406 * PFIT_PGM_RATIO register
407 */
408 if (scaled_width > scaled_height) { /* pillar */
409 centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
410
411 border = LVDS_BORDER_ENABLE;
412 if (mode->vdisplay != adjusted_mode->vdisplay) {
413 u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
414 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
415 bits << PFIT_VERT_SCALE_SHIFT);
416 pfit_control |= (PFIT_ENABLE |
417 VERT_INTERP_BILINEAR |
418 HORIZ_INTERP_BILINEAR);
419 }
420 } else if (scaled_width < scaled_height) { /* letter */
421 centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
422
423 border = LVDS_BORDER_ENABLE;
424 if (mode->hdisplay != adjusted_mode->hdisplay) {
425 u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
426 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
427 bits << PFIT_VERT_SCALE_SHIFT);
428 pfit_control |= (PFIT_ENABLE |
429 VERT_INTERP_BILINEAR |
430 HORIZ_INTERP_BILINEAR);
431 }
432 } else
433 /* Aspects match, Let hw scale both directions */
434 pfit_control |= (PFIT_ENABLE |
435 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
436 VERT_INTERP_BILINEAR |
437 HORIZ_INTERP_BILINEAR);
438 }
439 break;
440
441 case DRM_MODE_SCALE_FULLSCREEN:
442 /*
443 * Full scaling, even if it changes the aspect ratio.
444 * Fortunately this is all done for us in hw.
445 */
446 if (mode->vdisplay != adjusted_mode->vdisplay ||
447 mode->hdisplay != adjusted_mode->hdisplay) {
448 pfit_control |= PFIT_ENABLE;
449 if (INTEL_INFO(dev)->gen >= 4)
450 pfit_control |= PFIT_SCALING_AUTO;
451 else
452 pfit_control |= (VERT_AUTO_SCALE |
453 VERT_INTERP_BILINEAR |
454 HORIZ_AUTO_SCALE |
455 HORIZ_INTERP_BILINEAR);
456 }
457 break;
458
459 default:
460 break;
461 }
462
463out:
464 /* If not enabling scaling, be consistent and always use 0. */
465 if ((pfit_control & PFIT_ENABLE) == 0) {
466 pfit_control = 0;
467 pfit_pgm_ratios = 0;
468 }
469
470 /* Make sure pre-965 set dither correctly */
471 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
472 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
473
474 if (pfit_control != lvds_encoder->pfit_control ||
475 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
476 lvds_encoder->pfit_control = pfit_control;
477 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
478 }
479 dev_priv->lvds_border_bits = border;
480
481 /* 302 /*
482 * XXX: It would be nice to support lower refresh rates on the 303 * XXX: It would be nice to support lower refresh rates on the
483 * panels to reduce power consumption, and perhaps match the 304 * panels to reduce power consumption, and perhaps match the
@@ -937,11 +758,11 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
937 struct drm_i915_private *dev_priv = dev->dev_private; 758 struct drm_i915_private *dev_priv = dev->dev_private;
938 int i; 759 int i;
939 760
940 if (!dev_priv->child_dev_num) 761 if (!dev_priv->vbt.child_dev_num)
941 return true; 762 return true;
942 763
943 for (i = 0; i < dev_priv->child_dev_num; i++) { 764 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
944 struct child_device_config *child = dev_priv->child_dev + i; 765 struct child_device_config *child = dev_priv->vbt.child_dev + i;
945 766
946 /* If the device type is not LFP, continue. 767 /* If the device type is not LFP, continue.
947 * We have to check both the new identifiers as well as the 768 * We have to check both the new identifiers as well as the
@@ -1029,7 +850,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
1029 */ 850 */
1030 val = I915_READ(lvds_encoder->reg); 851 val = I915_READ(lvds_encoder->reg);
1031 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) 852 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
1032 val = dev_priv->bios_lvds_val; 853 val = dev_priv->vbt.bios_lvds_val;
1033 854
1034 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; 855 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
1035} 856}
@@ -1056,7 +877,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
1056 * Create the connector, register the LVDS DDC bus, and try to figure out what 877 * Create the connector, register the LVDS DDC bus, and try to figure out what
1057 * modes we can display on the LVDS panel (if present). 878 * modes we can display on the LVDS panel (if present).
1058 */ 879 */
1059bool intel_lvds_init(struct drm_device *dev) 880void intel_lvds_init(struct drm_device *dev)
1060{ 881{
1061 struct drm_i915_private *dev_priv = dev->dev_private; 882 struct drm_i915_private *dev_priv = dev->dev_private;
1062 struct intel_lvds_encoder *lvds_encoder; 883 struct intel_lvds_encoder *lvds_encoder;
@@ -1074,43 +895,39 @@ bool intel_lvds_init(struct drm_device *dev)
1074 u8 pin; 895 u8 pin;
1075 896
1076 if (!intel_lvds_supported(dev)) 897 if (!intel_lvds_supported(dev))
1077 return false; 898 return;
1078 899
1079 /* Skip init on machines we know falsely report LVDS */ 900 /* Skip init on machines we know falsely report LVDS */
1080 if (dmi_check_system(intel_no_lvds)) 901 if (dmi_check_system(intel_no_lvds))
1081 return false; 902 return;
1082 903
1083 pin = GMBUS_PORT_PANEL; 904 pin = GMBUS_PORT_PANEL;
1084 if (!lvds_is_present_in_vbt(dev, &pin)) { 905 if (!lvds_is_present_in_vbt(dev, &pin)) {
1085 DRM_DEBUG_KMS("LVDS is not present in VBT\n"); 906 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
1086 return false; 907 return;
1087 } 908 }
1088 909
1089 if (HAS_PCH_SPLIT(dev)) { 910 if (HAS_PCH_SPLIT(dev)) {
1090 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 911 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
1091 return false; 912 return;
1092 if (dev_priv->edp.support) { 913 if (dev_priv->vbt.edp_support) {
1093 DRM_DEBUG_KMS("disable LVDS for eDP support\n"); 914 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
1094 return false; 915 return;
1095 } 916 }
1096 } 917 }
1097 918
1098 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); 919 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
1099 if (!lvds_encoder) 920 if (!lvds_encoder)
1100 return false; 921 return;
1101 922
1102 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); 923 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
1103 if (!lvds_connector) { 924 if (!lvds_connector) {
1104 kfree(lvds_encoder); 925 kfree(lvds_encoder);
1105 return false; 926 return;
1106 } 927 }
1107 928
1108 lvds_encoder->attached_connector = lvds_connector; 929 lvds_encoder->attached_connector = lvds_connector;
1109 930
1110 if (!HAS_PCH_SPLIT(dev)) {
1111 lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
1112 }
1113
1114 intel_encoder = &lvds_encoder->base; 931 intel_encoder = &lvds_encoder->base;
1115 encoder = &intel_encoder->base; 932 encoder = &intel_encoder->base;
1116 intel_connector = &lvds_connector->base; 933 intel_connector = &lvds_connector->base;
@@ -1122,11 +939,11 @@ bool intel_lvds_init(struct drm_device *dev)
1122 DRM_MODE_ENCODER_LVDS); 939 DRM_MODE_ENCODER_LVDS);
1123 940
1124 intel_encoder->enable = intel_enable_lvds; 941 intel_encoder->enable = intel_enable_lvds;
1125 intel_encoder->pre_enable = intel_pre_enable_lvds;
1126 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; 942 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
1127 intel_encoder->compute_config = intel_lvds_compute_config; 943 intel_encoder->compute_config = intel_lvds_compute_config;
1128 intel_encoder->disable = intel_disable_lvds; 944 intel_encoder->disable = intel_disable_lvds;
1129 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 945 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
946 intel_encoder->get_config = intel_lvds_get_config;
1130 intel_connector->get_hw_state = intel_connector_get_hw_state; 947 intel_connector->get_hw_state = intel_connector_get_hw_state;
1131 948
1132 intel_connector_attach_encoder(intel_connector, intel_encoder); 949 intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -1212,11 +1029,11 @@ bool intel_lvds_init(struct drm_device *dev)
1212 } 1029 }
1213 1030
1214 /* Failed to get EDID, what about VBT? */ 1031 /* Failed to get EDID, what about VBT? */
1215 if (dev_priv->lfp_lvds_vbt_mode) { 1032 if (dev_priv->vbt.lfp_lvds_vbt_mode) {
1216 DRM_DEBUG_KMS("using mode from VBT: "); 1033 DRM_DEBUG_KMS("using mode from VBT: ");
1217 drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode); 1034 drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
1218 1035
1219 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 1036 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
1220 if (fixed_mode) { 1037 if (fixed_mode) {
1221 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 1038 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1222 goto out; 1039 goto out;
@@ -1277,7 +1094,7 @@ out:
1277 intel_panel_init(&intel_connector->panel, fixed_mode); 1094 intel_panel_init(&intel_connector->panel, fixed_mode);
1278 intel_panel_setup_backlight(connector); 1095 intel_panel_setup_backlight(connector);
1279 1096
1280 return true; 1097 return;
1281 1098
1282failed: 1099failed:
1283 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1100 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1287,5 +1104,5 @@ failed:
1287 drm_mode_destroy(dev, fixed_mode); 1104 drm_mode_destroy(dev, fixed_mode);
1288 kfree(lvds_encoder); 1105 kfree(lvds_encoder);
1289 kfree(lvds_connector); 1106 kfree(lvds_connector);
1290 return false; 1107 return;
1291} 1108}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index a8117e614009..cfb8fb68f09c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -110,6 +110,10 @@ struct opregion_asle {
110 u8 rsvd[102]; 110 u8 rsvd[102];
111} __attribute__((packed)); 111} __attribute__((packed));
112 112
113/* Driver readiness indicator */
114#define ASLE_ARDY_READY (1 << 0)
115#define ASLE_ARDY_NOT_READY (0 << 0)
116
113/* ASLE irq request bits */ 117/* ASLE irq request bits */
114#define ASLE_SET_ALS_ILLUM (1 << 0) 118#define ASLE_SET_ALS_ILLUM (1 << 0)
115#define ASLE_SET_BACKLIGHT (1 << 1) 119#define ASLE_SET_BACKLIGHT (1 << 1)
@@ -123,6 +127,12 @@ struct opregion_asle {
123#define ASLE_PFIT_FAILED (1<<14) 127#define ASLE_PFIT_FAILED (1<<14)
124#define ASLE_PWM_FREQ_FAILED (1<<16) 128#define ASLE_PWM_FREQ_FAILED (1<<16)
125 129
130/* Technology enabled indicator */
131#define ASLE_TCHE_ALS_EN (1 << 0)
132#define ASLE_TCHE_BLC_EN (1 << 1)
133#define ASLE_TCHE_PFIT_EN (1 << 2)
134#define ASLE_TCHE_PFMB_EN (1 << 3)
135
126/* ASLE backlight brightness to set */ 136/* ASLE backlight brightness to set */
127#define ASLE_BCLP_VALID (1<<31) 137#define ASLE_BCLP_VALID (1<<31)
128#define ASLE_BCLP_MSK (~(1<<31)) 138#define ASLE_BCLP_MSK (~(1<<31))
@@ -152,7 +162,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
152{ 162{
153 struct drm_i915_private *dev_priv = dev->dev_private; 163 struct drm_i915_private *dev_priv = dev->dev_private;
154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 164 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
155 u32 max;
156 165
157 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 166 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
158 167
@@ -163,8 +172,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
163 if (bclp > 255) 172 if (bclp > 255)
164 return ASLE_BACKLIGHT_FAILED; 173 return ASLE_BACKLIGHT_FAILED;
165 174
166 max = intel_panel_get_max_backlight(dev); 175 intel_panel_set_backlight(dev, bclp, 255);
167 intel_panel_set_backlight(dev, bclp * max / 255);
168 iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); 176 iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
169 177
170 return 0; 178 return 0;
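After this rework the max-backlight lookup happens inside intel_panel_set_backlight(), whose new third parameter appears in intel_drv.h earlier in this diff; the ASLE handler just forwards the raw ACPI value and its range. The cblv write-back rescales the same value to a percentage:

/* bclp arrives as 0..255 from the BIOS; the driver call is now
 * intel_panel_set_backlight(dev, bclp, 255), and the reported
 * level is
 *   cblv = bclp * 0x64 / 0xff   e.g. 128 * 100 / 255 = 50
 * or'ed with ASLE_CBLV_VALID, exactly as written above. */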
@@ -174,29 +182,22 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
174{ 182{
175 /* alsi is the current ALS reading in lux. 0 indicates below sensor 183 /* alsi is the current ALS reading in lux. 0 indicates below sensor
176 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 184 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
177 return 0; 185 DRM_DEBUG_DRIVER("Illum is not supported\n");
186 return ASLE_ALS_ILLUM_FAILED;
178} 187}
179 188
180static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 189static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
181{ 190{
182 struct drm_i915_private *dev_priv = dev->dev_private; 191 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
183 if (pfmb & ASLE_PFMB_PWM_VALID) { 192 return ASLE_PWM_FREQ_FAILED;
184 u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
185 u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
186 blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
187 pwm = pwm >> 9;
188 /* FIXME - what do we do with the PWM? */
189 }
190 return 0;
191} 193}
192 194
193static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 195static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
194{ 196{
195 /* Panel fitting is currently controlled by the X code, so this is a 197 /* Panel fitting is currently controlled by the X code, so this is a
196 noop until modesetting support works fully */ 198 noop until modesetting support works fully */
197 if (!(pfit & ASLE_PFIT_VALID)) 199 DRM_DEBUG_DRIVER("Pfit is not supported\n");
198 return ASLE_PFIT_FAILED; 200 return ASLE_PFIT_FAILED;
199 return 0;
200} 201}
201 202
202void intel_opregion_asle_intr(struct drm_device *dev) 203void intel_opregion_asle_intr(struct drm_device *dev)
@@ -231,64 +232,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
231 iowrite32(asle_stat, &asle->aslc); 232 iowrite32(asle_stat, &asle->aslc);
232} 233}
233 234
234void intel_opregion_gse_intr(struct drm_device *dev)
235{
236 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
238 u32 asle_stat = 0;
239 u32 asle_req;
240
241 if (!asle)
242 return;
243
244 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
245
246 if (!asle_req) {
247 DRM_DEBUG_DRIVER("non asle set request??\n");
248 return;
249 }
250
251 if (asle_req & ASLE_SET_ALS_ILLUM) {
252 DRM_DEBUG_DRIVER("Illum is not supported\n");
253 asle_stat |= ASLE_ALS_ILLUM_FAILED;
254 }
255
256 if (asle_req & ASLE_SET_BACKLIGHT)
257 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
258
259 if (asle_req & ASLE_SET_PFIT) {
260 DRM_DEBUG_DRIVER("Pfit is not supported\n");
261 asle_stat |= ASLE_PFIT_FAILED;
262 }
263
264 if (asle_req & ASLE_SET_PWM_FREQ) {
265 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
266 asle_stat |= ASLE_PWM_FREQ_FAILED;
267 }
268
269 iowrite32(asle_stat, &asle->aslc);
270}
271#define ASLE_ALS_EN (1<<0)
272#define ASLE_BLC_EN (1<<1)
273#define ASLE_PFIT_EN (1<<2)
274#define ASLE_PFMB_EN (1<<3)
275
276void intel_opregion_enable_asle(struct drm_device *dev)
277{
278 struct drm_i915_private *dev_priv = dev->dev_private;
279 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
280
281 if (asle) {
282 if (IS_MOBILE(dev))
283 intel_enable_asle(dev);
284
285 iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
286 ASLE_PFMB_EN,
287 &asle->tche);
288 iowrite32(1, &asle->ardy);
289 }
290}
291
292#define ACPI_EV_DISPLAY_SWITCH (1<<0) 235#define ACPI_EV_DISPLAY_SWITCH (1<<0)
293#define ACPI_EV_LID (1<<1) 236#define ACPI_EV_LID (1<<1)
294#define ACPI_EV_DOCK (1<<2) 237#define ACPI_EV_DOCK (1<<2)
@@ -368,8 +311,8 @@ static void intel_didl_outputs(struct drm_device *dev)
368 311
369 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { 312 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
370 if (i >= 8) { 313 if (i >= 8) {
371 dev_printk(KERN_ERR, &dev->pdev->dev, 314 dev_dbg(&dev->pdev->dev,
372 "More than 8 outputs detected\n"); 315 "More than 8 outputs detected via ACPI\n");
373 return; 316 return;
374 } 317 }
375 status = 318 status =
@@ -395,8 +338,8 @@ blind_set:
395 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 338 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
396 int output_type = ACPI_OTHER_OUTPUT; 339 int output_type = ACPI_OTHER_OUTPUT;
397 if (i >= 8) { 340 if (i >= 8) {
398 dev_printk(KERN_ERR, &dev->pdev->dev, 341 dev_dbg(&dev->pdev->dev,
399 "More than 8 outputs detected\n"); 342 "More than 8 outputs in connector list\n");
400 return; 343 return;
401 } 344 }
402 switch (connector->connector_type) { 345 switch (connector->connector_type) {
@@ -472,8 +415,10 @@ void intel_opregion_init(struct drm_device *dev)
472 register_acpi_notifier(&intel_opregion_notifier); 415 register_acpi_notifier(&intel_opregion_notifier);
473 } 416 }
474 417
475 if (opregion->asle) 418 if (opregion->asle) {
476 intel_opregion_enable_asle(dev); 419 iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
420 iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
421 }
477} 422}
478 423
479void intel_opregion_fini(struct drm_device *dev) 424void intel_opregion_fini(struct drm_device *dev)
@@ -484,6 +429,9 @@ void intel_opregion_fini(struct drm_device *dev)
484 if (!opregion->header) 429 if (!opregion->header)
485 return; 430 return;
486 431
432 if (opregion->asle)
433 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
434
487 if (opregion->acpi) { 435 if (opregion->acpi) {
488 iowrite32(0, &opregion->acpi->drdy); 436 iowrite32(0, &opregion->acpi->drdy);
489 437
@@ -546,6 +494,8 @@ int intel_opregion_setup(struct drm_device *dev)
546 if (mboxes & MBOX_ASLE) { 494 if (mboxes & MBOX_ASLE) {
547 DRM_DEBUG_DRIVER("ASLE supported\n"); 495 DRM_DEBUG_DRIVER("ASLE supported\n");
548 opregion->asle = base + OPREGION_ASLE_OFFSET; 496 opregion->asle = base + OPREGION_ASLE_OFFSET;
497
498 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
549 } 499 }
550 500
551 return 0; 501 return 0;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 67a2501d519d..a3698812e9c7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
217 int ret; 217 int ret;
218 218
219 BUG_ON(overlay->last_flip_req); 219 BUG_ON(overlay->last_flip_req);
220 ret = i915_add_request(ring, NULL, &overlay->last_flip_req); 220 ret = i915_add_request(ring, &overlay->last_flip_req);
221 if (ret) 221 if (ret)
222 return ret; 222 return ret;
223 223
@@ -286,7 +286,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
286 intel_ring_emit(ring, flip_addr); 286 intel_ring_emit(ring, flip_addr);
287 intel_ring_advance(ring); 287 intel_ring_advance(ring);
288 288
289 return i915_add_request(ring, NULL, &overlay->last_flip_req); 289 return i915_add_request(ring, &overlay->last_flip_req);
290} 290}
291 291
292static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) 292static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -1485,14 +1485,15 @@ err:
1485} 1485}
1486 1486
1487void 1487void
1488intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error) 1488intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
1489 struct intel_overlay_error_state *error)
1489{ 1490{
1490 seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", 1491 i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
1491 error->dovsta, error->isr); 1492 error->dovsta, error->isr);
1492 seq_printf(m, " Register file at 0x%08lx:\n", 1493 i915_error_printf(m, " Register file at 0x%08lx:\n",
1493 error->base); 1494 error->base);
1494 1495
1495#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x) 1496#define P(x) i915_error_printf(m, " " #x ": 0x%08x\n", error->regs.x)
1496 P(OBUF_0Y); 1497 P(OBUF_0Y);
1497 P(OBUF_1Y); 1498 P(OBUF_1Y);
1498 P(OBUF_0U); 1499 P(OBUF_0U);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eb5e6e95f3c7..80bea1d3209f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -54,14 +54,16 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
54 54
55/* adjusted_mode has been preset to be the panel's fixed mode */ 55/* adjusted_mode has been preset to be the panel's fixed mode */
56void 56void
57intel_pch_panel_fitting(struct drm_device *dev, 57intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
58 int fitting_mode, 58 struct intel_crtc_config *pipe_config,
59 const struct drm_display_mode *mode, 59 int fitting_mode)
60 struct drm_display_mode *adjusted_mode)
61{ 60{
62 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_display_mode *mode, *adjusted_mode;
63 int x, y, width, height; 62 int x, y, width, height;
64 63
64 mode = &pipe_config->requested_mode;
65 adjusted_mode = &pipe_config->adjusted_mode;
66
65 x = y = width = height = 0; 67 x = y = width = height = 0;
66 68
67 /* Native modes don't need fitting */ 69 /* Native modes don't need fitting */
@@ -104,17 +106,209 @@ intel_pch_panel_fitting(struct drm_device *dev,
104 } 106 }
105 break; 107 break;
106 108
107 default:
108 case DRM_MODE_SCALE_FULLSCREEN: 109 case DRM_MODE_SCALE_FULLSCREEN:
109 x = y = 0; 110 x = y = 0;
110 width = adjusted_mode->hdisplay; 111 width = adjusted_mode->hdisplay;
111 height = adjusted_mode->vdisplay; 112 height = adjusted_mode->vdisplay;
112 break; 113 break;
114
115 default:
116 WARN(1, "bad panel fit mode: %d\n", fitting_mode);
117 return;
113 } 118 }
114 119
115done: 120done:
116 dev_priv->pch_pf_pos = (x << 16) | y; 121 pipe_config->pch_pfit.pos = (x << 16) | y;
117 dev_priv->pch_pf_size = (width << 16) | height; 122 pipe_config->pch_pfit.size = (width << 16) | height;
123}
124
125static void
126centre_horizontally(struct drm_display_mode *mode,
127 int width)
128{
129 u32 border, sync_pos, blank_width, sync_width;
130
131 /* keep the hsync and hblank widths constant */
132 sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
133 blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
134 sync_pos = (blank_width - sync_width + 1) / 2;
135
136 border = (mode->hdisplay - width + 1) / 2;
137 border += border & 1; /* make the border even */
138
139 mode->crtc_hdisplay = width;
140 mode->crtc_hblank_start = width + border;
141 mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
142
143 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
144 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
145}
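A worked example may help here; the numbers are illustrative, not taken from the source.

/*
 * Centring a 1024-pixel-wide image on 1280-wide timings:
 *
 *	border            = (1280 - 1024 + 1) / 2 = 128  (already even)
 *	crtc_hdisplay     = 1024
 *	crtc_hblank_start = 1024 + 128 = 1152
 *
 * hblank_end and the hsync pair then follow at the original blank
 * and sync widths, so only the active region shrinks and shifts.
 */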
146
147static void
148centre_vertically(struct drm_display_mode *mode,
149 int height)
150{
151 u32 border, sync_pos, blank_width, sync_width;
152
153 /* keep the vsync and vblank widths constant */
154 sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
155 blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
156 sync_pos = (blank_width - sync_width + 1) / 2;
157
158 border = (mode->vdisplay - height + 1) / 2;
159
160 mode->crtc_vdisplay = height;
161 mode->crtc_vblank_start = height + border;
162 mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
163
164 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
165 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
166}
167
168static inline u32 panel_fitter_scaling(u32 source, u32 target)
169{
170 /*
171 * Floating point operation is not supported. So the FACTOR
172 * is defined, which can avoid the floating point computation
173 * when calculating the panel ratio.
174 */
175#define ACCURACY 12
176#define FACTOR (1 << ACCURACY)
177 u32 ratio = source * FACTOR / target;
178 return (FACTOR * ratio + FACTOR/2) / FACTOR;
179}
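A minimal standalone sketch of the same fixed-point math, with illustrative numbers; fixed12_scale is a hypothetical stand-in, not part of the driver.

static inline u32 fixed12_scale(u32 source, u32 target)
{
	const u32 factor = 1 << 12;	/* FACTOR above, ACCURACY = 12 */
	/* e.g. source = 1024, target = 1280 -> 3276, i.e. ~0.80
	 * expressed as a 12-bit binary fraction. */
	return source * factor / target;
}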
180
181void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
182 struct intel_crtc_config *pipe_config,
183 int fitting_mode)
184{
185 struct drm_device *dev = intel_crtc->base.dev;
186 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
187 struct drm_display_mode *mode, *adjusted_mode;
188
189 mode = &pipe_config->requested_mode;
190 adjusted_mode = &pipe_config->adjusted_mode;
191
192 /* Native modes don't need fitting */
193 if (adjusted_mode->hdisplay == mode->hdisplay &&
194 adjusted_mode->vdisplay == mode->vdisplay)
195 goto out;
196
197 switch (fitting_mode) {
198 case DRM_MODE_SCALE_CENTER:
199 /*
200 * For centered modes, we have to calculate border widths &
201 * heights and modify the values programmed into the CRTC.
202 */
203 centre_horizontally(adjusted_mode, mode->hdisplay);
204 centre_vertically(adjusted_mode, mode->vdisplay);
205 border = LVDS_BORDER_ENABLE;
206 break;
207 case DRM_MODE_SCALE_ASPECT:
208 /* Scale but preserve the aspect ratio */
209 if (INTEL_INFO(dev)->gen >= 4) {
210 u32 scaled_width = adjusted_mode->hdisplay *
211 mode->vdisplay;
212 u32 scaled_height = mode->hdisplay *
213 adjusted_mode->vdisplay;
214
215 /* 965+ is easy, it does everything in hw */
216 if (scaled_width > scaled_height)
217 pfit_control |= PFIT_ENABLE |
218 PFIT_SCALING_PILLAR;
219 else if (scaled_width < scaled_height)
220 pfit_control |= PFIT_ENABLE |
221 PFIT_SCALING_LETTER;
222 else if (adjusted_mode->hdisplay != mode->hdisplay)
223 pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
224 } else {
225 u32 scaled_width = adjusted_mode->hdisplay *
226 mode->vdisplay;
227 u32 scaled_height = mode->hdisplay *
228 adjusted_mode->vdisplay;
229 /*
230 * For earlier chips we have to calculate the scaling
231 * ratio by hand and program it into the
232 * PFIT_PGM_RATIO register
233 */
234 if (scaled_width > scaled_height) { /* pillar */
235 centre_horizontally(adjusted_mode,
236 scaled_height /
237 mode->vdisplay);
238
239 border = LVDS_BORDER_ENABLE;
240 if (mode->vdisplay != adjusted_mode->vdisplay) {
241 u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
242 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
243 bits << PFIT_VERT_SCALE_SHIFT);
244 pfit_control |= (PFIT_ENABLE |
245 VERT_INTERP_BILINEAR |
246 HORIZ_INTERP_BILINEAR);
247 }
248 } else if (scaled_width < scaled_height) { /* letter */
249 centre_vertically(adjusted_mode,
250 scaled_width /
251 mode->hdisplay);
252
253 border = LVDS_BORDER_ENABLE;
254 if (mode->hdisplay != adjusted_mode->hdisplay) {
255 u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
256 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
257 bits << PFIT_VERT_SCALE_SHIFT);
258 pfit_control |= (PFIT_ENABLE |
259 VERT_INTERP_BILINEAR |
260 HORIZ_INTERP_BILINEAR);
261 }
262 } else {
 263 /* Aspects match, let hw scale both directions */
264 pfit_control |= (PFIT_ENABLE |
265 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
266 VERT_INTERP_BILINEAR |
267 HORIZ_INTERP_BILINEAR);
268 }
269 }
270 break;
271 case DRM_MODE_SCALE_FULLSCREEN:
272 /*
273 * Full scaling, even if it changes the aspect ratio.
274 * Fortunately this is all done for us in hw.
275 */
276 if (mode->vdisplay != adjusted_mode->vdisplay ||
277 mode->hdisplay != adjusted_mode->hdisplay) {
278 pfit_control |= PFIT_ENABLE;
279 if (INTEL_INFO(dev)->gen >= 4)
280 pfit_control |= PFIT_SCALING_AUTO;
281 else
282 pfit_control |= (VERT_AUTO_SCALE |
283 VERT_INTERP_BILINEAR |
284 HORIZ_AUTO_SCALE |
285 HORIZ_INTERP_BILINEAR);
286 }
287 break;
288 default:
289 WARN(1, "bad panel fit mode: %d\n", fitting_mode);
290 return;
291 }
292
293 /* 965+ wants fuzzy fitting */
294 /* FIXME: handle multiple panels by failing gracefully */
295 if (INTEL_INFO(dev)->gen >= 4)
296 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
297 PFIT_FILTER_FUZZY);
298
299out:
300 if ((pfit_control & PFIT_ENABLE) == 0) {
301 pfit_control = 0;
302 pfit_pgm_ratios = 0;
303 }
304
305 /* Make sure pre-965 set dither correctly for 18bpp panels. */
306 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
307 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
308
309 pipe_config->gmch_pfit.control = pfit_control;
310 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
311 pipe_config->gmch_pfit.lvds_border_bits = border;
118} 312}
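One point worth spelling out in the DRM_MODE_SCALE_ASPECT branch above: the two aspect ratios are compared by cross-multiplication, which avoids any division.

/*
 * scaled_width > scaled_height
 *   <=>  adjusted_w * mode_h > mode_w * adjusted_h
 *   <=>  adjusted_w / adjusted_h > mode_w / mode_h
 *
 * i.e. the panel is proportionally wider than the requested mode,
 * so the image is centred with side borders (pillarbox); the
 * opposite sign letterboxes, and equality lets hw scale both axes.
 */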
119 313
120static int is_backlight_combination_mode(struct drm_device *dev) 314static int is_backlight_combination_mode(struct drm_device *dev)
@@ -130,11 +324,16 @@ static int is_backlight_combination_mode(struct drm_device *dev)
130 return 0; 324 return 0;
131} 325}
132 326
327/* XXX: query mode clock or hardware clock and program max PWM appropriately
328 * when it's 0.
329 */
133static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) 330static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
134{ 331{
135 struct drm_i915_private *dev_priv = dev->dev_private; 332 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 val; 333 u32 val;
137 334
335 WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
336
 138 /* Restore the CTL value if it was lost, e.g. GPU reset */ 337
139 338
140 if (HAS_PCH_SPLIT(dev_priv->dev)) { 339 if (HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -164,7 +363,7 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
164 return val; 363 return val;
165} 364}
166 365
167static u32 _intel_panel_get_max_backlight(struct drm_device *dev) 366static u32 intel_panel_get_max_backlight(struct drm_device *dev)
168{ 367{
169 u32 max; 368 u32 max;
170 369
@@ -182,23 +381,8 @@ static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
182 max *= 0xff; 381 max *= 0xff;
183 } 382 }
184 383
185 return max;
186}
187
188u32 intel_panel_get_max_backlight(struct drm_device *dev)
189{
190 u32 max;
191
192 max = _intel_panel_get_max_backlight(dev);
193 if (max == 0) {
194 /* XXX add code here to query mode clock or hardware clock
195 * and program max PWM appropriately.
196 */
197 pr_warn_once("fixme: max PWM is zero\n");
198 return 1;
199 }
200
201 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 384 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
385
202 return max; 386 return max;
203} 387}
204 388
@@ -217,8 +401,11 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
217 return val; 401 return val;
218 402
219 if (i915_panel_invert_brightness > 0 || 403 if (i915_panel_invert_brightness > 0 ||
220 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) 404 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
221 return intel_panel_get_max_backlight(dev) - val; 405 u32 max = intel_panel_get_max_backlight(dev);
406 if (max)
407 return max - val;
408 }
222 409
223 return val; 410 return val;
224} 411}
@@ -227,6 +414,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
227{ 414{
228 struct drm_i915_private *dev_priv = dev->dev_private; 415 struct drm_i915_private *dev_priv = dev->dev_private;
229 u32 val; 416 u32 val;
417 unsigned long flags;
418
419 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
230 420
231 if (HAS_PCH_SPLIT(dev)) { 421 if (HAS_PCH_SPLIT(dev)) {
232 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 422 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
@@ -244,6 +434,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
244 } 434 }
245 435
246 val = intel_panel_compute_brightness(dev, val); 436 val = intel_panel_compute_brightness(dev, val);
437
438 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
439
247 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 440 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
248 return val; 441 return val;
249} 442}
@@ -270,6 +463,10 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
270 u32 max = intel_panel_get_max_backlight(dev); 463 u32 max = intel_panel_get_max_backlight(dev);
271 u8 lbpc; 464 u8 lbpc;
272 465
466 /* we're screwed, but keep behaviour backwards compatible */
467 if (!max)
468 max = 1;
469
273 lbpc = level * 0xfe / max + 1; 470 lbpc = level * 0xfe / max + 1;
274 level /= lbpc; 471 level /= lbpc;
275 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); 472 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
@@ -282,9 +479,23 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
282 I915_WRITE(BLC_PWM_CTL, tmp | level); 479 I915_WRITE(BLC_PWM_CTL, tmp | level);
283} 480}
284 481
285void intel_panel_set_backlight(struct drm_device *dev, u32 level) 482/* set backlight brightness to level in range [0..max] */
483void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
286{ 484{
287 struct drm_i915_private *dev_priv = dev->dev_private; 485 struct drm_i915_private *dev_priv = dev->dev_private;
486 u32 freq;
487 unsigned long flags;
488
489 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
490
491 freq = intel_panel_get_max_backlight(dev);
492 if (!freq) {
493 /* we are screwed, bail out */
494 goto out;
495 }
496
497 /* scale to hardware */
498 level = level * freq / max;
288 499
289 dev_priv->backlight.level = level; 500 dev_priv->backlight.level = level;
290 if (dev_priv->backlight.device) 501 if (dev_priv->backlight.device)
@@ -292,11 +503,16 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
292 503
293 if (dev_priv->backlight.enabled) 504 if (dev_priv->backlight.enabled)
294 intel_panel_actually_set_backlight(dev, level); 505 intel_panel_actually_set_backlight(dev, level);
506out:
507 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
295} 508}
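The new (level, max) signature lets callers pass brightness in their own range; the function rescales it onto the hardware PWM range under backlight.lock. The backlight-device hook later in this diff is the canonical caller:

intel_panel_set_backlight(dev, bd->props.brightness,
			  bd->props.max_brightness);
/* internally: level = level * freq / max, where freq is the
 * hardware maximum read by intel_panel_get_max_backlight(). */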
296 509
297void intel_panel_disable_backlight(struct drm_device *dev) 510void intel_panel_disable_backlight(struct drm_device *dev)
298{ 511{
299 struct drm_i915_private *dev_priv = dev->dev_private; 512 struct drm_i915_private *dev_priv = dev->dev_private;
513 unsigned long flags;
514
515 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
300 516
301 dev_priv->backlight.enabled = false; 517 dev_priv->backlight.enabled = false;
302 intel_panel_actually_set_backlight(dev, 0); 518 intel_panel_actually_set_backlight(dev, 0);
@@ -314,12 +530,19 @@ void intel_panel_disable_backlight(struct drm_device *dev)
314 I915_WRITE(BLC_PWM_PCH_CTL1, tmp); 530 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
315 } 531 }
316 } 532 }
533
534 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
317} 535}
318 536
319void intel_panel_enable_backlight(struct drm_device *dev, 537void intel_panel_enable_backlight(struct drm_device *dev,
320 enum pipe pipe) 538 enum pipe pipe)
321{ 539{
322 struct drm_i915_private *dev_priv = dev->dev_private; 540 struct drm_i915_private *dev_priv = dev->dev_private;
541 enum transcoder cpu_transcoder =
542 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
543 unsigned long flags;
544
545 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
323 546
324 if (dev_priv->backlight.level == 0) { 547 if (dev_priv->backlight.level == 0) {
325 dev_priv->backlight.level = intel_panel_get_max_backlight(dev); 548 dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
@@ -347,7 +570,10 @@ void intel_panel_enable_backlight(struct drm_device *dev,
347 else 570 else
348 tmp &= ~BLM_PIPE_SELECT; 571 tmp &= ~BLM_PIPE_SELECT;
349 572
350 tmp |= BLM_PIPE(pipe); 573 if (cpu_transcoder == TRANSCODER_EDP)
574 tmp |= BLM_TRANSCODER_EDP;
575 else
576 tmp |= BLM_PIPE(cpu_transcoder);
351 tmp &= ~BLM_PWM_ENABLE; 577 tmp &= ~BLM_PWM_ENABLE;
352 578
353 I915_WRITE(reg, tmp); 579 I915_WRITE(reg, tmp);
@@ -369,6 +595,8 @@ set_level:
369 */ 595 */
370 dev_priv->backlight.enabled = true; 596 dev_priv->backlight.enabled = true;
371 intel_panel_actually_set_backlight(dev, dev_priv->backlight.level); 597 intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
598
599 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
372} 600}
373 601
374static void intel_panel_init_backlight(struct drm_device *dev) 602static void intel_panel_init_backlight(struct drm_device *dev)
@@ -405,7 +633,8 @@ intel_panel_detect(struct drm_device *dev)
405static int intel_panel_update_status(struct backlight_device *bd) 633static int intel_panel_update_status(struct backlight_device *bd)
406{ 634{
407 struct drm_device *dev = bl_get_data(bd); 635 struct drm_device *dev = bl_get_data(bd);
408 intel_panel_set_backlight(dev, bd->props.brightness); 636 intel_panel_set_backlight(dev, bd->props.brightness,
637 bd->props.max_brightness);
409 return 0; 638 return 0;
410} 639}
411 640
@@ -425,6 +654,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
425 struct drm_device *dev = connector->dev; 654 struct drm_device *dev = connector->dev;
426 struct drm_i915_private *dev_priv = dev->dev_private; 655 struct drm_i915_private *dev_priv = dev->dev_private;
427 struct backlight_properties props; 656 struct backlight_properties props;
657 unsigned long flags;
428 658
429 intel_panel_init_backlight(dev); 659 intel_panel_init_backlight(dev);
430 660
@@ -434,7 +664,11 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
434 memset(&props, 0, sizeof(props)); 664 memset(&props, 0, sizeof(props));
435 props.type = BACKLIGHT_RAW; 665 props.type = BACKLIGHT_RAW;
436 props.brightness = dev_priv->backlight.level; 666 props.brightness = dev_priv->backlight.level;
437 props.max_brightness = _intel_panel_get_max_backlight(dev); 667
668 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
669 props.max_brightness = intel_panel_get_max_backlight(dev);
670 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
671
438 if (props.max_brightness == 0) { 672 if (props.max_brightness == 0) {
439 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); 673 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
440 return -ENODEV; 674 return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa01128ff192..ccbdd83f5220 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -113,8 +113,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
113 fbc_ctl |= obj->fence_reg; 113 fbc_ctl |= obj->fence_reg;
114 I915_WRITE(FBC_CONTROL, fbc_ctl); 114 I915_WRITE(FBC_CONTROL, fbc_ctl);
115 115
116 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", 116 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
117 cfb_pitch, crtc->y, intel_crtc->plane); 117 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
118} 118}
119 119
120static bool i8xx_fbc_enabled(struct drm_device *dev) 120static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -148,7 +148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
148 /* enable it... */ 148 /* enable it... */
149 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 149 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
150 150
151 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 151 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
152} 152}
153 153
154static void g4x_disable_fbc(struct drm_device *dev) 154static void g4x_disable_fbc(struct drm_device *dev)
@@ -228,7 +228,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
228 sandybridge_blit_fbc_update(dev); 228 sandybridge_blit_fbc_update(dev);
229 } 229 }
230 230
231 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 231 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
232} 232}
233 233
234static void ironlake_disable_fbc(struct drm_device *dev) 234static void ironlake_disable_fbc(struct drm_device *dev)
@@ -242,6 +242,18 @@ static void ironlake_disable_fbc(struct drm_device *dev)
242 dpfc_ctl &= ~DPFC_CTL_EN; 242 dpfc_ctl &= ~DPFC_CTL_EN;
243 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 243 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
244 244
245 if (IS_IVYBRIDGE(dev))
246 /* WaFbcDisableDpfcClockGating:ivb */
247 I915_WRITE(ILK_DSPCLK_GATE_D,
248 I915_READ(ILK_DSPCLK_GATE_D) &
249 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
250
251 if (IS_HASWELL(dev))
252 /* WaFbcDisableDpfcClockGating:hsw */
253 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
254 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
255 ~HSW_DPFC_GATING_DISABLE);
256
245 DRM_DEBUG_KMS("disabled FBC\n"); 257 DRM_DEBUG_KMS("disabled FBC\n");
246 } 258 }
247} 259}
@@ -253,6 +265,47 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
253 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; 265 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
254} 266}
255 267
268static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
269{
270 struct drm_device *dev = crtc->dev;
271 struct drm_i915_private *dev_priv = dev->dev_private;
272 struct drm_framebuffer *fb = crtc->fb;
273 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
274 struct drm_i915_gem_object *obj = intel_fb->obj;
275 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
276
277 I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
278
279 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
280 IVB_DPFC_CTL_FENCE_EN |
281 intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
282
283 if (IS_IVYBRIDGE(dev)) {
284 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
285 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
286 /* WaFbcDisableDpfcClockGating:ivb */
287 I915_WRITE(ILK_DSPCLK_GATE_D,
288 I915_READ(ILK_DSPCLK_GATE_D) |
289 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
290 } else {
291 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
292 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
293 HSW_BYPASS_FBC_QUEUE);
294 /* WaFbcDisableDpfcClockGating:hsw */
295 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
296 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
297 HSW_DPFC_GATING_DISABLE);
298 }
299
300 I915_WRITE(SNB_DPFC_CTL_SA,
301 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
302 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
303
304 sandybridge_blit_fbc_update(dev);
305
 306 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
307}
308
256bool intel_fbc_enabled(struct drm_device *dev) 309bool intel_fbc_enabled(struct drm_device *dev)
257{ 310{
258 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -378,7 +431,7 @@ void intel_disable_fbc(struct drm_device *dev)
 378 * - no pixel multiply/line duplication 431 * - no pixel multiply/line duplication
379 * - no alpha buffer discard 432 * - no alpha buffer discard
380 * - no dual wide 433 * - no dual wide
381 * - framebuffer <= 2048 in width, 1536 in height 434 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
382 * 435 *
383 * We can't assume that any compression will take place (worst case), 436 * We can't assume that any compression will take place (worst case),
384 * so the compressed buffer has to be the same size as the uncompressed 437 * so the compressed buffer has to be the same size as the uncompressed
@@ -396,6 +449,7 @@ void intel_update_fbc(struct drm_device *dev)
396 struct intel_framebuffer *intel_fb; 449 struct intel_framebuffer *intel_fb;
397 struct drm_i915_gem_object *obj; 450 struct drm_i915_gem_object *obj;
398 int enable_fbc; 451 int enable_fbc;
452 unsigned int max_hdisplay, max_vdisplay;
399 453
400 if (!i915_powersave) 454 if (!i915_powersave)
401 return; 455 return;
@@ -439,7 +493,7 @@ void intel_update_fbc(struct drm_device *dev)
439 if (enable_fbc < 0) { 493 if (enable_fbc < 0) {
440 DRM_DEBUG_KMS("fbc set to per-chip default\n"); 494 DRM_DEBUG_KMS("fbc set to per-chip default\n");
441 enable_fbc = 1; 495 enable_fbc = 1;
442 if (INTEL_INFO(dev)->gen <= 6) 496 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
443 enable_fbc = 0; 497 enable_fbc = 0;
444 } 498 }
445 if (!enable_fbc) { 499 if (!enable_fbc) {
@@ -454,13 +508,22 @@ void intel_update_fbc(struct drm_device *dev)
454 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; 508 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
455 goto out_disable; 509 goto out_disable;
456 } 510 }
457 if ((crtc->mode.hdisplay > 2048) || 511
458 (crtc->mode.vdisplay > 1536)) { 512 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
513 max_hdisplay = 4096;
514 max_vdisplay = 2048;
515 } else {
516 max_hdisplay = 2048;
517 max_vdisplay = 1536;
518 }
519 if ((crtc->mode.hdisplay > max_hdisplay) ||
520 (crtc->mode.vdisplay > max_vdisplay)) {
459 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 521 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
460 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; 522 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
461 goto out_disable; 523 goto out_disable;
462 } 524 }
463 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { 525 if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
526 intel_crtc->plane != 0) {
464 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 527 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
465 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 528 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
466 goto out_disable; 529 goto out_disable;
@@ -481,8 +544,6 @@ void intel_update_fbc(struct drm_device *dev)
481 goto out_disable; 544 goto out_disable;
482 545
483 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { 546 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
484 DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
485 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
486 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); 547 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
487 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 548 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
488 goto out_disable; 549 goto out_disable;
@@ -1633,6 +1694,10 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
1633 I915_WRITE(DISP_ARB_CTL, 1694 I915_WRITE(DISP_ARB_CTL,
1634 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); 1695 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1635 return false; 1696 return false;
1697 } else if (INTEL_INFO(dev)->gen >= 6) {
1698 /* enable FBC WM (except on ILK, where it must remain off) */
1699 I915_WRITE(DISP_ARB_CTL,
1700 I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1636 } 1701 }
1637 1702
1638 if (display_wm > display->max_wm) { 1703 if (display_wm > display->max_wm) {
@@ -2016,31 +2081,558 @@ static void ivybridge_update_wm(struct drm_device *dev)
2016 cursor_wm); 2081 cursor_wm);
2017} 2082}
2018 2083
2019static void 2084static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
2020haswell_update_linetime_wm(struct drm_device *dev, int pipe, 2085 struct drm_crtc *crtc)
2021 struct drm_display_mode *mode) 2086{
2087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2088 uint32_t pixel_rate, pfit_size;
2089
2090 pixel_rate = intel_crtc->config.adjusted_mode.clock;
2091
2092 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2093 * adjust the pixel_rate here. */
2094
2095 pfit_size = intel_crtc->config.pch_pfit.size;
2096 if (pfit_size) {
2097 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2098
2099 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2100 pipe_h = intel_crtc->config.requested_mode.vdisplay;
2101 pfit_w = (pfit_size >> 16) & 0xFFFF;
2102 pfit_h = pfit_size & 0xFFFF;
2103 if (pipe_w < pfit_w)
2104 pipe_w = pfit_w;
2105 if (pipe_h < pfit_h)
2106 pipe_h = pfit_h;
2107
2108 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2109 pfit_w * pfit_h);
2110 }
2111
2112 return pixel_rate;
2113}
2114
2115static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2116 uint32_t latency)
2117{
2118 uint64_t ret;
2119
2120 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2121 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2122
2123 return ret;
2124}
2125
2126static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2127 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2128 uint32_t latency)
2129{
2130 uint32_t ret;
2131
2132 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2133 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2134 ret = DIV_ROUND_UP(ret, 64) + 2;
2135 return ret;
2136}
2137
2138static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2139 uint8_t bytes_per_pixel)
2140{
2141 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2142}
2143
2144struct hsw_pipe_wm_parameters {
2145 bool active;
2146 bool sprite_enabled;
2147 uint8_t pri_bytes_per_pixel;
2148 uint8_t spr_bytes_per_pixel;
2149 uint8_t cur_bytes_per_pixel;
2150 uint32_t pri_horiz_pixels;
2151 uint32_t spr_horiz_pixels;
2152 uint32_t cur_horiz_pixels;
2153 uint32_t pipe_htotal;
2154 uint32_t pixel_rate;
2155};
2156
2157struct hsw_wm_maximums {
2158 uint16_t pri;
2159 uint16_t spr;
2160 uint16_t cur;
2161 uint16_t fbc;
2162};
2163
2164struct hsw_lp_wm_result {
2165 bool enable;
2166 bool fbc_enable;
2167 uint32_t pri_val;
2168 uint32_t spr_val;
2169 uint32_t cur_val;
2170 uint32_t fbc_val;
2171};
2172
2173struct hsw_wm_values {
2174 uint32_t wm_pipe[3];
2175 uint32_t wm_lp[3];
2176 uint32_t wm_lp_spr[3];
2177 uint32_t wm_linetime[3];
2178 bool enable_fbc_wm;
2179};
2180
2181enum hsw_data_buf_partitioning {
2182 HSW_DATA_BUF_PART_1_2,
2183 HSW_DATA_BUF_PART_5_6,
2184};
2185
2186/* For both WM_PIPE and WM_LP. */
2187static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2188 uint32_t mem_value,
2189 bool is_lp)
2190{
2191 uint32_t method1, method2;
2192
2193 /* TODO: for now, assume the primary plane is always enabled. */
2194 if (!params->active)
2195 return 0;
2196
2197 method1 = hsw_wm_method1(params->pixel_rate,
2198 params->pri_bytes_per_pixel,
2199 mem_value);
2200
2201 if (!is_lp)
2202 return method1;
2203
2204 method2 = hsw_wm_method2(params->pixel_rate,
2205 params->pipe_htotal,
2206 params->pri_horiz_pixels,
2207 params->pri_bytes_per_pixel,
2208 mem_value);
2209
2210 return min(method1, method2);
2211}
2212
2213/* For both WM_PIPE and WM_LP. */
2214static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2215 uint32_t mem_value)
2216{
2217 uint32_t method1, method2;
2218
2219 if (!params->active || !params->sprite_enabled)
2220 return 0;
2221
2222 method1 = hsw_wm_method1(params->pixel_rate,
2223 params->spr_bytes_per_pixel,
2224 mem_value);
2225 method2 = hsw_wm_method2(params->pixel_rate,
2226 params->pipe_htotal,
2227 params->spr_horiz_pixels,
2228 params->spr_bytes_per_pixel,
2229 mem_value);
2230 return min(method1, method2);
2231}
2232
2233/* For both WM_PIPE and WM_LP. */
2234static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2235 uint32_t mem_value)
2236{
2237 if (!params->active)
2238 return 0;
2239
2240 return hsw_wm_method2(params->pixel_rate,
2241 params->pipe_htotal,
2242 params->cur_horiz_pixels,
2243 params->cur_bytes_per_pixel,
2244 mem_value);
2245}
2246
2247/* Only for WM_LP. */
2248static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2249 uint32_t pri_val,
2250 uint32_t mem_value)
2251{
2252 if (!params->active)
2253 return 0;
2254
2255 return hsw_wm_fbc(pri_val,
2256 params->pri_horiz_pixels,
2257 params->pri_bytes_per_pixel);
2258}
2259
2260static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
2261 struct hsw_pipe_wm_parameters *params,
2262 struct hsw_lp_wm_result *result)
2263{
2264 enum pipe pipe;
2265 uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
2266
2267 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
2268 struct hsw_pipe_wm_parameters *p = &params[pipe];
2269
2270 pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
2271 spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
2272 cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
2273 fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
2274 }
2275
2276 result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
2277 result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
2278 result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
2279 result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
2280
2281 if (result->fbc_val > max->fbc) {
2282 result->fbc_enable = false;
2283 result->fbc_val = 0;
2284 } else {
2285 result->fbc_enable = true;
2286 }
2287
2288 result->enable = result->pri_val <= max->pri &&
2289 result->spr_val <= max->spr &&
2290 result->cur_val <= max->cur;
2291 return result->enable;
2292}
2293
2294static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2295 uint32_t mem_value, enum pipe pipe,
2296 struct hsw_pipe_wm_parameters *params)
2297{
2298 uint32_t pri_val, cur_val, spr_val;
2299
2300 pri_val = hsw_compute_pri_wm(params, mem_value, false);
2301 spr_val = hsw_compute_spr_wm(params, mem_value);
2302 cur_val = hsw_compute_cur_wm(params, mem_value);
2303
2304 WARN(pri_val > 127,
2305 "Primary WM error, mode not supported for pipe %c\n",
2306 pipe_name(pipe));
2307 WARN(spr_val > 127,
2308 "Sprite WM error, mode not supported for pipe %c\n",
2309 pipe_name(pipe));
2310 WARN(cur_val > 63,
2311 "Cursor WM error, mode not supported for pipe %c\n",
2312 pipe_name(pipe));
2313
2314 return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2315 (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2316 cur_val;
2317}
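Read off the shifts above (not independently checked against the spec):

/*
 * WM0 packing as implied by the shifts: primary in bits 22:16,
 * sprite in bits 14:8, cursor in bits 5:0 -- hence the 127/127/63
 * WARN thresholds, which flag a mode the hardware fields cannot
 * express at the given latency.
 */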
2318
2319static uint32_t
2320hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2022{ 2321{
2023 struct drm_i915_private *dev_priv = dev->dev_private; 2322 struct drm_i915_private *dev_priv = dev->dev_private;
2024 u32 temp; 2323 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2324 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2325 u32 linetime, ips_linetime;
2025 2326
2026 temp = I915_READ(PIPE_WM_LINETIME(pipe)); 2327 if (!intel_crtc_active(crtc))
2027 temp &= ~PIPE_WM_LINETIME_MASK; 2328 return 0;
2028 2329
 2029 /* The WMs are computed based on how long it takes to fill a single 2330 /* The WMs are computed based on how long it takes to fill a single
2030 * row at the given clock rate, multiplied by 8. 2331 * row at the given clock rate, multiplied by 8.
2031 * */ 2332 * */
2032 temp |= PIPE_WM_LINETIME_TIME( 2333 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2033 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8); 2334 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2335 intel_ddi_get_cdclk_freq(dev_priv));
2034 2336
2035 /* IPS watermarks are only used by pipe A, and are ignored by 2337 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2036 * pipes B and C. They are calculated similarly to the common 2338 PIPE_WM_LINETIME_TIME(linetime);
2037 * linetime values, except that we are using CD clock frequency 2339}
2038 * in MHz instead of pixel rate for the division. 2340
2039 * 2341static void hsw_compute_wm_parameters(struct drm_device *dev,
2040 * This is a placeholder for the IPS watermark calculation code. 2342 struct hsw_pipe_wm_parameters *params,
2041 */ 2343 uint32_t *wm,
2344 struct hsw_wm_maximums *lp_max_1_2,
2345 struct hsw_wm_maximums *lp_max_5_6)
2346{
2347 struct drm_i915_private *dev_priv = dev->dev_private;
2348 struct drm_crtc *crtc;
2349 struct drm_plane *plane;
2350 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2351 enum pipe pipe;
2352 int pipes_active = 0, sprites_enabled = 0;
2353
2354 if ((sskpd >> 56) & 0xFF)
2355 wm[0] = (sskpd >> 56) & 0xFF;
2356 else
2357 wm[0] = sskpd & 0xF;
2358 wm[1] = ((sskpd >> 4) & 0xFF) * 5;
2359 wm[2] = ((sskpd >> 12) & 0xFF) * 5;
2360 wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
2361 wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
2362
2363 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2364 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2365 struct hsw_pipe_wm_parameters *p;
2366
2367 pipe = intel_crtc->pipe;
2368 p = &params[pipe];
2369
2370 p->active = intel_crtc_active(crtc);
2371 if (!p->active)
2372 continue;
2373
2374 pipes_active++;
2375
2376 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2377 p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
2378 p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2379 p->cur_bytes_per_pixel = 4;
2380 p->pri_horiz_pixels =
2381 intel_crtc->config.requested_mode.hdisplay;
2382 p->cur_horiz_pixels = 64;
2383 }
2384
2385 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2386 struct intel_plane *intel_plane = to_intel_plane(plane);
2387 struct hsw_pipe_wm_parameters *p;
2388
2389 pipe = intel_plane->pipe;
2390 p = &params[pipe];
2391
2392 p->sprite_enabled = intel_plane->wm.enable;
2393 p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
2394 p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
2395
2396 if (p->sprite_enabled)
2397 sprites_enabled++;
2398 }
2042 2399
2043 I915_WRITE(PIPE_WM_LINETIME(pipe), temp); 2400 if (pipes_active > 1) {
2401 lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
2402 lp_max_1_2->spr = lp_max_5_6->spr = 128;
2403 lp_max_1_2->cur = lp_max_5_6->cur = 64;
2404 } else {
2405 lp_max_1_2->pri = sprites_enabled ? 384 : 768;
2406 lp_max_5_6->pri = sprites_enabled ? 128 : 768;
2407 lp_max_1_2->spr = 384;
2408 lp_max_5_6->spr = 640;
2409 lp_max_1_2->cur = lp_max_5_6->cur = 255;
2410 }
2411 lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
2412}
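A hedged decode of the MCH_SSKPD reads above:

/*
 * wm[0] comes from bits 63:56, falling back to the legacy bits 3:0
 * when that field is zero; wm[1]..wm[4] sit at bits 11:4, 19:12,
 * 28:20 and 40:32 and are multiplied by 5, which suggests they are
 * stored in coarser units than the 0.1us granularity used elsewhere
 * in this code.
 */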
2413
2414static void hsw_compute_wm_results(struct drm_device *dev,
2415 struct hsw_pipe_wm_parameters *params,
2416 uint32_t *wm,
2417 struct hsw_wm_maximums *lp_maximums,
2418 struct hsw_wm_values *results)
2419{
2420 struct drm_i915_private *dev_priv = dev->dev_private;
2421 struct drm_crtc *crtc;
2422 struct hsw_lp_wm_result lp_results[4] = {};
2423 enum pipe pipe;
2424 int level, max_level, wm_lp;
2425
2426 for (level = 1; level <= 4; level++)
2427 if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
2428 &lp_results[level - 1]))
2429 break;
2430 max_level = level - 1;
2431
 2432 memset(results, 0, sizeof(*results));
 2433
 2434 /* The spec says it is preferred to disable FBC WMs instead of disabling
 2435 * a WM level. */
 2436 results->enable_fbc_wm = true;
 2437 for (level = 1; level <= max_level; level++) {
 2438 if (!lp_results[level - 1].fbc_enable) {
 2439 results->enable_fbc_wm = false;
 2440 break;
 2441 }
 2442 }
2443 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2444 const struct hsw_lp_wm_result *r;
2445
2446 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2447 if (level > max_level)
2448 break;
2449
2450 r = &lp_results[level - 1];
2451 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2452 r->fbc_val,
2453 r->pri_val,
2454 r->cur_val);
2455 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2456 }
2457
2458 for_each_pipe(pipe)
2459 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
2460 pipe,
2461 &params[pipe]);
2462
2463 for_each_pipe(pipe) {
2464 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2465 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
2466 }
2467}
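The level selection above is easy to misread, so restating it:

/*
 * Normally WM_LP[n] carries level n, but when all four LP levels
 * validated (max_level == 4) the upper two registers skip a level
 * (wm_lp + 1), i.e. levels 1, 3 and 4 are programmed and level 2 is
 * dropped. HSW_WM_LP_VAL() then encodes the latency field as
 * level * 2.
 */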
2468
2469/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2470 * case both are at the same level. Prefer r1 in case they're the same. */
2471struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2472 struct hsw_wm_values *r2)
2473{
2474 int i, val_r1 = 0, val_r2 = 0;
2475
2476 for (i = 0; i < 3; i++) {
2477 if (r1->wm_lp[i] & WM3_LP_EN)
2478 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2479 if (r2->wm_lp[i] & WM3_LP_EN)
2480 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2481 }
2482
2483 if (val_r1 == val_r2) {
2484 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2485 return r2;
2486 else
2487 return r1;
2488 } else if (val_r1 > val_r2) {
2489 return r1;
2490 } else {
2491 return r2;
2492 }
2493}
2494
2495/*
 2496 * The spec says we shouldn't write when we don't need to, because every write
2497 * causes WMs to be re-evaluated, expending some power.
2498 */
2499static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2500 struct hsw_wm_values *results,
2501 enum hsw_data_buf_partitioning partitioning)
2502{
2503 struct hsw_wm_values previous;
2504 uint32_t val;
2505 enum hsw_data_buf_partitioning prev_partitioning;
2506 bool prev_enable_fbc_wm;
2507
2508 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2509 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2510 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2511 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2512 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2513 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2514 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2515 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2516 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2517 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2518 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2519 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2520
2521 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2522 HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
2523
2524 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2525
2526 if (memcmp(results->wm_pipe, previous.wm_pipe,
2527 sizeof(results->wm_pipe)) == 0 &&
2528 memcmp(results->wm_lp, previous.wm_lp,
2529 sizeof(results->wm_lp)) == 0 &&
2530 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2531 sizeof(results->wm_lp_spr)) == 0 &&
2532 memcmp(results->wm_linetime, previous.wm_linetime,
2533 sizeof(results->wm_linetime)) == 0 &&
2534 partitioning == prev_partitioning &&
2535 results->enable_fbc_wm == prev_enable_fbc_wm)
2536 return;
2537
2538 if (previous.wm_lp[2] != 0)
2539 I915_WRITE(WM3_LP_ILK, 0);
2540 if (previous.wm_lp[1] != 0)
2541 I915_WRITE(WM2_LP_ILK, 0);
2542 if (previous.wm_lp[0] != 0)
2543 I915_WRITE(WM1_LP_ILK, 0);
2544
2545 if (previous.wm_pipe[0] != results->wm_pipe[0])
2546 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2547 if (previous.wm_pipe[1] != results->wm_pipe[1])
2548 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2549 if (previous.wm_pipe[2] != results->wm_pipe[2])
2550 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2551
2552 if (previous.wm_linetime[0] != results->wm_linetime[0])
2553 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2554 if (previous.wm_linetime[1] != results->wm_linetime[1])
2555 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2556 if (previous.wm_linetime[2] != results->wm_linetime[2])
2557 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2558
2559 if (prev_partitioning != partitioning) {
2560 val = I915_READ(WM_MISC);
2561 if (partitioning == HSW_DATA_BUF_PART_1_2)
2562 val &= ~WM_MISC_DATA_PARTITION_5_6;
2563 else
2564 val |= WM_MISC_DATA_PARTITION_5_6;
2565 I915_WRITE(WM_MISC, val);
2566 }
2567
2568 if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2569 val = I915_READ(DISP_ARB_CTL);
2570 if (results->enable_fbc_wm)
2571 val &= ~DISP_FBC_WM_DIS;
2572 else
2573 val |= DISP_FBC_WM_DIS;
2574 I915_WRITE(DISP_ARB_CTL, val);
2575 }
2576
2577 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2578 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2579 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2580 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2581 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2582 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2583
2584 if (results->wm_lp[0] != 0)
2585 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2586 if (results->wm_lp[1] != 0)
2587 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2588 if (results->wm_lp[2] != 0)
2589 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2590}
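Note the update ordering above, which reads as deliberate:

/*
 * 1) arm-down: WM3..WM1_LP are written to 0 first;
 * 2) pipe WMs, linetimes, partitioning, the FBC WM bit and the
 *    sprite LP values are refreshed;
 * 3) only then are WM1..WM3_LP re-written with the new values,
 * so the hardware never evaluates a half-updated set.
 */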
2591
2592static void haswell_update_wm(struct drm_device *dev)
2593{
2594 struct drm_i915_private *dev_priv = dev->dev_private;
2595 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
2596 struct hsw_pipe_wm_parameters params[3];
2597 struct hsw_wm_values results_1_2, results_5_6, *best_results;
2598 uint32_t wm[5];
2599 enum hsw_data_buf_partitioning partitioning;
2600
2601 hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
2602
2603 hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
2604 if (lp_max_1_2.pri != lp_max_5_6.pri) {
2605 hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
2606 &results_5_6);
2607 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2608 } else {
2609 best_results = &results_1_2;
2610 }
2611
2612 partitioning = (best_results == &results_1_2) ?
2613 HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
2614
2615 hsw_write_wm_values(dev_priv, best_results, partitioning);
2616}
2617
2618static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
2619 uint32_t sprite_width, int pixel_size,
2620 bool enable)
2621{
2622 struct drm_plane *plane;
2623
2624 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2625 struct intel_plane *intel_plane = to_intel_plane(plane);
2626
2627 if (intel_plane->pipe == pipe) {
2628 intel_plane->wm.enable = enable;
2629 intel_plane->wm.horiz_pixels = sprite_width + 1;
2630 intel_plane->wm.bytes_per_pixel = pixel_size;
2631 break;
2632 }
2633 }
2634
2635 haswell_update_wm(dev);
2044} 2636}
2045 2637
2046static bool 2638static bool
@@ -2120,7 +2712,8 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2120} 2712}
2121 2713
2122static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, 2714static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2123 uint32_t sprite_width, int pixel_size) 2715 uint32_t sprite_width, int pixel_size,
2716 bool enable)
2124{ 2717{
2125 struct drm_i915_private *dev_priv = dev->dev_private; 2718 struct drm_i915_private *dev_priv = dev->dev_private;
 2126 int latency = SNB_READ_WM0_LATENCY() * 100; /* in units of 0.1us */ 2719
@@ -2128,6 +2721,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2128 int sprite_wm, reg; 2721 int sprite_wm, reg;
2129 int ret; 2722 int ret;
2130 2723
2724 if (!enable)
2725 return;
2726
2131 switch (pipe) { 2727 switch (pipe) {
2132 case 0: 2728 case 0:
2133 reg = WM0_PIPEA_ILK; 2729 reg = WM0_PIPEA_ILK;
@@ -2146,15 +2742,15 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2146 &sandybridge_display_wm_info, 2742 &sandybridge_display_wm_info,
2147 latency, &sprite_wm); 2743 latency, &sprite_wm);
2148 if (!ret) { 2744 if (!ret) {
2149 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", 2745 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
2150 pipe); 2746 pipe_name(pipe));
2151 return; 2747 return;
2152 } 2748 }
2153 2749
2154 val = I915_READ(reg); 2750 val = I915_READ(reg);
2155 val &= ~WM0_PIPE_SPRITE_MASK; 2751 val &= ~WM0_PIPE_SPRITE_MASK;
2156 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); 2752 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
 2157 DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm); 2753 DRM_DEBUG_KMS("sprite watermarks for pipe %c - %d\n", pipe_name(pipe), sprite_wm);
2158 2754
2159 2755
2160 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 2756 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
@@ -2163,8 +2759,8 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2163 SNB_READ_WM1_LATENCY() * 500, 2759 SNB_READ_WM1_LATENCY() * 500,
2164 &sprite_wm); 2760 &sprite_wm);
2165 if (!ret) { 2761 if (!ret) {
2166 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", 2762 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
2167 pipe); 2763 pipe_name(pipe));
2168 return; 2764 return;
2169 } 2765 }
2170 I915_WRITE(WM1S_LP_ILK, sprite_wm); 2766 I915_WRITE(WM1S_LP_ILK, sprite_wm);
@@ -2179,8 +2775,8 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2179 SNB_READ_WM2_LATENCY() * 500, 2775 SNB_READ_WM2_LATENCY() * 500,
2180 &sprite_wm); 2776 &sprite_wm);
2181 if (!ret) { 2777 if (!ret) {
2182 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", 2778 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
2183 pipe); 2779 pipe_name(pipe));
2184 return; 2780 return;
2185 } 2781 }
2186 I915_WRITE(WM2S_LP_IVB, sprite_wm); 2782 I915_WRITE(WM2S_LP_IVB, sprite_wm);
@@ -2191,8 +2787,8 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2191 SNB_READ_WM3_LATENCY() * 500, 2787 SNB_READ_WM3_LATENCY() * 500,
2192 &sprite_wm); 2788 &sprite_wm);
2193 if (!ret) { 2789 if (!ret) {
2194 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", 2790 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
2195 pipe); 2791 pipe_name(pipe));
2196 return; 2792 return;
2197 } 2793 }
2198 I915_WRITE(WM3S_LP_IVB, sprite_wm); 2794 I915_WRITE(WM3S_LP_IVB, sprite_wm);
@@ -2238,23 +2834,15 @@ void intel_update_watermarks(struct drm_device *dev)
2238 dev_priv->display.update_wm(dev); 2834 dev_priv->display.update_wm(dev);
2239} 2835}
2240 2836
2241void intel_update_linetime_watermarks(struct drm_device *dev,
2242 int pipe, struct drm_display_mode *mode)
2243{
2244 struct drm_i915_private *dev_priv = dev->dev_private;
2245
2246 if (dev_priv->display.update_linetime_wm)
2247 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2248}
2249
2250void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 2837void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2251 uint32_t sprite_width, int pixel_size) 2838 uint32_t sprite_width, int pixel_size,
2839 bool enable)
2252{ 2840{
2253 struct drm_i915_private *dev_priv = dev->dev_private; 2841 struct drm_i915_private *dev_priv = dev->dev_private;
2254 2842
2255 if (dev_priv->display.update_sprite_wm) 2843 if (dev_priv->display.update_sprite_wm)
2256 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, 2844 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2257 pixel_size); 2845 pixel_size, enable);
2258} 2846}
2259 2847
2260static struct drm_i915_gem_object * 2848static struct drm_i915_gem_object *
@@ -2481,6 +3069,67 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
2481 trace_intel_gpu_freq_change(val * 50); 3069 trace_intel_gpu_freq_change(val * 50);
2482} 3070}
2483 3071
3072/*
3073 * Wait until the previous freq change has completed,
3074 * or the timeout elapsed, and then update our notion
3075 * of the current GPU frequency.
3076 */
3077static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3078{
3079 unsigned long timeout = jiffies + msecs_to_jiffies(10);
3080 u32 pval;
3081
3082 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3083
3084 do {
3085 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3086 if (time_after(jiffies, timeout)) {
3087 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3088 break;
3089 }
3090 udelay(10);
3091 } while (pval & 1);
3092
3093 pval >>= 8;
3094
3095 if (pval != dev_priv->rps.cur_delay)
3096 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
3097 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3098 dev_priv->rps.cur_delay,
3099 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3100
3101 dev_priv->rps.cur_delay = pval;
3102}
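Note on the register layout the loop above relies on (implied by the code here, not spelled out in this diff): bit 0 of PUNIT_REG_GPU_FREQ_STS is read as the Punit-busy flag being polled, and bits 15:8 as the current frequency code, which is why pval is shifted right by 8 before being compared against rps.cur_delay.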
3103
3104void valleyview_set_rps(struct drm_device *dev, u8 val)
3105{
3106 struct drm_i915_private *dev_priv = dev->dev_private;
3107
3108 gen6_rps_limits(dev_priv, &val);
3109
3110 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3111 WARN_ON(val > dev_priv->rps.max_delay);
3112 WARN_ON(val < dev_priv->rps.min_delay);
3113
3114 vlv_update_rps_cur_delay(dev_priv);
3115
3116 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3117 vlv_gpu_freq(dev_priv->mem_freq,
3118 dev_priv->rps.cur_delay),
3119 dev_priv->rps.cur_delay,
3120 vlv_gpu_freq(dev_priv->mem_freq, val), val);
3121
3122 if (val == dev_priv->rps.cur_delay)
3123 return;
3124
3125 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3126
3127 dev_priv->rps.cur_delay = val;
3128
3129 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3130}
3131
3132
2484static void gen6_disable_rps(struct drm_device *dev) 3133static void gen6_disable_rps(struct drm_device *dev)
2485{ 3134{
2486 struct drm_i915_private *dev_priv = dev->dev_private; 3135 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2488,6 +3137,25 @@ static void gen6_disable_rps(struct drm_device *dev)
2488 I915_WRITE(GEN6_RC_CONTROL, 0); 3137 I915_WRITE(GEN6_RC_CONTROL, 0);
2489 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 3138 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2490 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3139 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3140 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3141 /* Complete PM interrupt masking here doesn't race with the rps work
3142 * item again unmasking PM interrupts because that is using a different
3143 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3144 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3145
3146 spin_lock_irq(&dev_priv->rps.lock);
3147 dev_priv->rps.pm_iir = 0;
3148 spin_unlock_irq(&dev_priv->rps.lock);
3149
3150 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3151}
3152
3153static void valleyview_disable_rps(struct drm_device *dev)
3154{
3155 struct drm_i915_private *dev_priv = dev->dev_private;
3156
3157 I915_WRITE(GEN6_RC_CONTROL, 0);
3158 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2491 I915_WRITE(GEN6_PMIER, 0); 3159 I915_WRITE(GEN6_PMIER, 0);
2492 /* Complete PM interrupt masking here doesn't race with the rps work 3160 /* Complete PM interrupt masking here doesn't race with the rps work
2493 * item again unmasking PM interrupts because that is using a different 3161 * item again unmasking PM interrupts because that is using a different
@@ -2499,6 +3167,11 @@ static void gen6_disable_rps(struct drm_device *dev)
2499 spin_unlock_irq(&dev_priv->rps.lock); 3167 spin_unlock_irq(&dev_priv->rps.lock);
2500 3168
2501 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 3169 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
3170
3171 if (dev_priv->vlv_pctx) {
3172 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3173 dev_priv->vlv_pctx = NULL;
3174 }
2502} 3175}
2503 3176
2504int intel_enable_rc6(const struct drm_device *dev) 3177int intel_enable_rc6(const struct drm_device *dev)
@@ -2655,12 +3328,15 @@ static void gen6_enable_rps(struct drm_device *dev)
2655 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3328 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
2656 3329
2657 /* requires MSI enabled */ 3330 /* requires MSI enabled */
2658 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); 3331 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
2659 spin_lock_irq(&dev_priv->rps.lock); 3332 spin_lock_irq(&dev_priv->rps.lock);
2660 WARN_ON(dev_priv->rps.pm_iir != 0); 3333 /* FIXME: Our interrupt enabling sequence is bonghits.
2661 I915_WRITE(GEN6_PMIMR, 0); 3334 * dev_priv->rps.pm_iir really should be 0 here. */
3335 dev_priv->rps.pm_iir = 0;
3336 I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3337 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
2662 spin_unlock_irq(&dev_priv->rps.lock); 3338 spin_unlock_irq(&dev_priv->rps.lock);
2663 /* enable all PM interrupts */ 3339 /* unmask all PM interrupts */
2664 I915_WRITE(GEN6_PMINTRMSK, 0); 3340 I915_WRITE(GEN6_PMINTRMSK, 0);
2665 3341
2666 rc6vids = 0; 3342 rc6vids = 0;
@@ -2742,6 +3418,207 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2742 } 3418 }
2743} 3419}
2744 3420
3421int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3422{
3423 u32 val, rp0;
3424
3425 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3426
3427 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3428 /* Clamp to max */
3429 rp0 = min_t(u32, rp0, 0xea);
3430
3431 return rp0;
3432}
3433
3434static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3435{
3436 u32 val, rpe;
3437
3438 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3439 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3440 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3441 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3442
3443 return rpe;
3444}
3445
3446int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3447{
3448 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3449}
3450
3451static void vlv_rps_timer_work(struct work_struct *work)
3452{
3453 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3454 rps.vlv_work.work);
3455
3456 /*
3457 * Timer fired, we must be idle. Drop to min voltage state.
3458 * Note: we use RPe here since it should match the
3459 * Vmin we were shooting for. That should give us better
3460 * perf when we come back out of RC6 than if we used the
3461 * min freq available.
3462 */
3463 mutex_lock(&dev_priv->rps.hw_lock);
3464 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3465 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3466 mutex_unlock(&dev_priv->rps.hw_lock);
3467}
3468
3469static void valleyview_setup_pctx(struct drm_device *dev)
3470{
3471 struct drm_i915_private *dev_priv = dev->dev_private;
3472 struct drm_i915_gem_object *pctx;
3473 unsigned long pctx_paddr;
3474 u32 pcbr;
3475 int pctx_size = 24*1024;
3476
3477 pcbr = I915_READ(VLV_PCBR);
3478 if (pcbr) {
3479 /* BIOS set it up already, grab the pre-alloc'd space */
3480 int pcbr_offset;
3481
3482 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3483 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3484 pcbr_offset,
3485 -1,
3486 pctx_size);
3487 goto out;
3488 }
3489
3490 /*
3491 * From the Gunit register HAS:
3492 * The Gfx driver is expected to program this register and ensure
3493 * proper allocation within Gfx stolen memory. For example, this
3494 * register should be programmed such that the PCBR range does not
3495 * overlap with other ranges, such as the frame buffer, protected
3496 * memory, or any other relevant ranges.
3497 */
3498 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3499 if (!pctx) {
3500 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3501 return;
3502 }
3503
3504 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3505 I915_WRITE(VLV_PCBR, pctx_paddr);
3506
3507out:
3508 dev_priv->vlv_pctx = pctx;
3509}
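The BIOS-preallocated branch above hinges on PCBR holding a 4 KiB-aligned base address inside stolen memory, with the low 12 bits treated as flags/reserved. A minimal sketch of the address math, using made-up example values:

	u32 pcbr = 0x7f800001;			/* hypothetical VLV_PCBR readout */
	unsigned long stolen_base = 0x7f000000;	/* hypothetical mm.stolen_base */

	/* Mask off the low 12 bits, then rebase into stolen space: */
	int pcbr_offset = (pcbr & ~4095) - stolen_base;	/* = 0x800000 */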
3510
3511static void valleyview_enable_rps(struct drm_device *dev)
3512{
3513 struct drm_i915_private *dev_priv = dev->dev_private;
3514 struct intel_ring_buffer *ring;
3515 u32 gtfifodbg, val;
3516 int i;
3517
3518 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3519
3520 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3521 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3522 I915_WRITE(GTFIFODBG, gtfifodbg);
3523 }
3524
3525 valleyview_setup_pctx(dev);
3526
3527 gen6_gt_force_wake_get(dev_priv);
3528
3529 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3530 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3531 I915_WRITE(GEN6_RP_UP_EI, 66000);
3532 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3533
3534 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3535
3536 I915_WRITE(GEN6_RP_CONTROL,
3537 GEN6_RP_MEDIA_TURBO |
3538 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3539 GEN6_RP_MEDIA_IS_GFX |
3540 GEN6_RP_ENABLE |
3541 GEN6_RP_UP_BUSY_AVG |
3542 GEN6_RP_DOWN_IDLE_CONT);
3543
3544 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3545 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3546 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3547
3548 for_each_ring(ring, dev_priv, i)
3549 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3550
3551 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3552
3553 /* allows RC6 residency counter to work */
3554 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
3555 I915_WRITE(GEN6_RC_CONTROL,
3556 GEN7_RC_CTL_TO_MODE);
3557
3558 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3559 switch ((val >> 6) & 3) {
3560 case 0:
3561 case 1:
3562 dev_priv->mem_freq = 800;
3563 break;
3564 case 2:
3565 dev_priv->mem_freq = 1066;
3566 break;
3567 case 3:
3568 dev_priv->mem_freq = 1333;
3569 break;
3570 }
3571 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
3572
3573 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3574 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3575
3576 dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3577 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3578 vlv_gpu_freq(dev_priv->mem_freq,
3579 dev_priv->rps.cur_delay),
3580 dev_priv->rps.cur_delay);
3581
3582 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3583 dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3584 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3585 vlv_gpu_freq(dev_priv->mem_freq,
3586 dev_priv->rps.max_delay),
3587 dev_priv->rps.max_delay);
3588
3589 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3590 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3591 vlv_gpu_freq(dev_priv->mem_freq,
3592 dev_priv->rps.rpe_delay),
3593 dev_priv->rps.rpe_delay);
3594
3595 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3596 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3597 vlv_gpu_freq(dev_priv->mem_freq,
3598 dev_priv->rps.min_delay),
3599 dev_priv->rps.min_delay);
3600
3601 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3602 vlv_gpu_freq(dev_priv->mem_freq,
3603 dev_priv->rps.rpe_delay),
3604 dev_priv->rps.rpe_delay);
3605
3606 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3607
3608 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3609
3610 /* requires MSI enabled */
3611 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
3612 spin_lock_irq(&dev_priv->rps.lock);
3613 WARN_ON(dev_priv->rps.pm_iir != 0);
3614 I915_WRITE(GEN6_PMIMR, 0);
3615 spin_unlock_irq(&dev_priv->rps.lock);
3616 /* unmask all PM interrupts */
3617 I915_WRITE(GEN6_PMINTRMSK, 0);
3618
3619 gen6_gt_force_wake_put(dev_priv);
3620}
3621
2745void ironlake_teardown_rc6(struct drm_device *dev) 3622void ironlake_teardown_rc6(struct drm_device *dev)
2746{ 3623{
2747 struct drm_i915_private *dev_priv = dev->dev_private; 3624 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3465,13 +4342,22 @@ void intel_disable_gt_powersave(struct drm_device *dev)
3465{ 4342{
3466 struct drm_i915_private *dev_priv = dev->dev_private; 4343 struct drm_i915_private *dev_priv = dev->dev_private;
3467 4344
4345 /* Interrupts should be disabled already to avoid re-arming. */
4346 WARN_ON(dev->irq_enabled);
4347
3468 if (IS_IRONLAKE_M(dev)) { 4348 if (IS_IRONLAKE_M(dev)) {
3469 ironlake_disable_drps(dev); 4349 ironlake_disable_drps(dev);
3470 ironlake_disable_rc6(dev); 4350 ironlake_disable_rc6(dev);
3471 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { 4351 } else if (INTEL_INFO(dev)->gen >= 6) {
3472 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); 4352 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4353 cancel_work_sync(&dev_priv->rps.work);
4354 if (IS_VALLEYVIEW(dev))
4355 cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
3473 mutex_lock(&dev_priv->rps.hw_lock); 4356 mutex_lock(&dev_priv->rps.hw_lock);
3474 gen6_disable_rps(dev); 4357 if (IS_VALLEYVIEW(dev))
4358 valleyview_disable_rps(dev);
4359 else
4360 gen6_disable_rps(dev);
3475 mutex_unlock(&dev_priv->rps.hw_lock); 4361 mutex_unlock(&dev_priv->rps.hw_lock);
3476 } 4362 }
3477} 4363}
@@ -3484,8 +4370,13 @@ static void intel_gen6_powersave_work(struct work_struct *work)
3484 struct drm_device *dev = dev_priv->dev; 4370 struct drm_device *dev = dev_priv->dev;
3485 4371
3486 mutex_lock(&dev_priv->rps.hw_lock); 4372 mutex_lock(&dev_priv->rps.hw_lock);
3487 gen6_enable_rps(dev); 4373
3488 gen6_update_ring_freq(dev); 4374 if (IS_VALLEYVIEW(dev)) {
4375 valleyview_enable_rps(dev);
4376 } else {
4377 gen6_enable_rps(dev);
4378 gen6_update_ring_freq(dev);
4379 }
3489 mutex_unlock(&dev_priv->rps.hw_lock); 4380 mutex_unlock(&dev_priv->rps.hw_lock);
3490} 4381}
3491 4382
@@ -3497,7 +4388,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
3497 ironlake_enable_drps(dev); 4388 ironlake_enable_drps(dev);
3498 ironlake_enable_rc6(dev); 4389 ironlake_enable_rc6(dev);
3499 intel_init_emon(dev); 4390 intel_init_emon(dev);
3500 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { 4391 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
3501 /* 4392 /*
3502 * PCU communication is slow and this doesn't need to be 4393 * PCU communication is slow and this doesn't need to be
3503 * done at any specific time, so do this out of our fast path 4394 * done at any specific time, so do this out of our fast path
@@ -3520,6 +4411,19 @@ static void ibx_init_clock_gating(struct drm_device *dev)
3520 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 4411 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3521} 4412}
3522 4413
4414static void g4x_disable_trickle_feed(struct drm_device *dev)
4415{
4416 struct drm_i915_private *dev_priv = dev->dev_private;
4417 int pipe;
4418
4419 for_each_pipe(pipe) {
4420 I915_WRITE(DSPCNTR(pipe),
4421 I915_READ(DSPCNTR(pipe)) |
4422 DISPPLANE_TRICKLE_FEED_DISABLE);
4423 intel_flush_display_plane(dev_priv, pipe);
4424 }
4425}
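This helper exists purely to deduplicate: the same per-pipe DSPCNTR | DISPPLANE_TRICKLE_FEED_DISABLE loop previously appeared verbatim in the gen6, ivybridge, haswell and valleyview clock-gating paths, and the hunks below replace each copy with a call to it (valleyview instead switches to a single MI_ARB_VLV write).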
4426
3523static void ironlake_init_clock_gating(struct drm_device *dev) 4427static void ironlake_init_clock_gating(struct drm_device *dev)
3524{ 4428{
3525 struct drm_i915_private *dev_priv = dev->dev_private; 4429 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3579,10 +4483,12 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3579 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 4483 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3580 _3D_CHICKEN2_WM_READ_PIPELINED); 4484 _3D_CHICKEN2_WM_READ_PIPELINED);
3581 4485
3582 /* WaDisableRenderCachePipelinedFlush */ 4486 /* WaDisableRenderCachePipelinedFlush:ilk */
3583 I915_WRITE(CACHE_MODE_0, 4487 I915_WRITE(CACHE_MODE_0,
3584 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 4488 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3585 4489
4490 g4x_disable_trickle_feed(dev);
4491
3586 ibx_init_clock_gating(dev); 4492 ibx_init_clock_gating(dev);
3587} 4493}
3588 4494
@@ -3607,7 +4513,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
3607 val = I915_READ(TRANS_CHICKEN2(pipe)); 4513 val = I915_READ(TRANS_CHICKEN2(pipe));
3608 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 4514 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
3609 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 4515 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
3610 if (dev_priv->fdi_rx_polarity_inverted) 4516 if (dev_priv->vbt.fdi_rx_polarity_inverted)
3611 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 4517 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
3612 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 4518 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
3613 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 4519 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
@@ -3637,7 +4543,6 @@ static void gen6_check_mch_setup(struct drm_device *dev)
3637static void gen6_init_clock_gating(struct drm_device *dev) 4543static void gen6_init_clock_gating(struct drm_device *dev)
3638{ 4544{
3639 struct drm_i915_private *dev_priv = dev->dev_private; 4545 struct drm_i915_private *dev_priv = dev->dev_private;
3640 int pipe;
3641 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 4546 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3642 4547
3643 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 4548 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -3646,11 +4551,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3646 I915_READ(ILK_DISPLAY_CHICKEN2) | 4551 I915_READ(ILK_DISPLAY_CHICKEN2) |
3647 ILK_ELPIN_409_SELECT); 4552 ILK_ELPIN_409_SELECT);
3648 4553
3649 /* WaDisableHiZPlanesWhenMSAAEnabled */ 4554 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
3650 I915_WRITE(_3D_CHICKEN, 4555 I915_WRITE(_3D_CHICKEN,
3651 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 4556 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
3652 4557
3653 /* WaSetupGtModeTdRowDispatch */ 4558 /* WaSetupGtModeTdRowDispatch:snb */
3654 if (IS_SNB_GT1(dev)) 4559 if (IS_SNB_GT1(dev))
3655 I915_WRITE(GEN6_GT_MODE, 4560 I915_WRITE(GEN6_GT_MODE,
3656 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); 4561 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
@@ -3677,8 +4582,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3677 * According to the spec, bit 11 (RCCUNIT) must also be set, 4582 * According to the spec, bit 11 (RCCUNIT) must also be set,
3678 * but we didn't debug actual testcases to find it out. 4583 * but we didn't debug actual testcases to find it out.
3679 * 4584 *
3680 * Also apply WaDisableVDSUnitClockGating and 4585 * Also apply WaDisableVDSUnitClockGating:snb and
3681 * WaDisableRCPBUnitClockGating. 4586 * WaDisableRCPBUnitClockGating:snb.
3682 */ 4587 */
3683 I915_WRITE(GEN6_UCGCTL2, 4588 I915_WRITE(GEN6_UCGCTL2,
3684 GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 4589 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
@@ -3709,16 +4614,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3709 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 4614 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
3710 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 4615 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
3711 4616
3712 /* WaMbcDriverBootEnable */ 4617 /* WaMbcDriverBootEnable:snb */
3713 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 4618 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3714 GEN6_MBCTL_ENABLE_BOOT_FETCH); 4619 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3715 4620
3716 for_each_pipe(pipe) { 4621 g4x_disable_trickle_feed(dev);
3717 I915_WRITE(DSPCNTR(pipe),
3718 I915_READ(DSPCNTR(pipe)) |
3719 DISPPLANE_TRICKLE_FEED_DISABLE);
3720 intel_flush_display_plane(dev_priv, pipe);
3721 }
3722 4622
3723 /* The default value should be 0x200 according to docs, but the two 4623 /* The default value should be 0x200 according to docs, but the two
3724 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ 4624 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
@@ -3739,7 +4639,6 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3739 reg |= GEN7_FF_VS_SCHED_HW; 4639 reg |= GEN7_FF_VS_SCHED_HW;
3740 reg |= GEN7_FF_DS_SCHED_HW; 4640 reg |= GEN7_FF_DS_SCHED_HW;
3741 4641
3742 /* WaVSRefCountFullforceMissDisable */
3743 if (IS_HASWELL(dev_priv->dev)) 4642 if (IS_HASWELL(dev_priv->dev))
3744 reg &= ~GEN7_FF_VS_REF_CNT_FFME; 4643 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
3745 4644
@@ -3758,65 +4657,72 @@ static void lpt_init_clock_gating(struct drm_device *dev)
3758 I915_WRITE(SOUTH_DSPCLK_GATE_D, 4657 I915_WRITE(SOUTH_DSPCLK_GATE_D,
3759 I915_READ(SOUTH_DSPCLK_GATE_D) | 4658 I915_READ(SOUTH_DSPCLK_GATE_D) |
3760 PCH_LP_PARTITION_LEVEL_DISABLE); 4659 PCH_LP_PARTITION_LEVEL_DISABLE);
4660
4661 /* WADPOClockGatingDisable:hsw */
4662 I915_WRITE(_TRANSA_CHICKEN1,
4663 I915_READ(_TRANSA_CHICKEN1) |
4664 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4665}
4666
4667static void lpt_suspend_hw(struct drm_device *dev)
4668{
4669 struct drm_i915_private *dev_priv = dev->dev_private;
4670
4671 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4672 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4673
4674 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4675 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4676 }
3761} 4677}
3762 4678
3763static void haswell_init_clock_gating(struct drm_device *dev) 4679static void haswell_init_clock_gating(struct drm_device *dev)
3764{ 4680{
3765 struct drm_i915_private *dev_priv = dev->dev_private; 4681 struct drm_i915_private *dev_priv = dev->dev_private;
3766 int pipe;
3767 4682
3768 I915_WRITE(WM3_LP_ILK, 0); 4683 I915_WRITE(WM3_LP_ILK, 0);
3769 I915_WRITE(WM2_LP_ILK, 0); 4684 I915_WRITE(WM2_LP_ILK, 0);
3770 I915_WRITE(WM1_LP_ILK, 0); 4685 I915_WRITE(WM1_LP_ILK, 0);
3771 4686
3772 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4687 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3773 * This implements the WaDisableRCZUnitClockGating workaround. 4688 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
3774 */ 4689 */
3775 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 4690 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3776 4691
3777 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 4692 /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
3778 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 4693 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3779 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 4694 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3780 4695
3781 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 4696 /* WaApplyL3ControlAndL3ChickenMode:hsw */
3782 I915_WRITE(GEN7_L3CNTLREG1, 4697 I915_WRITE(GEN7_L3CNTLREG1,
3783 GEN7_WA_FOR_GEN7_L3_CONTROL); 4698 GEN7_WA_FOR_GEN7_L3_CONTROL);
3784 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 4699 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3785 GEN7_WA_L3_CHICKEN_MODE); 4700 GEN7_WA_L3_CHICKEN_MODE);
3786 4701
3787 /* This is required by WaCatErrorRejectionIssue */ 4702 /* This is required by WaCatErrorRejectionIssue:hsw */
3788 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4703 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3789 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4704 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3790 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4705 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3791 4706
3792 for_each_pipe(pipe) { 4707 g4x_disable_trickle_feed(dev);
3793 I915_WRITE(DSPCNTR(pipe),
3794 I915_READ(DSPCNTR(pipe)) |
3795 DISPPLANE_TRICKLE_FEED_DISABLE);
3796 intel_flush_display_plane(dev_priv, pipe);
3797 }
3798 4708
4709 /* WaVSRefCountFullforceMissDisable:hsw */
3799 gen7_setup_fixed_func_scheduler(dev_priv); 4710 gen7_setup_fixed_func_scheduler(dev_priv);
3800 4711
3801 /* WaDisable4x2SubspanOptimization */ 4712 /* WaDisable4x2SubspanOptimization:hsw */
3802 I915_WRITE(CACHE_MODE_1, 4713 I915_WRITE(CACHE_MODE_1,
3803 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4714 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3804 4715
3805 /* WaMbcDriverBootEnable */ 4716 /* WaMbcDriverBootEnable:hsw */
3806 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 4717 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3807 GEN6_MBCTL_ENABLE_BOOT_FETCH); 4718 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3808 4719
3809 /* WaSwitchSolVfFArbitrationPriority */ 4720 /* WaSwitchSolVfFArbitrationPriority:hsw */
3810 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 4721 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
3811 4722
3812 /* XXX: This is a workaround for early silicon revisions and should be 4723 /* WaRsPkgCStateDisplayPMReq:hsw */
3813 * removed later. 4724 I915_WRITE(CHICKEN_PAR1_1,
3814 */ 4725 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
3815 I915_WRITE(WM_DBG,
3816 I915_READ(WM_DBG) |
3817 WM_DBG_DISALLOW_MULTIPLE_LP |
3818 WM_DBG_DISALLOW_SPRITE |
3819 WM_DBG_DISALLOW_MAXFIFO);
3820 4726
3821 lpt_init_clock_gating(dev); 4727 lpt_init_clock_gating(dev);
3822} 4728}
@@ -3824,7 +4730,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3824static void ivybridge_init_clock_gating(struct drm_device *dev) 4730static void ivybridge_init_clock_gating(struct drm_device *dev)
3825{ 4731{
3826 struct drm_i915_private *dev_priv = dev->dev_private; 4732 struct drm_i915_private *dev_priv = dev->dev_private;
3827 int pipe;
3828 uint32_t snpcr; 4733 uint32_t snpcr;
3829 4734
3830 I915_WRITE(WM3_LP_ILK, 0); 4735 I915_WRITE(WM3_LP_ILK, 0);
@@ -3833,16 +4738,16 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3833 4738
3834 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 4739 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3835 4740
3836 /* WaDisableEarlyCull */ 4741 /* WaDisableEarlyCull:ivb */
3837 I915_WRITE(_3D_CHICKEN3, 4742 I915_WRITE(_3D_CHICKEN3,
3838 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 4743 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3839 4744
3840 /* WaDisableBackToBackFlipFix */ 4745 /* WaDisableBackToBackFlipFix:ivb */
3841 I915_WRITE(IVB_CHICKEN3, 4746 I915_WRITE(IVB_CHICKEN3,
3842 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 4747 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3843 CHICKEN3_DGMG_DONE_FIX_DISABLE); 4748 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3844 4749
3845 /* WaDisablePSDDualDispatchEnable */ 4750 /* WaDisablePSDDualDispatchEnable:ivb */
3846 if (IS_IVB_GT1(dev)) 4751 if (IS_IVB_GT1(dev))
3847 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 4752 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3848 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4753 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -3850,11 +4755,11 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3850 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, 4755 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
3851 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4756 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3852 4757
3853 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 4758 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
3854 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 4759 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3855 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 4760 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3856 4761
3857 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 4762 /* WaApplyL3ControlAndL3ChickenMode:ivb */
3858 I915_WRITE(GEN7_L3CNTLREG1, 4763 I915_WRITE(GEN7_L3CNTLREG1,
3859 GEN7_WA_FOR_GEN7_L3_CONTROL); 4764 GEN7_WA_FOR_GEN7_L3_CONTROL);
3860 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 4765 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
@@ -3867,7 +4772,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3867 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4772 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3868 4773
3869 4774
3870 /* WaForceL3Serialization */ 4775 /* WaForceL3Serialization:ivb */
3871 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 4776 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3872 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 4777 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3873 4778
@@ -3882,31 +4787,27 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3882 * but we didn't debug actual testcases to find it out. 4787 * but we didn't debug actual testcases to find it out.
3883 * 4788 *
3884 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4789 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3885 * This implements the WaDisableRCZUnitClockGating workaround. 4790 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
3886 */ 4791 */
3887 I915_WRITE(GEN6_UCGCTL2, 4792 I915_WRITE(GEN6_UCGCTL2,
3888 GEN6_RCZUNIT_CLOCK_GATE_DISABLE | 4793 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
3889 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 4794 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3890 4795
3891 /* This is required by WaCatErrorRejectionIssue */ 4796 /* This is required by WaCatErrorRejectionIssue:ivb */
3892 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4797 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3893 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4798 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3894 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4799 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3895 4800
3896 for_each_pipe(pipe) { 4801 g4x_disable_trickle_feed(dev);
3897 I915_WRITE(DSPCNTR(pipe),
3898 I915_READ(DSPCNTR(pipe)) |
3899 DISPPLANE_TRICKLE_FEED_DISABLE);
3900 intel_flush_display_plane(dev_priv, pipe);
3901 }
3902 4802
3903 /* WaMbcDriverBootEnable */ 4803 /* WaMbcDriverBootEnable:ivb */
3904 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 4804 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3905 GEN6_MBCTL_ENABLE_BOOT_FETCH); 4805 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3906 4806
4807 /* WaVSRefCountFullforceMissDisable:ivb */
3907 gen7_setup_fixed_func_scheduler(dev_priv); 4808 gen7_setup_fixed_func_scheduler(dev_priv);
3908 4809
3909 /* WaDisable4x2SubspanOptimization */ 4810 /* WaDisable4x2SubspanOptimization:ivb */
3910 I915_WRITE(CACHE_MODE_1, 4811 I915_WRITE(CACHE_MODE_1,
3911 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4812 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3912 4813
@@ -3924,54 +4825,45 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3924static void valleyview_init_clock_gating(struct drm_device *dev) 4825static void valleyview_init_clock_gating(struct drm_device *dev)
3925{ 4826{
3926 struct drm_i915_private *dev_priv = dev->dev_private; 4827 struct drm_i915_private *dev_priv = dev->dev_private;
3927 int pipe;
3928
3929 I915_WRITE(WM3_LP_ILK, 0);
3930 I915_WRITE(WM2_LP_ILK, 0);
3931 I915_WRITE(WM1_LP_ILK, 0);
3932 4828
3933 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 4829 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
3934 4830
3935 /* WaDisableEarlyCull */ 4831 /* WaDisableEarlyCull:vlv */
3936 I915_WRITE(_3D_CHICKEN3, 4832 I915_WRITE(_3D_CHICKEN3,
3937 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 4833 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3938 4834
3939 /* WaDisableBackToBackFlipFix */ 4835 /* WaDisableBackToBackFlipFix:vlv */
3940 I915_WRITE(IVB_CHICKEN3, 4836 I915_WRITE(IVB_CHICKEN3,
3941 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 4837 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3942 CHICKEN3_DGMG_DONE_FIX_DISABLE); 4838 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3943 4839
3944 /* WaDisablePSDDualDispatchEnable */ 4840 /* WaDisablePSDDualDispatchEnable:vlv */
3945 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 4841 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3946 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 4842 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
3947 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4843 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3948 4844
3949 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 4845 /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
3950 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 4846 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3951 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 4847 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3952 4848
3953 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 4849 /* WaApplyL3ControlAndL3ChickenMode:vlv */
3954 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); 4850 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
3955 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); 4851 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3956 4852
3957 /* WaForceL3Serialization */ 4853 /* WaForceL3Serialization:vlv */
3958 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 4854 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3959 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 4855 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3960 4856
3961 /* WaDisableDopClockGating */ 4857 /* WaDisableDopClockGating:vlv */
3962 I915_WRITE(GEN7_ROW_CHICKEN2, 4858 I915_WRITE(GEN7_ROW_CHICKEN2,
3963 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4859 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3964 4860
3965 /* WaForceL3Serialization */ 4861 /* This is required by WaCatErrorRejectionIssue:vlv */
3966 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3967 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3968
3969 /* This is required by WaCatErrorRejectionIssue */
3970 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4862 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3971 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4863 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3972 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4864 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3973 4865
3974 /* WaMbcDriverBootEnable */ 4866 /* WaMbcDriverBootEnable:vlv */
3975 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 4867 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3976 GEN6_MBCTL_ENABLE_BOOT_FETCH); 4868 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3977 4869
@@ -3987,10 +4879,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
3987 * but we didn't debug actual testcases to find it out. 4879 * but we didn't debug actual testcases to find it out.
3988 * 4880 *
3989 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4881 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3990 * This implements the WaDisableRCZUnitClockGating workaround. 4882 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
3991 * 4883 *
3992 * Also apply WaDisableVDSUnitClockGating and 4884 * Also apply WaDisableVDSUnitClockGating:vlv and
3993 * WaDisableRCPBUnitClockGating. 4885 * WaDisableRCPBUnitClockGating:vlv.
3994 */ 4886 */
3995 I915_WRITE(GEN6_UCGCTL2, 4887 I915_WRITE(GEN6_UCGCTL2,
3996 GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 4888 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
@@ -4001,18 +4893,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4001 4893
4002 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 4894 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
4003 4895
4004 for_each_pipe(pipe) { 4896 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
4005 I915_WRITE(DSPCNTR(pipe),
4006 I915_READ(DSPCNTR(pipe)) |
4007 DISPPLANE_TRICKLE_FEED_DISABLE);
4008 intel_flush_display_plane(dev_priv, pipe);
4009 }
4010 4897
4011 I915_WRITE(CACHE_MODE_1, 4898 I915_WRITE(CACHE_MODE_1,
4012 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4899 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4013 4900
4014 /* 4901 /*
4015 * WaDisableVLVClockGating_VBIIssue 4902 * WaDisableVLVClockGating_VBIIssue:vlv
4016 * Disable clock gating on the GCFG unit to prevent a delay 4903 * Disable clock gating on the GCFG unit to prevent a delay
4017 * in the reporting of vblank events. 4904 * in the reporting of vblank events.
4018 */ 4905 */
@@ -4048,6 +4935,8 @@ static void g4x_init_clock_gating(struct drm_device *dev)
4048 /* WaDisableRenderCachePipelinedFlush */ 4935 /* WaDisableRenderCachePipelinedFlush */
4049 I915_WRITE(CACHE_MODE_0, 4936 I915_WRITE(CACHE_MODE_0,
4050 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 4937 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4938
4939 g4x_disable_trickle_feed(dev);
4051} 4940}
4052 4941
4053static void crestline_init_clock_gating(struct drm_device *dev) 4942static void crestline_init_clock_gating(struct drm_device *dev)
@@ -4059,6 +4948,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
4059 I915_WRITE(DSPCLK_GATE_D, 0); 4948 I915_WRITE(DSPCLK_GATE_D, 0);
4060 I915_WRITE(RAMCLK_GATE_D, 0); 4949 I915_WRITE(RAMCLK_GATE_D, 0);
4061 I915_WRITE16(DEUC, 0); 4950 I915_WRITE16(DEUC, 0);
4951 I915_WRITE(MI_ARB_STATE,
4952 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4062} 4953}
4063 4954
4064static void broadwater_init_clock_gating(struct drm_device *dev) 4955static void broadwater_init_clock_gating(struct drm_device *dev)
@@ -4071,6 +4962,8 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
4071 I965_ISC_CLOCK_GATE_DISABLE | 4962 I965_ISC_CLOCK_GATE_DISABLE |
4072 I965_FBC_CLOCK_GATE_DISABLE); 4963 I965_FBC_CLOCK_GATE_DISABLE);
4073 I915_WRITE(RENCLK_GATE_D2, 0); 4964 I915_WRITE(RENCLK_GATE_D2, 0);
4965 I915_WRITE(MI_ARB_STATE,
4966 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4074} 4967}
4075 4968
4076static void gen3_init_clock_gating(struct drm_device *dev) 4969static void gen3_init_clock_gating(struct drm_device *dev)
@@ -4110,34 +5003,50 @@ void intel_init_clock_gating(struct drm_device *dev)
4110 dev_priv->display.init_clock_gating(dev); 5003 dev_priv->display.init_clock_gating(dev);
4111} 5004}
4112 5005
5006void intel_suspend_hw(struct drm_device *dev)
5007{
5008 if (HAS_PCH_LPT(dev))
5009 lpt_suspend_hw(dev);
5010}
5011
4113/** 5012/**
4114 * We should only use the power well if we explicitly asked the hardware to 5013 * We should only use the power well if we explicitly asked the hardware to
4115 * enable it, so check if it's enabled and also check if we've requested it to 5014 * enable it, so check if it's enabled and also check if we've requested it to
4116 * be enabled. 5015 * be enabled.
4117 */ 5016 */
4118bool intel_using_power_well(struct drm_device *dev) 5017bool intel_display_power_enabled(struct drm_device *dev,
5018 enum intel_display_power_domain domain)
4119{ 5019{
4120 struct drm_i915_private *dev_priv = dev->dev_private; 5020 struct drm_i915_private *dev_priv = dev->dev_private;
4121 5021
4122 if (IS_HASWELL(dev)) 5022 if (!HAS_POWER_WELL(dev))
5023 return true;
5024
5025 switch (domain) {
5026 case POWER_DOMAIN_PIPE_A:
5027 case POWER_DOMAIN_TRANSCODER_EDP:
5028 return true;
5029 case POWER_DOMAIN_PIPE_B:
5030 case POWER_DOMAIN_PIPE_C:
5031 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5032 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5033 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5034 case POWER_DOMAIN_TRANSCODER_A:
5035 case POWER_DOMAIN_TRANSCODER_B:
5036 case POWER_DOMAIN_TRANSCODER_C:
4123 return I915_READ(HSW_PWR_WELL_DRIVER) == 5037 return I915_READ(HSW_PWR_WELL_DRIVER) ==
4124 (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); 5038 (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
4125 else 5039 default:
4126 return true; 5040 BUG();
5041 }
4127} 5042}
4128 5043
4129void intel_set_power_well(struct drm_device *dev, bool enable) 5044static void __intel_set_power_well(struct drm_device *dev, bool enable)
4130{ 5045{
4131 struct drm_i915_private *dev_priv = dev->dev_private; 5046 struct drm_i915_private *dev_priv = dev->dev_private;
4132 bool is_enabled, enable_requested; 5047 bool is_enabled, enable_requested;
4133 uint32_t tmp; 5048 uint32_t tmp;
4134 5049
4135 if (!HAS_POWER_WELL(dev))
4136 return;
4137
4138 if (!i915_disable_power_well && !enable)
4139 return;
4140
4141 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5050 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
4142 is_enabled = tmp & HSW_PWR_WELL_STATE; 5051 is_enabled = tmp & HSW_PWR_WELL_STATE;
4143 enable_requested = tmp & HSW_PWR_WELL_ENABLE; 5052 enable_requested = tmp & HSW_PWR_WELL_ENABLE;
@@ -4160,6 +5069,79 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
4160 } 5069 }
4161} 5070}
4162 5071
5072static struct i915_power_well *hsw_pwr;
5073
5074/* Display audio driver power well request */
5075void i915_request_power_well(void)
5076{
5077 if (WARN_ON(!hsw_pwr))
5078 return;
5079
5080 spin_lock_irq(&hsw_pwr->lock);
5081 if (!hsw_pwr->count++ &&
5082 !hsw_pwr->i915_request)
5083 __intel_set_power_well(hsw_pwr->device, true);
5084 spin_unlock_irq(&hsw_pwr->lock);
5085}
5086EXPORT_SYMBOL_GPL(i915_request_power_well);
5087
5088/* Display audio driver power well release */
5089void i915_release_power_well(void)
5090{
5091 if (WARN_ON(!hsw_pwr))
5092 return;
5093
5094 spin_lock_irq(&hsw_pwr->lock);
5095 WARN_ON(!hsw_pwr->count);
5096 if (!--hsw_pwr->count &&
5097 !hsw_pwr->i915_request)
5098 __intel_set_power_well(hsw_pwr->device, false);
5099 spin_unlock_irq(&hsw_pwr->lock);
5100}
5101EXPORT_SYMBOL_GPL(i915_release_power_well);
5102
5103int i915_init_power_well(struct drm_device *dev)
5104{
5105 struct drm_i915_private *dev_priv = dev->dev_private;
5106
5107 hsw_pwr = &dev_priv->power_well;
5108
5109 hsw_pwr->device = dev;
5110 spin_lock_init(&hsw_pwr->lock);
5111 hsw_pwr->count = 0;
5112
5113 return 0;
5114}
5115
5116void i915_remove_power_well(struct drm_device *dev)
5117{
5118 hsw_pwr = NULL;
5119}
5120
5121void intel_set_power_well(struct drm_device *dev, bool enable)
5122{
5123 struct drm_i915_private *dev_priv = dev->dev_private;
5124 struct i915_power_well *power_well = &dev_priv->power_well;
5125
5126 if (!HAS_POWER_WELL(dev))
5127 return;
5128
5129 if (!i915_disable_power_well && !enable)
5130 return;
5131
5132 spin_lock_irq(&power_well->lock);
5133 power_well->i915_request = enable;
5134
5135 /* only reject "disable" power well request */
5136 if (power_well->count && !enable) {
5137 spin_unlock_irq(&power_well->lock);
5138 return;
5139 }
5140
5141 __intel_set_power_well(dev, enable);
5142 spin_unlock_irq(&power_well->lock);
5143}
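The well now has two independent keep-alive sources: i915's own request (the i915_request flag set here) and the external refcount used by the exported pair above. A consumer-side sketch of how the display audio driver is expected to use that pair (illustration only, not part of this commit):

	/* Bracket any display-audio register access: */
	i915_request_power_well();	/* 0 -> 1 powers the well up, unless
					 * i915 already has it on */
	/* ... program HD-audio display registers ... */
	i915_release_power_well();	/* 1 -> 0 powers it down again, unless
					 * i915 still wants it on */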
5144
4163/* 5145/*
4164 * Starting with Haswell, we have a "Power Down Well" that can be turned off 5146 * Starting with Haswell, we have a "Power Down Well" that can be turned off
4165 * when not needed anymore. We have 4 registers that can request the power well 5147 * when not needed anymore. We have 4 registers that can request the power well
@@ -4190,7 +5172,12 @@ void intel_init_pm(struct drm_device *dev)
4190 if (I915_HAS_FBC(dev)) { 5172 if (I915_HAS_FBC(dev)) {
4191 if (HAS_PCH_SPLIT(dev)) { 5173 if (HAS_PCH_SPLIT(dev)) {
4192 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 5174 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
4193 dev_priv->display.enable_fbc = ironlake_enable_fbc; 5175 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5176 dev_priv->display.enable_fbc =
5177 gen7_enable_fbc;
5178 else
5179 dev_priv->display.enable_fbc =
5180 ironlake_enable_fbc;
4194 dev_priv->display.disable_fbc = ironlake_disable_fbc; 5181 dev_priv->display.disable_fbc = ironlake_disable_fbc;
4195 } else if (IS_GM45(dev)) { 5182 } else if (IS_GM45(dev)) {
4196 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 5183 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
@@ -4242,10 +5229,10 @@ void intel_init_pm(struct drm_device *dev)
4242 } 5229 }
4243 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 5230 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
4244 } else if (IS_HASWELL(dev)) { 5231 } else if (IS_HASWELL(dev)) {
4245 if (SNB_READ_WM0_LATENCY()) { 5232 if (I915_READ64(MCH_SSKPD)) {
4246 dev_priv->display.update_wm = sandybridge_update_wm; 5233 dev_priv->display.update_wm = haswell_update_wm;
4247 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 5234 dev_priv->display.update_sprite_wm =
4248 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm; 5235 haswell_update_sprite_wm;
4249 } else { 5236 } else {
4250 DRM_DEBUG_KMS("Failed to read display plane latency. " 5237 DRM_DEBUG_KMS("Failed to read display plane latency. "
4251 "Disable CxSR\n"); 5238 "Disable CxSR\n");
@@ -4340,6 +5327,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4340 FORCEWAKE_ACK_TIMEOUT_MS)) 5327 FORCEWAKE_ACK_TIMEOUT_MS))
4341 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 5328 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4342 5329
5330 /* WaRsForcewakeWaitTC0:snb */
4343 __gen6_gt_wait_for_thread_c0(dev_priv); 5331 __gen6_gt_wait_for_thread_c0(dev_priv);
4344} 5332}
4345 5333
@@ -4371,6 +5359,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4371 FORCEWAKE_ACK_TIMEOUT_MS)) 5359 FORCEWAKE_ACK_TIMEOUT_MS))
4372 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 5360 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4373 5361
5362 /* WaRsForcewakeWaitTC0:ivb,hsw */
4374 __gen6_gt_wait_for_thread_c0(dev_priv); 5363 __gen6_gt_wait_for_thread_c0(dev_priv);
4375} 5364}
4376 5365
@@ -4474,6 +5463,7 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4474 FORCEWAKE_ACK_TIMEOUT_MS)) 5463 FORCEWAKE_ACK_TIMEOUT_MS))
4475 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); 5464 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
4476 5465
5466 /* WaRsForcewakeWaitTC0:vlv */
4477 __gen6_gt_wait_for_thread_c0(dev_priv); 5467 __gen6_gt_wait_for_thread_c0(dev_priv);
4478} 5468}
4479 5469
@@ -4568,55 +5558,58 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4568 return 0; 5558 return 0;
4569} 5559}
4570 5560
4571static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode, 5561int vlv_gpu_freq(int ddr_freq, int val)
4572 u8 addr, u32 *val)
4573{ 5562{
4574 u32 cmd, devfn, port, be, bar; 5563 int mult, base;
4575
4576 bar = 0;
4577 be = 0xf;
4578 port = IOSF_PORT_PUNIT;
4579 devfn = PCI_DEVFN(2, 0);
4580 5564
4581 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) | 5565 switch (ddr_freq) {
4582 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) | 5566 case 800:
4583 (bar << IOSF_BAR_SHIFT); 5567 mult = 20;
4584 5568 base = 120;
4585 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5569 break;
4586 5570 case 1066:
4587 if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) { 5571 mult = 22;
4588 DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n", 5572 base = 133;
4589 opcode == PUNIT_OPCODE_REG_READ ? 5573 break;
4590 "read" : "write"); 5574 case 1333:
4591 return -EAGAIN; 5575 mult = 21;
5576 base = 125;
5577 break;
5578 default:
5579 return -1;
4592 } 5580 }
4593 5581
4594 I915_WRITE(VLV_IOSF_ADDR, addr); 5582 return ((val - 0xbd) * mult) + base;
4595 if (opcode == PUNIT_OPCODE_REG_WRITE) 5583}
4596 I915_WRITE(VLV_IOSF_DATA, *val);
4597 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
4598 5584
4599 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5585int vlv_freq_opcode(int ddr_freq, int val)
4600 500)) { 5586{
4601 DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n", 5587 int mult, base;
4602 opcode == PUNIT_OPCODE_REG_READ ? "read" : "write", 5588
4603 addr); 5589 switch (ddr_freq) {
4604 return -ETIMEDOUT; 5590 case 800:
5591 mult = 20;
5592 base = 120;
5593 break;
5594 case 1066:
5595 mult = 22;
5596 base = 133;
5597 break;
5598 case 1333:
5599 mult = 21;
5600 base = 125;
5601 break;
5602 default:
5603 return -1;
4605 } 5604 }
4606 5605
4607 if (opcode == PUNIT_OPCODE_REG_READ) 5606 val /= mult;
4608 *val = I915_READ(VLV_IOSF_DATA); 5607 val -= base / mult;
4609 I915_WRITE(VLV_IOSF_DATA, 0); 5608 val += 0xbd;
4610 5609
4611 return 0; 5610 if (val > 0xea)
4612} 5611 val = 0xea;
4613 5612
4614int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val) 5613 return val;
4615{
4616 return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
4617} 5614}
4618 5615
4619int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
4620{
4621 return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
4622}
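vlv_gpu_freq() and vlv_freq_opcode() above are inverse affine maps, MHz = (code - 0xbd) * mult + base, with (mult, base) picked per DDR speed. A worked round trip, assuming the 800 MHz memory case (mult = 20, base = 120):

	int code = 0xc0;
	int mhz  = vlv_gpu_freq(800, code);	/* (0xc0 - 0xbd) * 20 + 120 = 180 */
	int back = vlv_freq_opcode(800, mhz);	/* 180/20 - 120/20 + 0xbd = 0xc0,
						 * then clamped to at most 0xea */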
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d5d613eb6be..e51ab552046c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -280,6 +280,27 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
280 return 0; 280 return 0;
281} 281}
282 282
283static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
284{
285 int ret;
286
287 if (!ring->fbc_dirty)
288 return 0;
289
290 ret = intel_ring_begin(ring, 4);
291 if (ret)
292 return ret;
293 intel_ring_emit(ring, MI_NOOP);
294 /* WaFbcNukeOn3DBlt:ivb/hsw */
295 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
296 intel_ring_emit(ring, MSG_FBC_REND_STATE);
297 intel_ring_emit(ring, value);
298 intel_ring_advance(ring);
299
300 ring->fbc_dirty = false;
301 return 0;
302}
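The four dwords emitted above form one MI_LOAD_REGISTER_IMM(1) sequence padded to even length with a leading MI_NOOP: the LRI header, the MSG_FBC_REND_STATE register offset, and the value passed in by the caller (FBC_REND_NUKE from gen7_render_ring_flush below), so any flush with write domains also nukes FBC, as the WaFbcNukeOn3DBlt name suggests.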
303
283static int 304static int
284gen7_render_ring_flush(struct intel_ring_buffer *ring, 305gen7_render_ring_flush(struct intel_ring_buffer *ring,
285 u32 invalidate_domains, u32 flush_domains) 306 u32 invalidate_domains, u32 flush_domains)
@@ -336,6 +357,9 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
336 intel_ring_emit(ring, 0); 357 intel_ring_emit(ring, 0);
337 intel_ring_advance(ring); 358 intel_ring_advance(ring);
338 359
360 if (flush_domains)
361 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
362
339 return 0; 363 return 0;
340} 364}
341 365
@@ -429,6 +453,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
429 ring->last_retired_head = -1; 453 ring->last_retired_head = -1;
430 } 454 }
431 455
456 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
457
432out: 458out:
433 if (HAS_FORCE_WAKE(dev)) 459 if (HAS_FORCE_WAKE(dev))
434 gen6_gt_force_wake_put(dev_priv); 460 gen6_gt_force_wake_put(dev_priv);
@@ -464,9 +490,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
464 goto err_unref; 490 goto err_unref;
465 491
466 pc->gtt_offset = obj->gtt_offset; 492 pc->gtt_offset = obj->gtt_offset;
467 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 493 pc->cpu_page = kmap(sg_page(obj->pages->sgl));
468 if (pc->cpu_page == NULL) 494 if (pc->cpu_page == NULL) {
495 ret = -ENOMEM;
469 goto err_unpin; 496 goto err_unpin;
497 }
470 498
471 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 499 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
472 ring->name, pc->gtt_offset); 500 ring->name, pc->gtt_offset);
@@ -515,6 +543,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
515 /* We need to disable the AsyncFlip performance optimisations in order 543 /* We need to disable the AsyncFlip performance optimisations in order
516 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 544 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
517 * programmed to '1' on all products. 545 * programmed to '1' on all products.
546 *
547 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
518 */ 548 */
519 if (INTEL_INFO(dev)->gen >= 6) 549 if (INTEL_INFO(dev)->gen >= 6)
520 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 550 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -556,7 +586,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
556 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 586 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
557 587
558 if (HAS_L3_GPU_CACHE(dev)) 588 if (HAS_L3_GPU_CACHE(dev))
559 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 589 I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
560 590
561 return ret; 591 return ret;
562} 592}
@@ -578,9 +608,16 @@ static void
578update_mboxes(struct intel_ring_buffer *ring, 608update_mboxes(struct intel_ring_buffer *ring,
579 u32 mmio_offset) 609 u32 mmio_offset)
580{ 610{
611/* NB: In order to be able to do semaphore MBOX updates for a varying
612 * number of rings, it's easiest if we round up each individual update to a
613 * multiple of 2 (since ring updates must always be a multiple of 2)
614 * even though the actual update only requires 3 dwords.
615 */
616#define MBOX_UPDATE_DWORDS 4
581 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 617 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
582 intel_ring_emit(ring, mmio_offset); 618 intel_ring_emit(ring, mmio_offset);
583 intel_ring_emit(ring, ring->outstanding_lazy_request); 619 intel_ring_emit(ring, ring->outstanding_lazy_request);
620 intel_ring_emit(ring, MI_NOOP);
584} 621}
585 622
586/** 623/**
@@ -595,19 +632,24 @@ update_mboxes(struct intel_ring_buffer *ring,
595static int 632static int
596gen6_add_request(struct intel_ring_buffer *ring) 633gen6_add_request(struct intel_ring_buffer *ring)
597{ 634{
598 u32 mbox1_reg; 635 struct drm_device *dev = ring->dev;
599 u32 mbox2_reg; 636 struct drm_i915_private *dev_priv = dev->dev_private;
600 int ret; 637 struct intel_ring_buffer *useless;
638 int i, ret;
601 639
602 ret = intel_ring_begin(ring, 10); 640 ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
641 MBOX_UPDATE_DWORDS) +
642 4);
603 if (ret) 643 if (ret)
604 return ret; 644 return ret;
645#undef MBOX_UPDATE_DWORDS
605 646
606 mbox1_reg = ring->signal_mbox[0]; 647 for_each_ring(useless, dev_priv, i) {
607 mbox2_reg = ring->signal_mbox[1]; 648 u32 mbox_reg = ring->signal_mbox[i];
649 if (mbox_reg != GEN6_NOSYNC)
650 update_mboxes(ring, mbox_reg);
651 }
608 652
609 update_mboxes(ring, mbox1_reg);
610 update_mboxes(ring, mbox2_reg);
611 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 653 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
612 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 654 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
613 intel_ring_emit(ring, ring->outstanding_lazy_request); 655 intel_ring_emit(ring, ring->outstanding_lazy_request);
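The dword budget in intel_ring_begin() above works out, assuming I915_NUM_RINGS is 4 once the video-enhancement ring lands, to (4 - 1) * MBOX_UPDATE_DWORDS + 4 = 16: three padded mailbox updates plus the remaining four dwords of the request tail. The for_each_ring() loop visits the signalling ring itself too, but its own signal_mbox slot is GEN6_NOSYNC and is skipped.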
@@ -779,7 +821,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
779 return false; 821 return false;
780 822
781 spin_lock_irqsave(&dev_priv->irq_lock, flags); 823 spin_lock_irqsave(&dev_priv->irq_lock, flags);
782 if (ring->irq_refcount++ == 0) { 824 if (ring->irq_refcount.gt++ == 0) {
783 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 825 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
784 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 826 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
785 POSTING_READ(GTIMR); 827 POSTING_READ(GTIMR);
@@ -797,7 +839,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
797 unsigned long flags; 839 unsigned long flags;
798 840
799 spin_lock_irqsave(&dev_priv->irq_lock, flags); 841 spin_lock_irqsave(&dev_priv->irq_lock, flags);
800 if (--ring->irq_refcount == 0) { 842 if (--ring->irq_refcount.gt == 0) {
801 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 843 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
802 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 844 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
803 POSTING_READ(GTIMR); 845 POSTING_READ(GTIMR);
@@ -816,7 +858,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
816 return false; 858 return false;
817 859
818 spin_lock_irqsave(&dev_priv->irq_lock, flags); 860 spin_lock_irqsave(&dev_priv->irq_lock, flags);
819 if (ring->irq_refcount++ == 0) { 861 if (ring->irq_refcount.gt++ == 0) {
820 dev_priv->irq_mask &= ~ring->irq_enable_mask; 862 dev_priv->irq_mask &= ~ring->irq_enable_mask;
821 I915_WRITE(IMR, dev_priv->irq_mask); 863 I915_WRITE(IMR, dev_priv->irq_mask);
822 POSTING_READ(IMR); 864 POSTING_READ(IMR);
@@ -834,7 +876,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
834 unsigned long flags; 876 unsigned long flags;
835 877
836 spin_lock_irqsave(&dev_priv->irq_lock, flags); 878 spin_lock_irqsave(&dev_priv->irq_lock, flags);
837 if (--ring->irq_refcount == 0) { 879 if (--ring->irq_refcount.gt == 0) {
838 dev_priv->irq_mask |= ring->irq_enable_mask; 880 dev_priv->irq_mask |= ring->irq_enable_mask;
839 I915_WRITE(IMR, dev_priv->irq_mask); 881 I915_WRITE(IMR, dev_priv->irq_mask);
840 POSTING_READ(IMR); 882 POSTING_READ(IMR);
@@ -853,7 +895,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
853 return false; 895 return false;
854 896
855 spin_lock_irqsave(&dev_priv->irq_lock, flags); 897 spin_lock_irqsave(&dev_priv->irq_lock, flags);
856 if (ring->irq_refcount++ == 0) { 898 if (ring->irq_refcount.gt++ == 0) {
857 dev_priv->irq_mask &= ~ring->irq_enable_mask; 899 dev_priv->irq_mask &= ~ring->irq_enable_mask;
858 I915_WRITE16(IMR, dev_priv->irq_mask); 900 I915_WRITE16(IMR, dev_priv->irq_mask);
859 POSTING_READ16(IMR); 901 POSTING_READ16(IMR);
@@ -871,7 +913,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
871 unsigned long flags; 913 unsigned long flags;
872 914
873 spin_lock_irqsave(&dev_priv->irq_lock, flags); 915 spin_lock_irqsave(&dev_priv->irq_lock, flags);
874 if (--ring->irq_refcount == 0) { 916 if (--ring->irq_refcount.gt == 0) {
875 dev_priv->irq_mask |= ring->irq_enable_mask; 917 dev_priv->irq_mask |= ring->irq_enable_mask;
876 I915_WRITE16(IMR, dev_priv->irq_mask); 918 I915_WRITE16(IMR, dev_priv->irq_mask);
877 POSTING_READ16(IMR); 919 POSTING_READ16(IMR);
@@ -899,6 +941,9 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
899 case VCS: 941 case VCS:
900 mmio = BSD_HWS_PGA_GEN7; 942 mmio = BSD_HWS_PGA_GEN7;
901 break; 943 break;
944 case VECS:
945 mmio = VEBOX_HWS_PGA_GEN7;
946 break;
902 } 947 }
903 } else if (IS_GEN6(ring->dev)) { 948 } else if (IS_GEN6(ring->dev)) {
904 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 949 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
@@ -961,10 +1006,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
961 gen6_gt_force_wake_get(dev_priv); 1006 gen6_gt_force_wake_get(dev_priv);
962 1007
963 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1008 spin_lock_irqsave(&dev_priv->irq_lock, flags);
964 if (ring->irq_refcount++ == 0) { 1009 if (ring->irq_refcount.gt++ == 0) {
965 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1010 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
966 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | 1011 I915_WRITE_IMR(ring,
967 GEN6_RENDER_L3_PARITY_ERROR)); 1012 ~(ring->irq_enable_mask |
1013 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
968 else 1014 else
969 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1015 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
970 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 1016 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
@@ -984,9 +1030,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
984 unsigned long flags; 1030 unsigned long flags;
985 1031
986 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1032 spin_lock_irqsave(&dev_priv->irq_lock, flags);
987 if (--ring->irq_refcount == 0) { 1033 if (--ring->irq_refcount.gt == 0) {
988 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1034 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
989 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 1035 I915_WRITE_IMR(ring,
1036 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
990 else 1037 else
991 I915_WRITE_IMR(ring, ~0); 1038 I915_WRITE_IMR(ring, ~0);
992 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 1039 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
@@ -998,6 +1045,48 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
998 gen6_gt_force_wake_put(dev_priv); 1045 gen6_gt_force_wake_put(dev_priv);
999} 1046}
1000 1047
1048static bool
1049hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1050{
1051 struct drm_device *dev = ring->dev;
1052 struct drm_i915_private *dev_priv = dev->dev_private;
1053 unsigned long flags;
1054
1055 if (!dev->irq_enabled)
1056 return false;
1057
1058 spin_lock_irqsave(&dev_priv->rps.lock, flags);
1059 if (ring->irq_refcount.pm++ == 0) {
1060 u32 pm_imr = I915_READ(GEN6_PMIMR);
1061 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1062 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
1063 POSTING_READ(GEN6_PMIMR);
1064 }
1065 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
1066
1067 return true;
1068}
1069
1070static void
1071hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1072{
1073 struct drm_device *dev = ring->dev;
1074 struct drm_i915_private *dev_priv = dev->dev_private;
1075 unsigned long flags;
1076
1077 if (!dev->irq_enabled)
1078 return;
1079
1080 spin_lock_irqsave(&dev_priv->rps.lock, flags);
1081 if (--ring->irq_refcount.pm == 0) {
1082 u32 pm_imr = I915_READ(GEN6_PMIMR);
1083 I915_WRITE_IMR(ring, ~0);
1084 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
1085 POSTING_READ(GEN6_PMIMR);
1086 }
1087 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
1088}
1089
1001static int 1090static int
1002i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 1091i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1003 u32 offset, u32 length, 1092 u32 offset, u32 length,
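
The new hsw_vebox_get_irq()/hsw_vebox_put_irq() mirror the GT helpers, but Haswell routes the VEBOX user interrupt through the PM interrupt registers (GEN6_PMIMR), which are guarded by dev_priv->rps.lock rather than irq_lock; that is why irq_refcount grows a second .pm field. The POSTING_READ(GEN6_PMIMR) is the usual defence against posted MMIO writes: reading the register back forces the write out before the lock is dropped. Schematically, with placeholder offsets rather than real register addresses:

        #include <linux/io.h>

        /* Illustrative read-modify-write of a posted interrupt mask
         * register; 'reg' and 'bit' are placeholders, not real offsets. */
        static void pm_irq_unmask(void __iomem *regs, unsigned int reg, u32 bit)
        {
                u32 imr = readl(regs + reg);    /* current mask bits */
                writel(imr & ~bit, regs + reg); /* clear = unmask our bit */
                (void)readl(regs + reg);        /* posting read flushes the write */
        }
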
@@ -1423,7 +1512,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1423 1512
1424 /* We need to add any requests required to flush the objects and ring */ 1513 /* We need to add any requests required to flush the objects and ring */
1425 if (ring->outstanding_lazy_request) { 1514 if (ring->outstanding_lazy_request) {
1426 ret = i915_add_request(ring, NULL, NULL); 1515 ret = i915_add_request(ring, NULL);
1427 if (ret) 1516 if (ret)
1428 return ret; 1517 return ret;
1429 } 1518 }
@@ -1500,6 +1589,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1500 } 1589 }
1501 1590
1502 ring->set_seqno(ring, seqno); 1591 ring->set_seqno(ring, seqno);
1592 ring->hangcheck.seqno = seqno;
1503} 1593}
1504 1594
1505void intel_ring_advance(struct intel_ring_buffer *ring) 1595void intel_ring_advance(struct intel_ring_buffer *ring)
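
Seeding ring->hangcheck.seqno together with the hardware seqno matters on seqno rewrites (wrap handling, resume): if the hangcheck state kept a stale value, the next timer tick would see either phantom progress or a phantom stall. Seqno comparisons elsewhere in the driver are wrap-safe via a signed 32-bit difference, the same idea as its i915_seqno_passed() helper; a restatement:

        #include <linux/types.h>

        /* True if seq1 is at or after seq2 on a circular u32 counter. */
        static inline bool seqno_passed(u32 seq1, u32 seq2)
        {
                return (s32)(seq1 - seq2) >= 0;
        }
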
@@ -1546,8 +1636,8 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1546 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1636 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1547} 1637}
1548 1638
1549static int gen6_ring_flush(struct intel_ring_buffer *ring, 1639static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1550 u32 invalidate, u32 flush) 1640 u32 invalidate, u32 flush)
1551{ 1641{
1552 uint32_t cmd; 1642 uint32_t cmd;
1553 int ret; 1643 int ret;
@@ -1618,9 +1708,10 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1618 1708
1619/* Blitter support (SandyBridge+) */ 1709/* Blitter support (SandyBridge+) */
1620 1710
1621static int blt_ring_flush(struct intel_ring_buffer *ring, 1711static int gen6_ring_flush(struct intel_ring_buffer *ring,
1622 u32 invalidate, u32 flush) 1712 u32 invalidate, u32 flush)
1623{ 1713{
1714 struct drm_device *dev = ring->dev;
1624 uint32_t cmd; 1715 uint32_t cmd;
1625 int ret; 1716 int ret;
1626 1717
@@ -1643,6 +1734,10 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
1643 intel_ring_emit(ring, 0); 1734 intel_ring_emit(ring, 0);
1644 intel_ring_emit(ring, MI_NOOP); 1735 intel_ring_emit(ring, MI_NOOP);
1645 intel_ring_advance(ring); 1736 intel_ring_advance(ring);
1737
1738 if (IS_GEN7(dev) && flush)
1739 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
1740
1646 return 0; 1741 return 0;
1647} 1742}
1648 1743
@@ -1662,15 +1757,18 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1662 ring->flush = gen6_render_ring_flush; 1757 ring->flush = gen6_render_ring_flush;
1663 ring->irq_get = gen6_ring_get_irq; 1758 ring->irq_get = gen6_ring_get_irq;
1664 ring->irq_put = gen6_ring_put_irq; 1759 ring->irq_put = gen6_ring_put_irq;
1665 ring->irq_enable_mask = GT_USER_INTERRUPT; 1760 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1666 ring->get_seqno = gen6_ring_get_seqno; 1761 ring->get_seqno = gen6_ring_get_seqno;
1667 ring->set_seqno = ring_set_seqno; 1762 ring->set_seqno = ring_set_seqno;
1668 ring->sync_to = gen6_ring_sync; 1763 ring->sync_to = gen6_ring_sync;
1669 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; 1764 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1670 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; 1765 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1671 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; 1766 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
1672 ring->signal_mbox[0] = GEN6_VRSYNC; 1767 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
1673 ring->signal_mbox[1] = GEN6_BRSYNC; 1768 ring->signal_mbox[RCS] = GEN6_NOSYNC;
1769 ring->signal_mbox[VCS] = GEN6_VRSYNC;
1770 ring->signal_mbox[BCS] = GEN6_BRSYNC;
1771 ring->signal_mbox[VECS] = GEN6_VERSYNC;
1674 } else if (IS_GEN5(dev)) { 1772 } else if (IS_GEN5(dev)) {
1675 ring->add_request = pc_render_add_request; 1773 ring->add_request = pc_render_add_request;
1676 ring->flush = gen4_render_ring_flush; 1774 ring->flush = gen4_render_ring_flush;
@@ -1678,7 +1776,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1678 ring->set_seqno = pc_render_set_seqno; 1776 ring->set_seqno = pc_render_set_seqno;
1679 ring->irq_get = gen5_ring_get_irq; 1777 ring->irq_get = gen5_ring_get_irq;
1680 ring->irq_put = gen5_ring_put_irq; 1778 ring->irq_put = gen5_ring_put_irq;
1681 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; 1779 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
1780 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
1682 } else { 1781 } else {
1683 ring->add_request = i9xx_add_request; 1782 ring->add_request = i9xx_add_request;
1684 if (INTEL_INFO(dev)->gen < 4) 1783 if (INTEL_INFO(dev)->gen < 4)
@@ -1816,20 +1915,23 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1816 /* gen6 bsd needs a special wa for tail updates */ 1915 /* gen6 bsd needs a special wa for tail updates */
1817 if (IS_GEN6(dev)) 1916 if (IS_GEN6(dev))
1818 ring->write_tail = gen6_bsd_ring_write_tail; 1917 ring->write_tail = gen6_bsd_ring_write_tail;
1819 ring->flush = gen6_ring_flush; 1918 ring->flush = gen6_bsd_ring_flush;
1820 ring->add_request = gen6_add_request; 1919 ring->add_request = gen6_add_request;
1821 ring->get_seqno = gen6_ring_get_seqno; 1920 ring->get_seqno = gen6_ring_get_seqno;
1822 ring->set_seqno = ring_set_seqno; 1921 ring->set_seqno = ring_set_seqno;
1823 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; 1922 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1824 ring->irq_get = gen6_ring_get_irq; 1923 ring->irq_get = gen6_ring_get_irq;
1825 ring->irq_put = gen6_ring_put_irq; 1924 ring->irq_put = gen6_ring_put_irq;
1826 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1925 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1827 ring->sync_to = gen6_ring_sync; 1926 ring->sync_to = gen6_ring_sync;
1828 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; 1927 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
1829 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; 1928 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
1830 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; 1929 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
1831 ring->signal_mbox[0] = GEN6_RVSYNC; 1930 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
1832 ring->signal_mbox[1] = GEN6_BVSYNC; 1931 ring->signal_mbox[RCS] = GEN6_RVSYNC;
1932 ring->signal_mbox[VCS] = GEN6_NOSYNC;
1933 ring->signal_mbox[BCS] = GEN6_BVSYNC;
1934 ring->signal_mbox[VECS] = GEN6_VEVSYNC;
1833 } else { 1935 } else {
1834 ring->mmio_base = BSD_RING_BASE; 1936 ring->mmio_base = BSD_RING_BASE;
1835 ring->flush = bsd_ring_flush; 1937 ring->flush = bsd_ring_flush;
@@ -1837,7 +1939,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1837 ring->get_seqno = ring_get_seqno; 1939 ring->get_seqno = ring_get_seqno;
1838 ring->set_seqno = ring_set_seqno; 1940 ring->set_seqno = ring_set_seqno;
1839 if (IS_GEN5(dev)) { 1941 if (IS_GEN5(dev)) {
1840 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1942 ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
1841 ring->irq_get = gen5_ring_get_irq; 1943 ring->irq_get = gen5_ring_get_irq;
1842 ring->irq_put = gen5_ring_put_irq; 1944 ring->irq_put = gen5_ring_put_irq;
1843 } else { 1945 } else {
@@ -1862,20 +1964,56 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1862 1964
1863 ring->mmio_base = BLT_RING_BASE; 1965 ring->mmio_base = BLT_RING_BASE;
1864 ring->write_tail = ring_write_tail; 1966 ring->write_tail = ring_write_tail;
1865 ring->flush = blt_ring_flush; 1967 ring->flush = gen6_ring_flush;
1866 ring->add_request = gen6_add_request; 1968 ring->add_request = gen6_add_request;
1867 ring->get_seqno = gen6_ring_get_seqno; 1969 ring->get_seqno = gen6_ring_get_seqno;
1868 ring->set_seqno = ring_set_seqno; 1970 ring->set_seqno = ring_set_seqno;
1869 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; 1971 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1870 ring->irq_get = gen6_ring_get_irq; 1972 ring->irq_get = gen6_ring_get_irq;
1871 ring->irq_put = gen6_ring_put_irq; 1973 ring->irq_put = gen6_ring_put_irq;
1872 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1974 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1873 ring->sync_to = gen6_ring_sync; 1975 ring->sync_to = gen6_ring_sync;
1874 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; 1976 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
1875 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; 1977 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
1876 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; 1978 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
1877 ring->signal_mbox[0] = GEN6_RBSYNC; 1979 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
1878 ring->signal_mbox[1] = GEN6_VBSYNC; 1980 ring->signal_mbox[RCS] = GEN6_RBSYNC;
1981 ring->signal_mbox[VCS] = GEN6_VBSYNC;
1982 ring->signal_mbox[BCS] = GEN6_NOSYNC;
1983 ring->signal_mbox[VECS] = GEN6_VEBSYNC;
1984 ring->init = init_ring_common;
1985
1986 return intel_init_ring_buffer(dev, ring);
1987}
1988
1989int intel_init_vebox_ring_buffer(struct drm_device *dev)
1990{
1991 drm_i915_private_t *dev_priv = dev->dev_private;
1992 struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
1993
1994 ring->name = "video enhancement ring";
1995 ring->id = VECS;
1996
1997 ring->mmio_base = VEBOX_RING_BASE;
1998 ring->write_tail = ring_write_tail;
1999 ring->flush = gen6_ring_flush;
2000 ring->add_request = gen6_add_request;
2001 ring->get_seqno = gen6_ring_get_seqno;
2002 ring->set_seqno = ring_set_seqno;
2003 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
2004 PM_VEBOX_CS_ERROR_INTERRUPT;
2005 ring->irq_get = hsw_vebox_get_irq;
2006 ring->irq_put = hsw_vebox_put_irq;
2007 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2008 ring->sync_to = gen6_ring_sync;
2009 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
2010 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
2011 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
2012 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2013 ring->signal_mbox[RCS] = GEN6_RVESYNC;
2014 ring->signal_mbox[VCS] = GEN6_VVESYNC;
2015 ring->signal_mbox[BCS] = GEN6_BVESYNC;
2016 ring->signal_mbox[VECS] = GEN6_NOSYNC;
1879 ring->init = init_ring_common; 2017 ring->init = init_ring_common;
1880 2018
1881 return intel_init_ring_buffer(dev, ring); 2019 return intel_init_ring_buffer(dev, ring);
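
The ring-init hunks above replace hand-numbered semaphore slots with indexing by engine id, and every ring now carries an explicit entry for itself (MI_SEMAPHORE_SYNC_INVALID, GEN6_NOSYNC) instead of simply omitting it. The result is an N by N mailbox matrix with an unused diagonal, which stays correct as engines are added. A toy model of the shape, not of the hardware programming:

        #include <stdio.h>

        enum ring_id { RCS, VCS, BCS, VECS, NUM_RINGS };

        int main(void)
        {
                static const char *name[NUM_RINGS] = { "RCS", "VCS", "BCS", "VECS" };
                int signaller, waiter;

                /* mbox[signaller][waiter]: the diagonal is never used, just
                 * like the GEN6_NOSYNC/MI_SEMAPHORE_SYNC_INVALID slots above. */
                for (signaller = 0; signaller < NUM_RINGS; signaller++)
                        for (waiter = 0; waiter < NUM_RINGS; waiter++)
                                printf("%s -> %s: %s\n",
                                       name[signaller], name[waiter],
                                       signaller == waiter ? "no-sync" : "mailbox");
                return 0;
        }
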
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d66208c2c48b..799f04c9da45 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -37,14 +37,25 @@ struct intel_hw_status_page {
37#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) 37#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
38#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) 38#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
39 39
40enum intel_ring_hangcheck_action { wait, active, kick, hung };
41
42struct intel_ring_hangcheck {
43 bool deadlock;
44 u32 seqno;
45 u32 acthd;
46 int score;
47 enum intel_ring_hangcheck_action action;
48};
49
40struct intel_ring_buffer { 50struct intel_ring_buffer {
41 const char *name; 51 const char *name;
42 enum intel_ring_id { 52 enum intel_ring_id {
43 RCS = 0x0, 53 RCS = 0x0,
44 VCS, 54 VCS,
45 BCS, 55 BCS,
56 VECS,
46 } id; 57 } id;
47#define I915_NUM_RINGS 3 58#define I915_NUM_RINGS 4
48 u32 mmio_base; 59 u32 mmio_base;
49 void __iomem *virtual_start; 60 void __iomem *virtual_start;
50 struct drm_device *dev; 61 struct drm_device *dev;
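
The new intel_ring_hangcheck struct gives the periodic hangcheck timer per-ring state: the last seqno and ACTHD it observed, a cumulative score, and the action (wait/active/kick/hung) derived from them. How the fields plausibly interact, compressed into a sketch; the real heuristics live in i915_hangcheck_elapsed() and differ in detail, and the threshold here is invented:

        /* Sketch only: score-based stall detection over the fields above. */
        static void hangcheck_sample(struct intel_ring_hangcheck *hc,
                                     u32 seqno, u32 acthd)
        {
                if (seqno != hc->seqno || acthd != hc->acthd) {
                        hc->score = 0;                  /* forward progress */
                        hc->action = active;
                } else if (++hc->score > 3) {           /* invented threshold */
                        hc->action = hung;
                }
                hc->seqno = seqno;
                hc->acthd = acthd;
        }
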
@@ -67,7 +78,10 @@ struct intel_ring_buffer {
67 */ 78 */
68 u32 last_retired_head; 79 u32 last_retired_head;
69 80
70 u32 irq_refcount; /* protected by dev_priv->irq_lock */ 81 struct {
82 u32 gt; /* protected by dev_priv->irq_lock */
83 u32 pm; /* protected by dev_priv->rps.lock (sucks) */
84 } irq_refcount;
71 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 85 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
72 u32 trace_irq_seqno; 86 u32 trace_irq_seqno;
73 u32 sync_seqno[I915_NUM_RINGS-1]; 87 u32 sync_seqno[I915_NUM_RINGS-1];
@@ -102,8 +116,11 @@ struct intel_ring_buffer {
102 struct intel_ring_buffer *to, 116 struct intel_ring_buffer *to,
103 u32 seqno); 117 u32 seqno);
104 118
105 u32 semaphore_register[3]; /*our mbox written by others */ 119 /* our mbox written by others */
106 u32 signal_mbox[2]; /* mboxes this ring signals to */ 120 u32 semaphore_register[I915_NUM_RINGS];
121 /* mboxes this ring signals to */
122 u32 signal_mbox[I915_NUM_RINGS];
123
107 /** 124 /**
108 * List of objects currently involved in rendering from the 125 * List of objects currently involved in rendering from the
109 * ringbuffer. 126 * ringbuffer.
@@ -127,6 +144,7 @@ struct intel_ring_buffer {
127 */ 144 */
128 u32 outstanding_lazy_request; 145 u32 outstanding_lazy_request;
129 bool gpu_caches_dirty; 146 bool gpu_caches_dirty;
147 bool fbc_dirty;
130 148
131 wait_queue_head_t irq_queue; 149 wait_queue_head_t irq_queue;
132 150
@@ -135,7 +153,9 @@ struct intel_ring_buffer {
135 */ 153 */
136 bool itlb_before_ctx_switch; 154 bool itlb_before_ctx_switch;
137 struct i915_hw_context *default_context; 155 struct i915_hw_context *default_context;
138 struct drm_i915_gem_object *last_context_obj; 156 struct i915_hw_context *last_context;
157
158 struct intel_ring_hangcheck hangcheck;
139 159
140 void *private; 160 void *private;
141}; 161};
@@ -224,6 +244,7 @@ int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
224int intel_init_render_ring_buffer(struct drm_device *dev); 244int intel_init_render_ring_buffer(struct drm_device *dev);
225int intel_init_bsd_ring_buffer(struct drm_device *dev); 245int intel_init_bsd_ring_buffer(struct drm_device *dev);
226int intel_init_blt_ring_buffer(struct drm_device *dev); 246int intel_init_blt_ring_buffer(struct drm_device *dev);
247int intel_init_vebox_ring_buffer(struct drm_device *dev);
227 248
228u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 249u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
229void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 250void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
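
Because semaphore_register[] and signal_mbox[] are now sized by I915_NUM_RINGS (and sync_seqno[] by I915_NUM_RINGS-1), bumping the constant from 3 to 4 resizes everything in one place. The sync_seqno slot for a partner engine is its circular distance from the current one, mirroring the driver's intel_ring_sync_index(); expressed over ids rather than pointers:

        /* Slot in ring->sync_seqno[] that tracks 'other'; yields 0..n-2. */
        static inline int sync_index(int ring_id, int other_id, int num_rings)
        {
                int idx = other_id - ring_id - 1;

                if (idx < 0)
                        idx += num_rings;
                return idx;
        }
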
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d4ea6c265ce1..2628d5622449 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -80,7 +80,7 @@ struct intel_sdvo {
80 80
81 /* 81 /*
82 * Capabilities of the SDVO device returned by 82 * Capabilities of the SDVO device returned by
83 * i830_sdvo_get_capabilities() 83 * intel_sdvo_get_capabilities()
84 */ 84 */
85 struct intel_sdvo_caps caps; 85 struct intel_sdvo_caps caps;
86 86
@@ -712,6 +712,13 @@ static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
712 intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); 712 intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
713} 713}
714 714
715static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
716 struct intel_sdvo_dtd *dtd)
717{
718 return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
719 intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
720}
721
715static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, 722static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
716 struct intel_sdvo_dtd *dtd) 723 struct intel_sdvo_dtd *dtd)
717{ 724{
@@ -726,6 +733,13 @@ static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
726 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 733 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
727} 734}
728 735
736static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
737 struct intel_sdvo_dtd *dtd)
738{
739 return intel_sdvo_get_timing(intel_sdvo,
740 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
741}
742
729static bool 743static bool
730intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, 744intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
731 uint16_t clock, 745 uint16_t clock,
@@ -1041,6 +1055,32 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1041 return true; 1055 return true;
1042} 1056}
1043 1057
1058static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
1059{
1060 unsigned dotclock = pipe_config->adjusted_mode.clock;
1061 struct dpll *clock = &pipe_config->dpll;
1062
 1063 /* SDVO TV has fixed PLL values that depend on its clock range;
 1064 this mirrors the VBIOS setting. */
1065 if (dotclock >= 100000 && dotclock < 140500) {
1066 clock->p1 = 2;
1067 clock->p2 = 10;
1068 clock->n = 3;
1069 clock->m1 = 16;
1070 clock->m2 = 8;
1071 } else if (dotclock >= 140500 && dotclock <= 200000) {
1072 clock->p1 = 1;
1073 clock->p2 = 10;
1074 clock->n = 6;
1075 clock->m1 = 12;
1076 clock->m2 = 8;
1077 } else {
1078 WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
1079 }
1080
1081 pipe_config->clock_set = true;
1082}
1083
1044static bool intel_sdvo_compute_config(struct intel_encoder *encoder, 1084static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1045 struct intel_crtc_config *pipe_config) 1085 struct intel_crtc_config *pipe_config)
1046{ 1086{
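
i9xx_adjust_sdvo_tv_clock() hard-codes two PLL settings per dot-clock band instead of running the normal PLL search, matching what the VBIOS programs. For orientation, the classic i9xx-style DPLL arithmetic that such m/n/p values feed into looks roughly like the following; the reference clock and the exact m1/m2 folding vary by platform, so treat this purely as a schematic:

        struct dpll_sketch { int n, m1, m2, p1, p2; };

        /* Schematic i9xx-style DPLL evaluation, clocks in kHz. */
        static int i9xx_dot_clock(int refclk, const struct dpll_sketch *c)
        {
                int m = 5 * (c->m1 + 2) + (c->m2 + 2);
                int p = c->p1 * c->p2;
                int vco = refclk * m / (c->n + 2);

                return vco / p;
        }
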
@@ -1066,6 +1106,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1066 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, 1106 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
1067 mode, 1107 mode,
1068 adjusted_mode); 1108 adjusted_mode);
1109 pipe_config->sdvo_tv_clock = true;
1069 } else if (intel_sdvo->is_lvds) { 1110 } else if (intel_sdvo->is_lvds) {
1070 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, 1111 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1071 intel_sdvo->sdvo_lvds_fixed_mode)) 1112 intel_sdvo->sdvo_lvds_fixed_mode))
@@ -1097,6 +1138,10 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1097 if (intel_sdvo->color_range) 1138 if (intel_sdvo->color_range)
1098 pipe_config->limited_color_range = true; 1139 pipe_config->limited_color_range = true;
1099 1140
1141 /* Clock computation needs to happen after pixel multiplier. */
1142 if (intel_sdvo->is_tv)
1143 i9xx_adjust_sdvo_tv_clock(pipe_config);
1144
1100 return true; 1145 return true;
1101} 1146}
1102 1147
@@ -1174,6 +1219,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1174 1219
1175 switch (intel_crtc->config.pixel_multiplier) { 1220 switch (intel_crtc->config.pixel_multiplier) {
1176 default: 1221 default:
 1222 WARN(1, "unknown pixel multiplier specified\n");
1177 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; 1223 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
1178 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; 1224 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
1179 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; 1225 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1231,7 +1277,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
1231 struct intel_sdvo_connector *intel_sdvo_connector = 1277 struct intel_sdvo_connector *intel_sdvo_connector =
1232 to_intel_sdvo_connector(&connector->base); 1278 to_intel_sdvo_connector(&connector->base);
1233 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); 1279 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
1234 u16 active_outputs; 1280 u16 active_outputs = 0;
1235 1281
1236 intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); 1282 intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
1237 1283
@@ -1247,7 +1293,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1247 struct drm_device *dev = encoder->base.dev; 1293 struct drm_device *dev = encoder->base.dev;
1248 struct drm_i915_private *dev_priv = dev->dev_private; 1294 struct drm_i915_private *dev_priv = dev->dev_private;
1249 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1295 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1250 u16 active_outputs; 1296 u16 active_outputs = 0;
1251 u32 tmp; 1297 u32 tmp;
1252 1298
1253 tmp = I915_READ(intel_sdvo->sdvo_reg); 1299 tmp = I915_READ(intel_sdvo->sdvo_reg);
@@ -1264,6 +1310,74 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1264 return true; 1310 return true;
1265} 1311}
1266 1312
1313static void intel_sdvo_get_config(struct intel_encoder *encoder,
1314 struct intel_crtc_config *pipe_config)
1315{
1316 struct drm_device *dev = encoder->base.dev;
1317 struct drm_i915_private *dev_priv = dev->dev_private;
1318 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1319 struct intel_sdvo_dtd dtd;
1320 int encoder_pixel_multiplier = 0;
1321 u32 flags = 0, sdvox;
1322 u8 val;
1323 bool ret;
1324
1325 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
1326 if (!ret) {
1327 /* Some sdvo encoders are not spec compliant and don't
1328 * implement the mandatory get_timings function. */
1329 DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
1330 pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
1331 } else {
1332 if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
1333 flags |= DRM_MODE_FLAG_PHSYNC;
1334 else
1335 flags |= DRM_MODE_FLAG_NHSYNC;
1336
1337 if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
1338 flags |= DRM_MODE_FLAG_PVSYNC;
1339 else
1340 flags |= DRM_MODE_FLAG_NVSYNC;
1341 }
1342
1343 pipe_config->adjusted_mode.flags |= flags;
1344
1345 /*
 1346 * Pixel multiplier readout is tricky: only on i915g/gm is it stored in
 1347 * the sdvo port register; on all other platforms it is part of the dpll
 1348 * state. Since the general pipe state readout happens before
 1349 * encoder->get_config, we already have a valid pixel multiplier on all
 1350 * other platforms.
1351 */
1352 if (IS_I915G(dev) || IS_I915GM(dev)) {
1353 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1354 pipe_config->pixel_multiplier =
1355 ((sdvox & SDVO_PORT_MULTIPLY_MASK)
1356 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
1357 }
1358
1359 /* Cross check the port pixel multiplier with the sdvo encoder state. */
1360 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
1361 switch (val) {
1362 case SDVO_CLOCK_RATE_MULT_1X:
1363 encoder_pixel_multiplier = 1;
1364 break;
1365 case SDVO_CLOCK_RATE_MULT_2X:
1366 encoder_pixel_multiplier = 2;
1367 break;
1368 case SDVO_CLOCK_RATE_MULT_4X:
1369 encoder_pixel_multiplier = 4;
1370 break;
1371 }
1372
 1373 if (HAS_PCH_SPLIT(dev))
1374 return; /* no pixel multiplier readout support yet */
1375
1376 WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
1377 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
1378 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
1379}
1380
1267static void intel_disable_sdvo(struct intel_encoder *encoder) 1381static void intel_disable_sdvo(struct intel_encoder *encoder)
1268{ 1382{
1269 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1383 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
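
The WARN at the end of intel_sdvo_get_config() works because the multiplier is a pure function of the dot clock: SDVO wants its input clock in roughly the 100 to 200 MHz band, so slower modes are transmitted multiplied up. The driver's selection logic (intel_sdvo_get_pixel_multiplier()) amounts to something like this hedged restatement, not a verbatim copy:

        /* Pick the multiplier keeping dotclock * mult inside the SDVO
         * input clock range; dotclock in kHz. */
        static int sdvo_pixel_multiplier(int dotclock)
        {
                if (dotclock >= 100000)
                        return 1;
                if (dotclock >= 50000)
                        return 2;
                return 4;
        }
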
@@ -1344,6 +1458,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1344 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); 1458 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1345} 1459}
1346 1460
1461/* Special dpms function to support cloning between dvo/sdvo/crt. */
1347static void intel_sdvo_dpms(struct drm_connector *connector, int mode) 1462static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1348{ 1463{
1349 struct drm_crtc *crtc; 1464 struct drm_crtc *crtc;
@@ -1365,6 +1480,8 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1365 return; 1480 return;
1366 } 1481 }
1367 1482
1483 /* We set active outputs manually below in case pipe dpms doesn't change
1484 * due to cloning. */
1368 if (mode != DRM_MODE_DPMS_ON) { 1485 if (mode != DRM_MODE_DPMS_ON) {
1369 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1486 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1370 if (0) 1487 if (0)
@@ -1495,7 +1612,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
1495 1612
1496 return drm_get_edid(connector, 1613 return drm_get_edid(connector,
1497 intel_gmbus_get_adapter(dev_priv, 1614 intel_gmbus_get_adapter(dev_priv,
1498 dev_priv->crt_ddc_pin)); 1615 dev_priv->vbt.crt_ddc_pin));
1499} 1616}
1500 1617
1501static enum drm_connector_status 1618static enum drm_connector_status
@@ -1625,12 +1742,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1625 if (ret == connector_status_connected) { 1742 if (ret == connector_status_connected) {
1626 intel_sdvo->is_tv = false; 1743 intel_sdvo->is_tv = false;
1627 intel_sdvo->is_lvds = false; 1744 intel_sdvo->is_lvds = false;
1628 intel_sdvo->base.needs_tv_clock = false;
1629 1745
1630 if (response & SDVO_TV_MASK) { 1746 if (response & SDVO_TV_MASK)
1631 intel_sdvo->is_tv = true; 1747 intel_sdvo->is_tv = true;
1632 intel_sdvo->base.needs_tv_clock = true;
1633 }
1634 if (response & SDVO_LVDS_MASK) 1748 if (response & SDVO_LVDS_MASK)
1635 intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; 1749 intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
1636 } 1750 }
@@ -1772,21 +1886,12 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1772 struct drm_display_mode *newmode; 1886 struct drm_display_mode *newmode;
1773 1887
1774 /* 1888 /*
1775 * Attempt to get the mode list from DDC.
1776 * Assume that the preferred modes are
1777 * arranged in priority order.
1778 */
1779 intel_ddc_get_modes(connector, &intel_sdvo->ddc);
1780
1781 /*
1782 * Fetch modes from VBT. For SDVO prefer the VBT mode since some 1889 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1783 * SDVO->LVDS transcoders can't cope with the EDID mode. Since 1890 * SDVO->LVDS transcoders can't cope with the EDID mode.
1784 * drm_mode_probed_add adds the mode at the head of the list we add it
1785 * last.
1786 */ 1891 */
1787 if (dev_priv->sdvo_lvds_vbt_mode != NULL) { 1892 if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
1788 newmode = drm_mode_duplicate(connector->dev, 1893 newmode = drm_mode_duplicate(connector->dev,
1789 dev_priv->sdvo_lvds_vbt_mode); 1894 dev_priv->vbt.sdvo_lvds_vbt_mode);
1790 if (newmode != NULL) { 1895 if (newmode != NULL) {
1791 /* Guarantee the mode is preferred */ 1896 /* Guarantee the mode is preferred */
1792 newmode->type = (DRM_MODE_TYPE_PREFERRED | 1897 newmode->type = (DRM_MODE_TYPE_PREFERRED |
@@ -1795,6 +1900,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1795 } 1900 }
1796 } 1901 }
1797 1902
1903 /*
1904 * Attempt to get the mode list from DDC.
1905 * Assume that the preferred modes are
1906 * arranged in priority order.
1907 */
1908 intel_ddc_get_modes(connector, &intel_sdvo->ddc);
1909
1798 list_for_each_entry(newmode, &connector->probed_modes, head) { 1910 list_for_each_entry(newmode, &connector->probed_modes, head) {
1799 if (newmode->type & DRM_MODE_TYPE_PREFERRED) { 1911 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1800 intel_sdvo->sdvo_lvds_fixed_mode = 1912 intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2329,7 +2441,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2329 intel_sdvo_connector->output_flag = type; 2441 intel_sdvo_connector->output_flag = type;
2330 2442
2331 intel_sdvo->is_tv = true; 2443 intel_sdvo->is_tv = true;
2332 intel_sdvo->base.needs_tv_clock = true;
2333 2444
2334 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2445 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2335 2446
@@ -2417,7 +2528,6 @@ static bool
2417intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) 2528intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2418{ 2529{
2419 intel_sdvo->is_tv = false; 2530 intel_sdvo->is_tv = false;
2420 intel_sdvo->base.needs_tv_clock = false;
2421 intel_sdvo->is_lvds = false; 2531 intel_sdvo->is_lvds = false;
2422 2532
2423 /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ 2533 /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
@@ -2751,7 +2861,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2751 struct drm_i915_private *dev_priv = dev->dev_private; 2861 struct drm_i915_private *dev_priv = dev->dev_private;
2752 struct intel_encoder *intel_encoder; 2862 struct intel_encoder *intel_encoder;
2753 struct intel_sdvo *intel_sdvo; 2863 struct intel_sdvo *intel_sdvo;
2754 u32 hotplug_mask;
2755 int i; 2864 int i;
2756 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2865 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
2757 if (!intel_sdvo) 2866 if (!intel_sdvo)
@@ -2780,23 +2889,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2780 } 2889 }
2781 } 2890 }
2782 2891
2783 hotplug_mask = 0;
2784 if (IS_G4X(dev)) {
2785 hotplug_mask = intel_sdvo->is_sdvob ?
2786 SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
2787 } else if (IS_GEN4(dev)) {
2788 hotplug_mask = intel_sdvo->is_sdvob ?
2789 SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
2790 } else {
2791 hotplug_mask = intel_sdvo->is_sdvob ?
2792 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
2793 }
2794
2795 intel_encoder->compute_config = intel_sdvo_compute_config; 2892 intel_encoder->compute_config = intel_sdvo_compute_config;
2796 intel_encoder->disable = intel_disable_sdvo; 2893 intel_encoder->disable = intel_disable_sdvo;
2797 intel_encoder->mode_set = intel_sdvo_mode_set; 2894 intel_encoder->mode_set = intel_sdvo_mode_set;
2798 intel_encoder->enable = intel_enable_sdvo; 2895 intel_encoder->enable = intel_enable_sdvo;
2799 intel_encoder->get_hw_state = intel_sdvo_get_hw_state; 2896 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2897 intel_encoder->get_config = intel_sdvo_get_config;
2800 2898
2801 /* In default case sdvo lvds is false */ 2899 /* In default case sdvo lvds is false */
2802 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2900 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
new file mode 100644
index 000000000000..9a0e6c5ea540
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "i915_drv.h"
26#include "intel_drv.h"
27
28/* IOSF sideband */
29static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
30 u32 port, u32 opcode, u32 addr, u32 *val)
31{
32 u32 cmd, be = 0xf, bar = 0;
33 bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
34 opcode == DPIO_OPCODE_REG_READ);
35
36 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
37 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
38 (bar << IOSF_BAR_SHIFT);
39
40 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
41
42 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
43 DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
44 is_read ? "read" : "write");
45 return -EAGAIN;
46 }
47
48 I915_WRITE(VLV_IOSF_ADDR, addr);
49 if (!is_read)
50 I915_WRITE(VLV_IOSF_DATA, *val);
51 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
52
53 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
54 DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
55 is_read ? "read" : "write");
56 return -ETIMEDOUT;
57 }
58
59 if (is_read)
60 *val = I915_READ(VLV_IOSF_DATA);
61 I915_WRITE(VLV_IOSF_DATA, 0);
62
63 return 0;
64}
65
66u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
67{
68 u32 val = 0;
69
70 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
71
72 mutex_lock(&dev_priv->dpio_lock);
73 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
74 PUNIT_OPCODE_REG_READ, addr, &val);
75 mutex_unlock(&dev_priv->dpio_lock);
76
77 return val;
78}
79
80void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
81{
82 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
83
84 mutex_lock(&dev_priv->dpio_lock);
85 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
86 PUNIT_OPCODE_REG_WRITE, addr, &val);
87 mutex_unlock(&dev_priv->dpio_lock);
88}
89
90u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
91{
92 u32 val = 0;
93
94 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
95
96 mutex_lock(&dev_priv->dpio_lock);
97 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
98 PUNIT_OPCODE_REG_READ, addr, &val);
99 mutex_unlock(&dev_priv->dpio_lock);
100
101 return val;
102}
103
104u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
105{
106 u32 val = 0;
107
108 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
109 DPIO_OPCODE_REG_READ, reg, &val);
110
111 return val;
112}
113
114void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
115{
116 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
117 DPIO_OPCODE_REG_WRITE, reg, &val);
118}
119
120/* SBI access */
121u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
122 enum intel_sbi_destination destination)
123{
124 u32 value = 0;
125 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
126
127 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
128 100)) {
129 DRM_ERROR("timeout waiting for SBI to become ready\n");
130 return 0;
131 }
132
133 I915_WRITE(SBI_ADDR, (reg << 16));
134
135 if (destination == SBI_ICLK)
136 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
137 else
138 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
139 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
140
141 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
142 100)) {
143 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
144 return 0;
145 }
146
147 return I915_READ(SBI_DATA);
148}
149
150void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
151 enum intel_sbi_destination destination)
152{
153 u32 tmp;
154
155 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
156
157 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
158 100)) {
159 DRM_ERROR("timeout waiting for SBI to become ready\n");
160 return;
161 }
162
163 I915_WRITE(SBI_ADDR, (reg << 16));
164 I915_WRITE(SBI_DATA, value);
165
166 if (destination == SBI_ICLK)
167 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
168 else
169 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
170 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
171
172 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
173 100)) {
174 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
175 return;
176 }
177}
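
All the helpers in this new file funnel through vlv_sideband_rw() and assert their locking with WARN_ON, so a caller looks like the sketch below. PUNIT_REG_EXAMPLE is a made-up offset standing in for a real Punit register:

        /* Hypothetical caller of the Punit helpers above. */
        #define PUNIT_REG_EXAMPLE       0xd8    /* illustrative offset only */

        static u32 punit_read_example(struct drm_i915_private *dev_priv)
        {
                u32 val;

                mutex_lock(&dev_priv->rps.hw_lock);     /* required by the helper */
                val = vlv_punit_read(dev_priv, PUNIT_REG_EXAMPLE);
                mutex_unlock(&dev_priv->rps.hw_lock);

                return val;
        }
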
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index c7d25c5dd4e6..1fa5612a4572 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -32,6 +32,7 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/drm_crtc.h> 33#include <drm/drm_crtc.h>
34#include <drm/drm_fourcc.h> 34#include <drm/drm_fourcc.h>
35#include <drm/drm_rect.h>
35#include "intel_drv.h" 36#include "intel_drv.h"
36#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
37#include "i915_drv.h" 38#include "i915_drv.h"
@@ -113,7 +114,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
113 crtc_w--; 114 crtc_w--;
114 crtc_h--; 115 crtc_h--;
115 116
116 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 117 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
117 118
118 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 119 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
119 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 120 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -267,7 +268,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
267 crtc_w--; 268 crtc_w--;
268 crtc_h--; 269 crtc_h--;
269 270
270 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 271 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
271 272
272 /* 273 /*
273 * IVB workaround: must disable low power watermarks for at least 274 * IVB workaround: must disable low power watermarks for at least
@@ -334,6 +335,8 @@ ivb_disable_plane(struct drm_plane *plane)
334 335
335 dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 336 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
336 337
338 intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
339
337 /* potentially re-enable LP watermarks */ 340 /* potentially re-enable LP watermarks */
338 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 341 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
339 intel_update_watermarks(dev); 342 intel_update_watermarks(dev);
@@ -452,7 +455,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
452 crtc_w--; 455 crtc_w--;
453 crtc_h--; 456 crtc_h--;
454 457
455 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 458 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
456 459
457 dvsscale = 0; 460 dvsscale = 0;
458 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) 461 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
@@ -583,6 +586,20 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
583 key->flags = I915_SET_COLORKEY_NONE; 586 key->flags = I915_SET_COLORKEY_NONE;
584} 587}
585 588
589static bool
590format_is_yuv(uint32_t format)
591{
592 switch (format) {
593 case DRM_FORMAT_YUYV:
594 case DRM_FORMAT_UYVY:
595 case DRM_FORMAT_VYUY:
596 case DRM_FORMAT_YVYU:
597 return true;
598 default:
599 return false;
600 }
601}
602
586static int 603static int
587intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 604intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
588 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 605 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
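
format_is_yuv() exists for the clipping code further down: the packed 4:2:2 formats it matches store two horizontal pixels per 32-bit macropixel, sharing one U/V pair, which is why the sprite source x and width later get forced even (the src_x &= ~1 adjustments). The YUYV layout, for reference:

        #include <stdint.h>

        /* One YUYV macropixel: two luma samples share one chroma pair,
         * so source positions and widths must stay even in pixels. */
        struct yuyv_macropixel {
                uint8_t y0;     /* luma, left pixel */
                uint8_t u;      /* shared blue-difference chroma */
                uint8_t y1;     /* luma, right pixel */
                uint8_t v;      /* shared red-difference chroma */
        };
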
@@ -600,9 +617,29 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
600 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 617 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
601 pipe); 618 pipe);
602 int ret = 0; 619 int ret = 0;
603 int x = src_x >> 16, y = src_y >> 16;
604 int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
605 bool disable_primary = false; 620 bool disable_primary = false;
621 bool visible;
622 int hscale, vscale;
623 int max_scale, min_scale;
624 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
625 struct drm_rect src = {
626 /* sample coordinates in 16.16 fixed point */
627 .x1 = src_x,
628 .x2 = src_x + src_w,
629 .y1 = src_y,
630 .y2 = src_y + src_h,
631 };
632 struct drm_rect dst = {
633 /* integer pixels */
634 .x1 = crtc_x,
635 .x2 = crtc_x + crtc_w,
636 .y1 = crtc_y,
637 .y2 = crtc_y + crtc_h,
638 };
639 const struct drm_rect clip = {
640 .x2 = crtc->mode.hdisplay,
641 .y2 = crtc->mode.vdisplay,
642 };
606 643
607 intel_fb = to_intel_framebuffer(fb); 644 intel_fb = to_intel_framebuffer(fb);
608 obj = intel_fb->obj; 645 obj = intel_fb->obj;
@@ -618,19 +655,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
618 intel_plane->src_w = src_w; 655 intel_plane->src_w = src_w;
619 intel_plane->src_h = src_h; 656 intel_plane->src_h = src_h;
620 657
621 src_w = src_w >> 16;
622 src_h = src_h >> 16;
623
624 /* Pipe must be running... */ 658 /* Pipe must be running... */
625 if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) 659 if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
660 DRM_DEBUG_KMS("Pipe disabled\n");
626 return -EINVAL; 661 return -EINVAL;
662 }
627 663
628 if (crtc_x >= primary_w || crtc_y >= primary_h) 664 /* Don't modify another pipe's plane */
665 if (intel_plane->pipe != intel_crtc->pipe) {
666 DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
629 return -EINVAL; 667 return -EINVAL;
668 }
630 669
631 /* Don't modify another pipe's plane */ 670 /* FIXME check all gen limits */
632 if (intel_plane->pipe != intel_crtc->pipe) 671 if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
672 DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
633 return -EINVAL; 673 return -EINVAL;
674 }
634 675
635 /* Sprite planes can be linear or x-tiled surfaces */ 676 /* Sprite planes can be linear or x-tiled surfaces */
636 switch (obj->tiling_mode) { 677 switch (obj->tiling_mode) {
@@ -638,55 +679,123 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
638 case I915_TILING_X: 679 case I915_TILING_X:
639 break; 680 break;
640 default: 681 default:
682 DRM_DEBUG_KMS("Unsupported tiling mode\n");
641 return -EINVAL; 683 return -EINVAL;
642 } 684 }
643 685
644 /* 686 /*
645 * Clamp the width & height into the visible area. Note we don't 687 * FIXME the following code does a bunch of fuzzy adjustments to the
646 * try to scale the source if part of the visible region is offscreen. 688 * coordinates and sizes. We probably need some way to decide whether
647 * The caller must handle that by adjusting source offset and size. 689 * more strict checking should be done instead.
648 */ 690 */
649 if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) { 691 max_scale = intel_plane->max_downscale << 16;
650 crtc_w += crtc_x; 692 min_scale = intel_plane->can_scale ? 1 : (1 << 16);
651 crtc_x = 0; 693
652 } 694 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
653 if ((crtc_x + crtc_w) <= 0) /* Nothing to display */ 695 BUG_ON(hscale < 0);
654 goto out; 696
655 if ((crtc_x + crtc_w) > primary_w) 697 vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
656 crtc_w = primary_w - crtc_x; 698 BUG_ON(vscale < 0);
699
700 visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
701
702 crtc_x = dst.x1;
703 crtc_y = dst.y1;
704 crtc_w = drm_rect_width(&dst);
705 crtc_h = drm_rect_height(&dst);
706
707 if (visible) {
708 /* check again in case clipping clamped the results */
709 hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
710 if (hscale < 0) {
711 DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
712 drm_rect_debug_print(&src, true);
713 drm_rect_debug_print(&dst, false);
714
715 return hscale;
716 }
657 717
658 if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) { 718 vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
659 crtc_h += crtc_y; 719 if (vscale < 0) {
660 crtc_y = 0; 720 DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
721 drm_rect_debug_print(&src, true);
722 drm_rect_debug_print(&dst, false);
723
724 return vscale;
725 }
726
727 /* Make the source viewport size an exact multiple of the scaling factors. */
728 drm_rect_adjust_size(&src,
729 drm_rect_width(&dst) * hscale - drm_rect_width(&src),
730 drm_rect_height(&dst) * vscale - drm_rect_height(&src));
731
732 /* sanity check to make sure the src viewport wasn't enlarged */
733 WARN_ON(src.x1 < (int) src_x ||
734 src.y1 < (int) src_y ||
735 src.x2 > (int) (src_x + src_w) ||
736 src.y2 > (int) (src_y + src_h));
737
738 /*
739 * Hardware doesn't handle subpixel coordinates.
740 * Adjust to (macro)pixel boundary, but be careful not to
741 * increase the source viewport size, because that could
742 * push the downscaling factor out of bounds.
743 */
744 src_x = src.x1 >> 16;
745 src_w = drm_rect_width(&src) >> 16;
746 src_y = src.y1 >> 16;
747 src_h = drm_rect_height(&src) >> 16;
748
749 if (format_is_yuv(fb->pixel_format)) {
750 src_x &= ~1;
751 src_w &= ~1;
752
753 /*
754 * Must keep src and dst the
755 * same if we can't scale.
756 */
757 if (!intel_plane->can_scale)
758 crtc_w &= ~1;
759
760 if (crtc_w == 0)
761 visible = false;
762 }
661 } 763 }
662 if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
663 goto out;
664 if (crtc_y + crtc_h > primary_h)
665 crtc_h = primary_h - crtc_y;
666 764
667 if (!crtc_w || !crtc_h) /* Again, nothing to display */ 765 /* Check size restrictions when scaling */
668 goto out; 766 if (visible && (src_w != crtc_w || src_h != crtc_h)) {
767 unsigned int width_bytes;
669 768
670 /* 769 WARN_ON(!intel_plane->can_scale);
671 * We may not have a scaler, eg. HSW does not have it any more
672 */
673 if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
674 return -EINVAL;
675 770
676 /* 771 /* FIXME interlacing min height is 6 */
677 * We can take a larger source and scale it down, but 772
678 * only so much... 16x is the max on SNB. 773 if (crtc_w < 3 || crtc_h < 3)
679 */ 774 visible = false;
680 if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale) 775
681 return -EINVAL; 776 if (src_w < 3 || src_h < 3)
777 visible = false;
778
779 width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
780
781 if (src_w > 2048 || src_h > 2048 ||
782 width_bytes > 4096 || fb->pitches[0] > 4096) {
783 DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
784 return -EINVAL;
785 }
786 }
787
788 dst.x1 = crtc_x;
789 dst.x2 = crtc_x + crtc_w;
790 dst.y1 = crtc_y;
791 dst.y2 = crtc_y + crtc_h;
682 792
683 /* 793 /*
684 * If the sprite is completely covering the primary plane, 794 * If the sprite is completely covering the primary plane,
685 * we can disable the primary and save power. 795 * we can disable the primary and save power.
686 */ 796 */
687 if ((crtc_x == 0) && (crtc_y == 0) && 797 disable_primary = drm_rect_equals(&dst, &clip);
688 (crtc_w == primary_w) && (crtc_h == primary_h)) 798 WARN_ON(disable_primary && !visible);
689 disable_primary = true;
690 799
691 mutex_lock(&dev->struct_mutex); 800 mutex_lock(&dev->struct_mutex);
692 801
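
The rewritten path keeps source coordinates in 16.16 fixed point while destination coordinates stay integer pixels, so the scale factors returned by the drm_rect helpers are themselves 16.16 values and 1 << 16 means 1:1. A self-contained illustration of the arithmetic:

        #include <stdio.h>

        /* 16.16 scale factor in the style of drm_rect_calc_hscale():
         * src width is already 16.16, dst width is integer pixels. */
        static int calc_hscale(int src_w_fp16, int dst_w)
        {
                return src_w_fp16 / dst_w;      /* 1 << 16 is 1:1 */
        }

        int main(void)
        {
                int src_w = 1024 << 16;         /* 1024.0 source pixels */
                int dst_w = 512;                /* drawn into 512 pixels */
                int hscale = calc_hscale(src_w, dst_w);

                printf("hscale = %d:1 (raw 0x%x)\n", hscale >> 16, hscale);
                return 0;                       /* prints 2:1, a 2x downscale */
        }
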
@@ -708,8 +817,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
708 if (!disable_primary) 817 if (!disable_primary)
709 intel_enable_primary(crtc); 818 intel_enable_primary(crtc);
710 819
711 intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y, 820 if (visible)
712 crtc_w, crtc_h, x, y, src_w, src_h); 821 intel_plane->update_plane(plane, fb, obj,
822 crtc_x, crtc_y, crtc_w, crtc_h,
823 src_x, src_y, src_w, src_h);
824 else
825 intel_plane->disable_plane(plane);
713 826
714 if (disable_primary) 827 if (disable_primary)
715 intel_disable_primary(crtc); 828 intel_disable_primary(crtc);
@@ -732,7 +845,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
732 845
733out_unlock: 846out_unlock:
734 mutex_unlock(&dev->struct_mutex); 847 mutex_unlock(&dev->struct_mutex);
735out:
736 return ret; 848 return ret;
737} 849}
738 850
@@ -845,6 +957,14 @@ void intel_plane_restore(struct drm_plane *plane)
845 intel_plane->src_w, intel_plane->src_h); 957 intel_plane->src_w, intel_plane->src_h);
846} 958}
847 959
960void intel_plane_disable(struct drm_plane *plane)
961{
962 if (!plane->crtc || !plane->fb)
963 return;
964
965 intel_disable_plane(plane);
966}
967
848static const struct drm_plane_funcs intel_plane_funcs = { 968static const struct drm_plane_funcs intel_plane_funcs = {
849 .update_plane = intel_update_plane, 969 .update_plane = intel_update_plane,
850 .disable_plane = intel_disable_plane, 970 .disable_plane = intel_disable_plane,
@@ -918,13 +1038,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
918 break; 1038 break;
919 1039
920 case 7: 1040 case 7:
921 if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev)) 1041 if (IS_IVYBRIDGE(dev)) {
922 intel_plane->can_scale = false;
923 else
924 intel_plane->can_scale = true; 1042 intel_plane->can_scale = true;
1043 intel_plane->max_downscale = 2;
1044 } else {
1045 intel_plane->can_scale = false;
1046 intel_plane->max_downscale = 1;
1047 }
925 1048
926 if (IS_VALLEYVIEW(dev)) { 1049 if (IS_VALLEYVIEW(dev)) {
927 intel_plane->max_downscale = 1;
928 intel_plane->update_plane = vlv_update_plane; 1050 intel_plane->update_plane = vlv_update_plane;
929 intel_plane->disable_plane = vlv_disable_plane; 1051 intel_plane->disable_plane = vlv_disable_plane;
930 intel_plane->update_colorkey = vlv_update_colorkey; 1052 intel_plane->update_colorkey = vlv_update_colorkey;
@@ -933,7 +1055,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
933 plane_formats = vlv_plane_formats; 1055 plane_formats = vlv_plane_formats;
934 num_plane_formats = ARRAY_SIZE(vlv_plane_formats); 1056 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
935 } else { 1057 } else {
936 intel_plane->max_downscale = 2;
937 intel_plane->update_plane = ivb_update_plane; 1058 intel_plane->update_plane = ivb_update_plane;
938 intel_plane->disable_plane = ivb_disable_plane; 1059 intel_plane->disable_plane = ivb_disable_plane;
939 intel_plane->update_colorkey = ivb_update_colorkey; 1060 intel_plane->update_colorkey = ivb_update_colorkey;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b945bc54207a..39debd80d190 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -914,9 +914,6 @@ intel_tv_compute_config(struct intel_encoder *encoder,
914 if (!tv_mode) 914 if (!tv_mode)
915 return false; 915 return false;
916 916
917 if (intel_encoder_check_is_cloned(&intel_tv->base))
918 return false;
919
920 pipe_config->adjusted_mode.clock = tv_mode->clock; 917 pipe_config->adjusted_mode.clock = tv_mode->clock;
921 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 918 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
922 pipe_config->pipe_bpp = 8*3; 919 pipe_config->pipe_bpp = 8*3;
@@ -1521,12 +1518,12 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
1521 struct child_device_config *p_child; 1518 struct child_device_config *p_child;
1522 int i, ret; 1519 int i, ret;
1523 1520
1524 if (!dev_priv->child_dev_num) 1521 if (!dev_priv->vbt.child_dev_num)
1525 return 1; 1522 return 1;
1526 1523
1527 ret = 0; 1524 ret = 0;
1528 for (i = 0; i < dev_priv->child_dev_num; i++) { 1525 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
1529 p_child = dev_priv->child_dev + i; 1526 p_child = dev_priv->vbt.child_dev + i;
1530 /* 1527 /*
1531 * If the device type is not TV, continue. 1528 * If the device type is not TV, continue.
1532 */ 1529 */
@@ -1564,7 +1561,7 @@ intel_tv_init(struct drm_device *dev)
1564 return; 1561 return;
1565 } 1562 }
1566 /* Even if we have an encoder we may not have a connector */ 1563 /* Even if we have an encoder we may not have a connector */
1567 if (!dev_priv->int_tv_support) 1564 if (!dev_priv->vbt.int_tv_support)
1568 return; 1565 return;
1569 1566
1570 /* 1567 /*
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 7db592eedbf1..a9a0300f09fc 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,5 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2mgag200-y := mgag200_main.o mgag200_mode.o \ 2mgag200-y := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
3 mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o 3 mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
4 4
5obj-$(CONFIG_DRM_MGAG200) += mgag200.o 5obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
new file mode 100644
index 000000000000..801731aeab61
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -0,0 +1,275 @@
1/*
2 * Copyright 2013 Matrox Graphics
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License version 2. See the file COPYING in the main
6 * directory of this archive for more details.
7 *
8 * Author: Christopher Harvey <charvey@matrox.com>
9 */
10
11#include <drm/drmP.h>
12#include "mgag200_drv.h"
13
14static bool warn_transparent = true;
15static bool warn_palette = true;
16
17/*
18 Hide the cursor off screen. We can't disable the cursor hardware because it
 19 takes too long to re-activate and causes momentary corruption.
20*/
21static void mga_hide_cursor(struct mga_device *mdev)
22{
23 WREG8(MGA_CURPOSXL, 0);
24 WREG8(MGA_CURPOSXH, 0);
25 mgag200_bo_unpin(mdev->cursor.pixels_1);
26 mgag200_bo_unpin(mdev->cursor.pixels_2);
27}
28
29int mga_crtc_cursor_set(struct drm_crtc *crtc,
30 struct drm_file *file_priv,
31 uint32_t handle,
32 uint32_t width,
33 uint32_t height)
34{
35 struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
36 struct mga_device *mdev = (struct mga_device *)dev->dev_private;
37 struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
38 struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
39 struct mgag200_bo *pixels_current = mdev->cursor.pixels_current;
40 struct mgag200_bo *pixels_prev = mdev->cursor.pixels_prev;
41 struct drm_gem_object *obj;
42 struct mgag200_bo *bo = NULL;
43 int ret = 0;
44 unsigned int i, row, col;
45 uint32_t colour_set[16];
46 uint32_t *next_space = &colour_set[0];
47 uint32_t *palette_iter;
48 uint32_t this_colour;
49 bool found = false;
50 int colour_count = 0;
51 u64 gpu_addr;
52 u8 reg_index;
53 u8 this_row[48];
54
55 if (!pixels_1 || !pixels_2) {
56 WREG8(MGA_CURPOSXL, 0);
57 WREG8(MGA_CURPOSXH, 0);
58 return -ENOTSUPP; /* Didn't allocate space for cursors */
59 }
60
61 if ((width != 64 || height != 64) && handle) {
62 WREG8(MGA_CURPOSXL, 0);
63 WREG8(MGA_CURPOSXH, 0);
64 return -EINVAL;
65 }
66
67 BUG_ON(pixels_1 != pixels_current && pixels_1 != pixels_prev);
68 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
69 BUG_ON(pixels_current == pixels_prev);
70
71 ret = mgag200_bo_reserve(pixels_1, true);
72 if (ret) {
73 WREG8(MGA_CURPOSXL, 0);
74 WREG8(MGA_CURPOSXH, 0);
75 return ret;
76 }
77 ret = mgag200_bo_reserve(pixels_2, true);
78 if (ret) {
79 WREG8(MGA_CURPOSXL, 0);
80 WREG8(MGA_CURPOSXH, 0);
81 mgag200_bo_unreserve(pixels_1);
82 return ret;
83 }
84
85 if (!handle) {
86 mga_hide_cursor(mdev);
87 ret = 0;
88 goto out1;
89 }
90
91 /* Move cursor buffers into VRAM if they aren't already */
92 if (!pixels_1->pin_count) {
93 ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
94 &mdev->cursor.pixels_1_gpu_addr);
95 if (ret)
96 goto out1;
97 }
98 if (!pixels_2->pin_count) {
99 ret = mgag200_bo_pin(pixels_2, TTM_PL_FLAG_VRAM,
100 &mdev->cursor.pixels_2_gpu_addr);
101 if (ret) {
102 mgag200_bo_unpin(pixels_1);
103 goto out1;
104 }
105 }
106
107 mutex_lock(&dev->struct_mutex);
108 obj = drm_gem_object_lookup(dev, file_priv, handle);
109 if (!obj) {
110 mutex_unlock(&dev->struct_mutex);
111 ret = -ENOENT;
112 goto out1;
113 }
114 drm_gem_object_unreference(obj);
115 mutex_unlock(&dev->struct_mutex);
116
117 bo = gem_to_mga_bo(obj);
118 ret = mgag200_bo_reserve(bo, true);
119 if (ret) {
120 dev_err(&dev->pdev->dev, "failed to reserve user bo\n");
121 goto out1;
122 }
123 if (!bo->kmap.virtual) {
124 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
125 if (ret) {
126 dev_err(&dev->pdev->dev, "failed to kmap user buffer updates\n");
127 goto out2;
128 }
129 }
130
131 memset(&colour_set[0], 0, sizeof(uint32_t)*16);
132 /* width*height*4 = 16384 */
133 for (i = 0; i < 16384; i += 4) {
134 this_colour = ioread32(bo->kmap.virtual + i);
135 /* No transparency */
136 if (this_colour>>24 != 0xff &&
137 this_colour>>24 != 0x0) {
138 if (warn_transparent) {
139 dev_info(&dev->pdev->dev, "Video card doesn't support cursors with partial transparency.\n");
140 dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
141 warn_transparent = false; /* Only tell the user once. */
142 }
143 ret = -EINVAL;
144 goto out3;
145 }
146 /* Don't need to store transparent pixels as colours */
147 if (this_colour>>24 == 0x0)
148 continue;
149 found = false;
150 for (palette_iter = &colour_set[0]; palette_iter != next_space; palette_iter++) {
151 if (*palette_iter == this_colour) {
152 found = true;
153 break;
154 }
155 }
156 if (found)
157 continue;
158 /* We only support 4bit paletted cursors */
159 if (colour_count >= 16) {
160 if (warn_palette) {
161 dev_info(&dev->pdev->dev, "Video card only supports cursors with up to 16 colours.\n");
162 dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
163 warn_palette = false; /* Only tell the user once. */
164 }
165 ret = -EINVAL;
166 goto out3;
167 }
168 *next_space = this_colour;
169 next_space++;
170 colour_count++;
171 }
172
173 /* Program colours from cursor icon into palette */
174 for (i = 0; i < colour_count; i++) {
175 if (i <= 2)
176 reg_index = 0x8 + i*0x4;
177 else
178 reg_index = 0x60 + i*0x3;
179 WREG_DAC(reg_index, colour_set[i] & 0xff);
180 WREG_DAC(reg_index+1, colour_set[i]>>8 & 0xff);
181 WREG_DAC(reg_index+2, colour_set[i]>>16 & 0xff);
182 BUG_ON((colour_set[i]>>24 & 0xff) != 0xff);
183 }
184
185 /* Map upcoming buffer to write colour indices */
186 if (!pixels_prev->kmap.virtual) {
187 ret = ttm_bo_kmap(&pixels_prev->bo, 0,
188 pixels_prev->bo.num_pages,
189 &pixels_prev->kmap);
190 if (ret) {
191 dev_err(&dev->pdev->dev, "failed to kmap cursor updates\n");
192 goto out3;
193 }
194 }
195
196 /* now write colour indices into hardware cursor buffer */
197 for (row = 0; row < 64; row++) {
198 memset(&this_row[0], 0, 48);
199 for (col = 0; col < 64; col++) {
200 this_colour = ioread32(bo->kmap.virtual + 4*(col + 64*row));
201 /* write transparent pixels */
202 if (this_colour>>24 == 0x0) {
203 this_row[47 - col/8] |= 0x80>>(col%8);
204 continue;
205 }
206
207 /* write colour index here */
208 for (i = 0; i < colour_count; i++) {
209 if (colour_set[i] == this_colour) {
210 if (col % 2)
211 this_row[col/2] |= i<<4;
212 else
213 this_row[col/2] |= i;
214 break;
215 }
216 }
217 }
218 memcpy_toio(pixels_prev->kmap.virtual + row*48, &this_row[0], 48);
219 }
220
221 /* Program gpu address of cursor buffer */
222 if (pixels_prev == pixels_1)
223 gpu_addr = mdev->cursor.pixels_1_gpu_addr;
224 else
225 gpu_addr = mdev->cursor.pixels_2_gpu_addr;
226 WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((gpu_addr>>10) & 0xff));
227 WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((gpu_addr>>18) & 0x3f));
228
229 /* Adjust cursor control register to turn on the cursor */
230 WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palletized cursor mode */
231
232 /* Now swap internal buffer pointers */
233 if (mdev->cursor.pixels_1 == mdev->cursor.pixels_prev) {
234 mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
235 mdev->cursor.pixels_current = mdev->cursor.pixels_1;
236 } else if (mdev->cursor.pixels_1 == mdev->cursor.pixels_current) {
237 mdev->cursor.pixels_prev = mdev->cursor.pixels_1;
238 mdev->cursor.pixels_current = mdev->cursor.pixels_2;
239 } else {
240 BUG();
241 }
242 ret = 0;
243
244 ttm_bo_kunmap(&pixels_prev->kmap);
245 out3:
246 ttm_bo_kunmap(&bo->kmap);
247 out2:
248 mgag200_bo_unreserve(bo);
249 out1:
250 if (ret)
251 mga_hide_cursor(mdev);
252 mgag200_bo_unreserve(pixels_1);
253 mgag200_bo_unreserve(pixels_2);
254 return ret;
255}
256
257int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
258{
259 struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
260 /* Our origin is at (64,64) */
261 x += 64;
262 y += 64;
263
264 BUG_ON(x <= 0);
265 BUG_ON(y <= 0);
266 BUG_ON(x & ~0xffff);
267 BUG_ON(y & ~0xffff);
268
269 WREG8(MGA_CURPOSXL, x & 0xff);
270 WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
271
272 WREG8(MGA_CURPOSYL, y & 0xff);
273 WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
274 return 0;
275}
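
The cursor upload above packs each 64-pixel row into 48 bytes: 32 bytes of 4-bit palette indices (two pixels per byte, even columns in the low nibble) followed by a transparency bitmask filled from byte 47 downward, one bit per pixel. A minimal sketch of that per-row encoding, where argb[] and palette_index() are illustrative stand-ins for the driver's ioread32() loop and colour_set[] search:

    #include <stdint.h>
    #include <string.h>

    /* Stand-in for the linear colour_set[] search in mga_crtc_cursor_set(). */
    static uint8_t palette_index(uint32_t colour);

    /* Encode one 64-pixel row into the 48-byte hardware layout. */
    static void encode_cursor_row(const uint32_t *argb, uint8_t row[48])
    {
        unsigned int col;

        memset(row, 0, 48);
        for (col = 0; col < 64; col++) {
            if (argb[col] >> 24 == 0x0) {
                /* Transparent pixel: set its mask bit, store no index. */
                row[47 - col / 8] |= 0x80 >> (col % 8);
                continue;
            }
            /* Opaque pixel: 4-bit palette index, two pixels per byte. */
            if (col % 2)
                row[col / 2] |= palette_index(argb[col]) << 4;
            else
                row[col / 2] |= palette_index(argb[col]);
        }
    }
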
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index bf29b2f4d68d..12e2499d9352 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -149,6 +149,21 @@ struct mga_connector {
149 struct mga_i2c_chan *i2c; 149 struct mga_i2c_chan *i2c;
150}; 150};
151 151
152struct mga_cursor {
153 /*
154 We have to have 2 buffers for the cursor to avoid occasional
155 corruption while switching cursor icons.
156 If either of these is NULL, then don't do hardware cursors, and
157 fall back to software.
158 */
159 struct mgag200_bo *pixels_1;
160 struct mgag200_bo *pixels_2;
161 u64 pixels_1_gpu_addr, pixels_2_gpu_addr;
162 /* The currently displayed icon; points to either pixels_1 or pixels_2 */
163 struct mgag200_bo *pixels_current;
164 /* The previously displayed icon */
165 struct mgag200_bo *pixels_prev;
166};
152 167
153struct mga_mc { 168struct mga_mc {
154 resource_size_t vram_size; 169 resource_size_t vram_size;
@@ -181,6 +196,7 @@ struct mga_device {
181 struct mga_mode_info mode_info; 196 struct mga_mode_info mode_info;
182 197
183 struct mga_fbdev *mfbdev; 198 struct mga_fbdev *mfbdev;
199 struct mga_cursor cursor;
184 200
185 bool suspended; 201 bool suspended;
186 int num_crtc; 202 int num_crtc;
@@ -198,7 +214,8 @@ struct mga_device {
198 struct ttm_bo_device bdev; 214 struct ttm_bo_device bdev;
199 } ttm; 215 } ttm;
200 216
201 u32 reg_1e24; /* SE model number */ 217 /* SE model number stored in reg 0x1e24 */
218 u32 unique_rev_id;
202}; 219};
203 220
204 221
@@ -263,8 +280,24 @@ void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
263#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 280#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
264void mgag200_ttm_placement(struct mgag200_bo *bo, int domain); 281void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
265 282
266int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait); 283static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
267void mgag200_bo_unreserve(struct mgag200_bo *bo); 284{
285 int ret;
286
287 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
288 if (ret) {
289 if (ret != -ERESTARTSYS && ret != -EBUSY)
290 DRM_ERROR("reserve failed %p\n", bo);
291 return ret;
292 }
293 return 0;
294}
295
296static inline void mgag200_bo_unreserve(struct mgag200_bo *bo)
297{
298 ttm_bo_unreserve(&bo->bo);
299}
300
268int mgag200_bo_create(struct drm_device *dev, int size, int align, 301int mgag200_bo_create(struct drm_device *dev, int size, int align,
269 uint32_t flags, struct mgag200_bo **pastbo); 302 uint32_t flags, struct mgag200_bo **pastbo);
270int mgag200_mm_init(struct mga_device *mdev); 303int mgag200_mm_init(struct mga_device *mdev);
@@ -273,4 +306,9 @@ int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
273int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr); 306int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
274int mgag200_bo_unpin(struct mgag200_bo *bo); 307int mgag200_bo_unpin(struct mgag200_bo *bo);
275int mgag200_bo_push_sysram(struct mgag200_bo *bo); 308int mgag200_bo_push_sysram(struct mgag200_bo *bo);
309 /* mgag200_cursor.c */
310int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
311 uint32_t handle, uint32_t width, uint32_t height);
312int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
313
276#endif /* __MGAG200_DRV_H__ */ 314#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 5da824ce9ba1..964f58cee5ea 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -27,7 +27,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
27 struct mgag200_bo *bo; 27 struct mgag200_bo *bo;
28 int src_offset, dst_offset; 28 int src_offset, dst_offset;
29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8; 29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
30 int ret; 30 int ret = -EBUSY;
31 bool unmap = false; 31 bool unmap = false;
32 bool store_for_later = false; 32 bool store_for_later = false;
33 int x2, y2; 33 int x2, y2;
@@ -41,7 +41,8 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
41 * then the BO is being moved and we should 41 * then the BO is being moved and we should
42 * store up the damage until later. 42 * store up the damage until later.
43 */ 43 */
44 ret = mgag200_bo_reserve(bo, true); 44 if (!in_interrupt())
45 ret = mgag200_bo_reserve(bo, true);
45 if (ret) { 46 if (ret) {
46 if (ret != -EBUSY) 47 if (ret != -EBUSY)
47 return; 48 return;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 99059237da38..9fa5685baee0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -176,7 +176,7 @@ static int mgag200_device_init(struct drm_device *dev,
176 176
177 /* stash G200 SE model number for later use */ 177 /* stash G200 SE model number for later use */
178 if (IS_G200_SE(mdev)) 178 if (IS_G200_SE(mdev))
179 mdev->reg_1e24 = RREG32(0x1e24); 179 mdev->unique_rev_id = RREG32(0x1e24);
180 180
181 ret = mga_vram_init(mdev); 181 ret = mga_vram_init(mdev);
182 if (ret) 182 if (ret)
@@ -209,7 +209,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
209 r = mgag200_device_init(dev, flags); 209 r = mgag200_device_init(dev, flags);
210 if (r) { 210 if (r) {
211 dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r); 211 dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
212 goto out; 212 return r;
213 } 213 }
214 r = mgag200_mm_init(mdev); 214 r = mgag200_mm_init(mdev);
215 if (r) 215 if (r)
@@ -221,8 +221,27 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
221 dev->mode_config.prefer_shadow = 1; 221 dev->mode_config.prefer_shadow = 1;
222 222
223 r = mgag200_modeset_init(mdev); 223 r = mgag200_modeset_init(mdev);
224 if (r) 224 if (r) {
225 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); 225 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
226 goto out;
227 }
228
229 /* Make small buffers to store a hardware cursor (double buffered icon updates) */
230 mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
231 &mdev->cursor.pixels_1);
232 mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
233 &mdev->cursor.pixels_2);
234 if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
235 goto cursor_nospace;
236 mdev->cursor.pixels_current = mdev->cursor.pixels_1;
237 mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
238 goto cursor_done;
239 cursor_nospace:
240 mdev->cursor.pixels_1 = NULL;
241 mdev->cursor.pixels_2 = NULL;
242 dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
243 cursor_done:
244
226out: 245out:
227 if (r) 246 if (r)
228 mgag200_driver_unload(dev); 247 mgag200_driver_unload(dev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee66badc8bb6..251784aa2225 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1008,7 +1008,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1008 1008
1009 1009
1010 if (IS_G200_SE(mdev)) { 1010 if (IS_G200_SE(mdev)) {
1011 if (mdev->reg_1e24 >= 0x02) { 1011 if (mdev->unique_rev_id >= 0x02) {
1012 u8 hi_pri_lvl; 1012 u8 hi_pri_lvl;
1013 u32 bpp; 1013 u32 bpp;
1014 u32 mb; 1014 u32 mb;
@@ -1038,7 +1038,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1038 WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl); 1038 WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
1039 } else { 1039 } else {
1040 WREG8(MGAREG_CRTCEXT_INDEX, 0x06); 1040 WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1041 if (mdev->reg_1e24 >= 0x01) 1041 if (mdev->unique_rev_id >= 0x01)
1042 WREG8(MGAREG_CRTCEXT_DATA, 0x03); 1042 WREG8(MGAREG_CRTCEXT_DATA, 0x03);
1043 else 1043 else
1044 WREG8(MGAREG_CRTCEXT_DATA, 0x04); 1044 WREG8(MGAREG_CRTCEXT_DATA, 0x04);
@@ -1253,6 +1253,8 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
1253 1253
1254/* These provide the minimum set of functions required to handle a CRTC */ 1254/* These provide the minimum set of functions required to handle a CRTC */
1255static const struct drm_crtc_funcs mga_crtc_funcs = { 1255static const struct drm_crtc_funcs mga_crtc_funcs = {
1256 .cursor_set = mga_crtc_cursor_set,
1257 .cursor_move = mga_crtc_cursor_move,
1256 .gamma_set = mga_crtc_gamma_set, 1258 .gamma_set = mga_crtc_gamma_set,
1257 .set_config = drm_crtc_helper_set_config, 1259 .set_config = drm_crtc_helper_set_config,
1258 .destroy = mga_crtc_destroy, 1260 .destroy = mga_crtc_destroy,
@@ -1410,6 +1412,32 @@ static int mga_vga_get_modes(struct drm_connector *connector)
1410 return ret; 1412 return ret;
1411} 1413}
1412 1414
1415static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
1416 int bits_per_pixel)
1417{
1418 uint32_t total_area, divisor;
1419 int64_t active_area, pixels_per_second, bandwidth;
1420 uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
1421
1422 divisor = 1024;
1423
1424 if (!mode->htotal || !mode->vtotal || !mode->clock)
1425 return 0;
1426
1427 active_area = mode->hdisplay * mode->vdisplay;
1428 total_area = mode->htotal * mode->vtotal;
1429
1430 pixels_per_second = active_area * mode->clock * 1000;
1431 do_div(pixels_per_second, total_area);
1432
1433 bandwidth = pixels_per_second * bytes_per_pixel * 100;
1434 do_div(bandwidth, divisor);
1435
1436 return (uint32_t)(bandwidth);
1437}
1438
1439#define MODE_BANDWIDTH MODE_BAD
1440
1413static int mga_vga_mode_valid(struct drm_connector *connector, 1441static int mga_vga_mode_valid(struct drm_connector *connector,
1414 struct drm_display_mode *mode) 1442 struct drm_display_mode *mode)
1415{ 1443{
@@ -1421,7 +1449,45 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1421 int bpp = 32; 1449 int bpp = 32;
1422 int i = 0; 1450 int i = 0;
1423 1451
1424 /* FIXME: Add bandwidth and g200se limitations */ 1452 if (IS_G200_SE(mdev)) {
1453 if (mdev->unique_rev_id == 0x01) {
1454 if (mode->hdisplay > 1600)
1455 return MODE_VIRTUAL_X;
1456 if (mode->vdisplay > 1200)
1457 return MODE_VIRTUAL_Y;
1458 if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1459 > (24400 * 1024))
1460 return MODE_BANDWIDTH;
1461 } else if (mdev->unique_rev_id >= 0x02) {
1462 if (mode->hdisplay > 1920)
1463 return MODE_VIRTUAL_X;
1464 if (mode->vdisplay > 1200)
1465 return MODE_VIRTUAL_Y;
1466 if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1467 > (30100 * 1024))
1468 return MODE_BANDWIDTH;
1469 }
1470 } else if (mdev->type == G200_WB) {
1471 if (mode->hdisplay > 1280)
1472 return MODE_VIRTUAL_X;
1473 if (mode->vdisplay > 1024)
1474 return MODE_VIRTUAL_Y;
 1475 if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 1476 > (31877 * 1024))
 1477 return MODE_BANDWIDTH;
1478 } else if (mdev->type == G200_EV &&
1479 (mga_vga_calculate_mode_bandwidth(mode, bpp)
1480 > (32700 * 1024))) {
1481 return MODE_BANDWIDTH;
 1482 } else if (mdev->type == G200_EH &&
 1483 (mga_vga_calculate_mode_bandwidth(mode, bpp)
 1484 > (37500 * 1024))) {
 1485 return MODE_BANDWIDTH;
 1486 } else if (mdev->type == G200_ER &&
 1487 (mga_vga_calculate_mode_bandwidth(mode, bpp)
 1488 > (55000 * 1024))) {
1489 return MODE_BANDWIDTH;
1490 }
1425 1491
1426 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || 1492 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
1427 mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || 1493 mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
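
For scale, mga_vga_calculate_mode_bandwidth() above returns active pixels per second times bytes per pixel, scaled by 100/1024, so limits such as 30100 * 1024 live in that same scaled unit. A worked example, assuming the standard VESA 1280x1024@60 timings (108 MHz pixel clock, 1688x1066 total) and the bpp of 32 that mga_vga_mode_valid() uses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t active = 1280 * 1024;                    /* hdisplay * vdisplay */
        uint64_t total  = 1688 * 1066;                    /* htotal * vtotal */
        uint64_t pps    = active * 108000 * 1000 / total; /* ~78.67 Mpix/s */
        uint64_t bw     = pps * 4 * 100 / 1024;           /* 32 bpp -> 4 bytes */

        /* Prints 30730106 vs 30822400: 1280x1024@60 squeaks in just
         * under the G200 SE rev >= 0x02 limit. */
        printf("%llu vs %u\n", (unsigned long long)bw, 30100 * 1024);
        return 0;
    }
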
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index fb24d8655feb..3ae442a64bd6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -235,7 +235,11 @@
235#define MGAREG_CRTCEXT_INDEX 0x1fde 235#define MGAREG_CRTCEXT_INDEX 0x1fde
236#define MGAREG_CRTCEXT_DATA 0x1fdf 236#define MGAREG_CRTCEXT_DATA 0x1fdf
237 237
238 238/* Cursor X and Y position */
239#define MGA_CURPOSXL 0x3c0c
240#define MGA_CURPOSXH 0x3c0d
241#define MGA_CURPOSYL 0x3c0e
242#define MGA_CURPOSYH 0x3c0f
239 243
240/* MGA bits for registers PCI_OPTION_REG */ 244/* MGA bits for registers PCI_OPTION_REG */
241#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 ) 245#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 401c9891d3a8..3acb2b044c7b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -270,26 +270,20 @@ int mgag200_mm_init(struct mga_device *mdev)
270 return ret; 270 return ret;
271 } 271 }
272 272
273 mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), 273 mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
274 pci_resource_len(dev->pdev, 0), 274 pci_resource_len(dev->pdev, 0));
275 DRM_MTRR_WC);
276 275
277 return 0; 276 return 0;
278} 277}
279 278
280void mgag200_mm_fini(struct mga_device *mdev) 279void mgag200_mm_fini(struct mga_device *mdev)
281{ 280{
282 struct drm_device *dev = mdev->dev;
283 ttm_bo_device_release(&mdev->ttm.bdev); 281 ttm_bo_device_release(&mdev->ttm.bdev);
284 282
285 mgag200_ttm_global_release(mdev); 283 mgag200_ttm_global_release(mdev);
286 284
287 if (mdev->fb_mtrr >= 0) { 285 arch_phys_wc_del(mdev->fb_mtrr);
288 drm_mtrr_del(mdev->fb_mtrr, 286 mdev->fb_mtrr = 0;
289 pci_resource_start(dev->pdev, 0),
290 pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
291 mdev->fb_mtrr = -1;
292 }
293} 287}
294 288
295void mgag200_ttm_placement(struct mgag200_bo *bo, int domain) 289void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
@@ -309,24 +303,6 @@ void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
309 bo->placement.num_busy_placement = c; 303 bo->placement.num_busy_placement = c;
310} 304}
311 305
312int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
313{
314 int ret;
315
316 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
317 if (ret) {
318 if (ret != -ERESTARTSYS && ret != -EBUSY)
319 DRM_ERROR("reserve failed %p %d\n", bo, ret);
320 return ret;
321 }
322 return 0;
323}
324
325void mgag200_bo_unreserve(struct mgag200_bo *bo)
326{
327 ttm_bo_unreserve(&bo->bo);
328}
329
330int mgag200_bo_create(struct drm_device *dev, int size, int align, 306int mgag200_bo_create(struct drm_device *dev, int size, int align,
331 uint32_t flags, struct mgag200_bo **pmgabo) 307 uint32_t flags, struct mgag200_bo **pmgabo)
332{ 308{
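
The conversion above (mirrored later in this diff for nouveau_ttm.c) leans on a property of the new API: arch_phys_wc_add() hands back a cookie that arch_phys_wc_del() simply ignores when no MTRR slot was actually taken, so callers no longer need the failure branches that drm_mtrr_add()/drm_mtrr_del() required. A minimal sketch of the resulting pattern, with a hypothetical demo_device carrying the cookie:

    /* Assumed for the sketch: struct demo_device { struct pci_dev *pdev; int fb_mtrr; }; */
    static void demo_mm_init(struct demo_device *d)
    {
        /* No error check needed: an unneeded or failed add yields a
         * cookie that arch_phys_wc_del() treats as a no-op. */
        d->fb_mtrr = arch_phys_wc_add(pci_resource_start(d->pdev, 0),
                                      pci_resource_len(d->pdev, 0));
    }

    static void demo_mm_fini(struct demo_device *d)
    {
        arch_phys_wc_del(d->fb_mtrr);
        d->fb_mtrr = 0;
    }
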
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index a7ff6d5a34b9..ff80f12480ea 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -15,6 +15,13 @@ config DRM_NOUVEAU
15 select ACPI_WMI if ACPI && X86 15 select ACPI_WMI if ACPI && X86
16 select MXM_WMI if ACPI && X86 16 select MXM_WMI if ACPI && X86
17 select POWER_SUPPLY 17 select POWER_SUPPLY
 18 # Similar to i915, we need to select ACPI_VIDEO and its dependencies
19 select BACKLIGHT_LCD_SUPPORT if ACPI && X86
20 select BACKLIGHT_CLASS_DEVICE if ACPI && X86
21 select VIDEO_OUTPUT_CONTROL if ACPI && X86
22 select INPUT if ACPI && X86
23 select THERMAL if ACPI && X86
24 select ACPI_VIDEO if ACPI && X86
18 help 25 help
19 Choose this option for open-source nVidia support. 26 Choose this option for open-source nVidia support.
20 27
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 1c4c6c9161ac..8f467e7bfd19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -129,6 +129,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
129 129
130 if (chan->ntfy) { 130 if (chan->ntfy) {
131 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); 131 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
132 nouveau_bo_unpin(chan->ntfy);
132 drm_gem_object_unreference_unlocked(chan->ntfy->gem); 133 drm_gem_object_unreference_unlocked(chan->ntfy->gem);
133 } 134 }
134 135
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 0b6c296e6ef3..708b2d1c0037 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,6 +26,7 @@
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29#include <drm/ttm/ttm_execbuf_util.h>
29 30
30#include "nouveau_fbcon.h" 31#include "nouveau_fbcon.h"
31#include "dispnv04/hw.h" 32#include "dispnv04/hw.h"
@@ -462,51 +463,6 @@ nouveau_display_resume(struct drm_device *dev)
462} 463}
463 464
464static int 465static int
465nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
466 struct nouveau_bo *new_bo)
467{
468 int ret;
469
470 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
471 if (ret)
472 return ret;
473
474 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
475 if (ret)
476 goto fail;
477
478 if (likely(old_bo != new_bo)) {
479 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
480 if (ret)
481 goto fail_unreserve;
482 }
483
484 return 0;
485
486fail_unreserve:
487 ttm_bo_unreserve(&new_bo->bo);
488fail:
489 nouveau_bo_unpin(new_bo);
490 return ret;
491}
492
493static void
494nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
495 struct nouveau_bo *new_bo,
496 struct nouveau_fence *fence)
497{
498 nouveau_bo_fence(new_bo, fence);
499 ttm_bo_unreserve(&new_bo->bo);
500
501 if (likely(old_bo != new_bo)) {
502 nouveau_bo_fence(old_bo, fence);
503 ttm_bo_unreserve(&old_bo->bo);
504 }
505
506 nouveau_bo_unpin(old_bo);
507}
508
509static int
510nouveau_page_flip_emit(struct nouveau_channel *chan, 466nouveau_page_flip_emit(struct nouveau_channel *chan,
511 struct nouveau_bo *old_bo, 467 struct nouveau_bo *old_bo,
512 struct nouveau_bo *new_bo, 468 struct nouveau_bo *new_bo,
@@ -568,6 +524,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
568 struct nouveau_page_flip_state *s; 524 struct nouveau_page_flip_state *s;
569 struct nouveau_channel *chan = NULL; 525 struct nouveau_channel *chan = NULL;
570 struct nouveau_fence *fence; 526 struct nouveau_fence *fence;
527 struct list_head res;
528 struct ttm_validate_buffer res_val[2];
529 struct ww_acquire_ctx ticket;
571 int ret; 530 int ret;
572 531
573 if (!drm->channel) 532 if (!drm->channel)
@@ -577,25 +536,43 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
577 if (!s) 536 if (!s)
578 return -ENOMEM; 537 return -ENOMEM;
579 538
580 /* Don't let the buffers go away while we flip */
581 ret = nouveau_page_flip_reserve(old_bo, new_bo);
582 if (ret)
583 goto fail_free;
584
585 /* Initialize a page flip struct */
586 *s = (struct nouveau_page_flip_state)
587 { { }, event, nouveau_crtc(crtc)->index,
588 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
589 new_bo->bo.offset };
590
591 /* Choose the channel the flip will be handled in */ 539 /* Choose the channel the flip will be handled in */
540 spin_lock(&old_bo->bo.bdev->fence_lock);
592 fence = new_bo->bo.sync_obj; 541 fence = new_bo->bo.sync_obj;
593 if (fence) 542 if (fence)
594 chan = fence->channel; 543 chan = fence->channel;
595 if (!chan) 544 if (!chan)
596 chan = drm->channel; 545 chan = drm->channel;
546 spin_unlock(&old_bo->bo.bdev->fence_lock);
547
597 mutex_lock(&chan->cli->mutex); 548 mutex_lock(&chan->cli->mutex);
598 549
550 if (new_bo != old_bo) {
551 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
552 if (likely(!ret)) {
553 res_val[0].bo = &old_bo->bo;
554 res_val[1].bo = &new_bo->bo;
555 INIT_LIST_HEAD(&res);
556 list_add_tail(&res_val[0].head, &res);
557 list_add_tail(&res_val[1].head, &res);
558 ret = ttm_eu_reserve_buffers(&ticket, &res);
559 if (ret)
560 nouveau_bo_unpin(new_bo);
561 }
562 } else
563 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
564
565 if (ret) {
566 mutex_unlock(&chan->cli->mutex);
567 goto fail_free;
568 }
569
570 /* Initialize a page flip struct */
571 *s = (struct nouveau_page_flip_state)
572 { { }, event, nouveau_crtc(crtc)->index,
573 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
574 new_bo->bo.offset };
575
599 /* Emit a page flip */ 576 /* Emit a page flip */
600 if (nv_device(drm->device)->card_type >= NV_50) { 577 if (nv_device(drm->device)->card_type >= NV_50) {
601 ret = nv50_display_flip_next(crtc, fb, chan, 0); 578 ret = nv50_display_flip_next(crtc, fb, chan, 0);
@@ -613,12 +590,22 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
613 /* Update the crtc struct and cleanup */ 590 /* Update the crtc struct and cleanup */
614 crtc->fb = fb; 591 crtc->fb = fb;
615 592
616 nouveau_page_flip_unreserve(old_bo, new_bo, fence); 593 if (old_bo != new_bo) {
594 ttm_eu_fence_buffer_objects(&ticket, &res, fence);
595 nouveau_bo_unpin(old_bo);
596 } else {
597 nouveau_bo_fence(new_bo, fence);
598 ttm_bo_unreserve(&new_bo->bo);
599 }
617 nouveau_fence_unref(&fence); 600 nouveau_fence_unref(&fence);
618 return 0; 601 return 0;
619 602
620fail_unreserve: 603fail_unreserve:
621 nouveau_page_flip_unreserve(old_bo, new_bo, NULL); 604 if (old_bo != new_bo) {
605 ttm_eu_backoff_reservation(&ticket, &res);
606 nouveau_bo_unpin(new_bo);
607 } else
608 ttm_bo_unreserve(&new_bo->bo);
622fail_free: 609fail_free:
623 kfree(s); 610 kfree(s);
624 return ret; 611 return ret;
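
The page-flip rework above swaps the bespoke nouveau_page_flip_reserve()/unreserve() helpers for the TTM execbuf utilities, which reserve the whole buffer set under one ww_acquire_ctx ticket so two concurrent flips grabbing old/new in opposite orders cannot deadlock. A condensed sketch of the pattern (bo_old, bo_new and fence are placeholders):

    struct ttm_validate_buffer vals[2];
    struct ww_acquire_ctx ticket;
    struct list_head list;
    int ret;

    vals[0].bo = &bo_old->bo;
    vals[1].bo = &bo_new->bo;
    INIT_LIST_HEAD(&list);
    list_add_tail(&vals[0].head, &list);
    list_add_tail(&vals[1].head, &list);

    ret = ttm_eu_reserve_buffers(&ticket, &list);
    if (ret)
        return ret;                   /* nothing left reserved on failure */

    /* ... emit the flip and obtain 'fence' ... */

    if (fence)
        ttm_eu_fence_buffer_objects(&ticket, &list, fence); /* fence + unreserve */
    else
        ttm_eu_backoff_reservation(&ticket, &list);         /* plain unreserve */
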
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 383f4e6ea9d1..218a4b522fe5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -702,6 +702,7 @@ driver = {
702 .gem_prime_export = drm_gem_prime_export, 702 .gem_prime_export = drm_gem_prime_export,
703 .gem_prime_import = drm_gem_prime_import, 703 .gem_prime_import = drm_gem_prime_import,
704 .gem_prime_pin = nouveau_gem_prime_pin, 704 .gem_prime_pin = nouveau_gem_prime_pin,
705 .gem_prime_unpin = nouveau_gem_prime_unpin,
705 .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, 706 .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
706 .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, 707 .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
707 .gem_prime_vmap = nouveau_gem_prime_vmap, 708 .gem_prime_vmap = nouveau_gem_prime_vmap,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 51fe6406edab..9352010030e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -289,16 +289,13 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
289 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); 289 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
290 if (ret) { 290 if (ret) {
291 NV_ERROR(drm, "failed to pin fb: %d\n", ret); 291 NV_ERROR(drm, "failed to pin fb: %d\n", ret);
292 nouveau_bo_ref(NULL, &nvbo); 292 goto out_unref;
293 goto out;
294 } 293 }
295 294
296 ret = nouveau_bo_map(nvbo); 295 ret = nouveau_bo_map(nvbo);
297 if (ret) { 296 if (ret) {
298 NV_ERROR(drm, "failed to map fb: %d\n", ret); 297 NV_ERROR(drm, "failed to map fb: %d\n", ret);
299 nouveau_bo_unpin(nvbo); 298 goto out_unpin;
300 nouveau_bo_ref(NULL, &nvbo);
301 goto out;
302 } 299 }
303 300
304 chan = nouveau_nofbaccel ? NULL : drm->channel; 301 chan = nouveau_nofbaccel ? NULL : drm->channel;
@@ -316,13 +313,14 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
316 info = framebuffer_alloc(0, &pdev->dev); 313 info = framebuffer_alloc(0, &pdev->dev);
317 if (!info) { 314 if (!info) {
318 ret = -ENOMEM; 315 ret = -ENOMEM;
319 goto out_unref; 316 goto out_unlock;
320 } 317 }
321 318
322 ret = fb_alloc_cmap(&info->cmap, 256, 0); 319 ret = fb_alloc_cmap(&info->cmap, 256, 0);
323 if (ret) { 320 if (ret) {
324 ret = -ENOMEM; 321 ret = -ENOMEM;
325 goto out_unref; 322 framebuffer_release(info);
323 goto out_unlock;
326 } 324 }
327 325
328 info->par = fbcon; 326 info->par = fbcon;
@@ -337,7 +335,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
337 fbcon->helper.fbdev = info; 335 fbcon->helper.fbdev = info;
338 336
339 strcpy(info->fix.id, "nouveaufb"); 337 strcpy(info->fix.id, "nouveaufb");
340 if (nouveau_nofbaccel) 338 if (!chan)
341 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; 339 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
342 else 340 else
343 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 341 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
@@ -383,8 +381,14 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
383 vga_switcheroo_client_fb_set(dev->pdev, info); 381 vga_switcheroo_client_fb_set(dev->pdev, info);
384 return 0; 382 return 0;
385 383
386out_unref: 384out_unlock:
387 mutex_unlock(&dev->struct_mutex); 385 mutex_unlock(&dev->struct_mutex);
386 if (chan)
387 nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
388out_unpin:
389 nouveau_bo_unpin(nvbo);
390out_unref:
391 nouveau_bo_ref(NULL, &nvbo);
388out: 392out:
389 return ret; 393 return ret;
390} 394}
@@ -413,6 +417,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
413 if (nouveau_fb->nvbo) { 417 if (nouveau_fb->nvbo) {
414 nouveau_bo_unmap(nouveau_fb->nvbo); 418 nouveau_bo_unmap(nouveau_fb->nvbo);
415 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); 419 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
420 nouveau_bo_unpin(nouveau_fb->nvbo);
416 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 421 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
417 nouveau_fb->nvbo = NULL; 422 nouveau_fb->nvbo = NULL;
418 } 423 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c0e324b557c1..e72d09c068a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -50,7 +50,8 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
50 return; 50 return;
51 nvbo->gem = NULL; 51 nvbo->gem = NULL;
52 52
53 if (unlikely(nvbo->pin_refcnt)) { 53 /* Lockdep hates you for doing reserve with gem object lock held */
54 if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
54 nvbo->pin_refcnt = 1; 55 nvbo->pin_refcnt = 1;
55 nouveau_bo_unpin(nvbo); 56 nouveau_bo_unpin(nvbo);
56 } 57 }
@@ -309,10 +310,12 @@ struct validate_op {
309 struct list_head vram_list; 310 struct list_head vram_list;
310 struct list_head gart_list; 311 struct list_head gart_list;
311 struct list_head both_list; 312 struct list_head both_list;
313 struct ww_acquire_ctx ticket;
312}; 314};
313 315
314static void 316static void
315validate_fini_list(struct list_head *list, struct nouveau_fence *fence) 317validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
318 struct ww_acquire_ctx *ticket)
316{ 319{
317 struct list_head *entry, *tmp; 320 struct list_head *entry, *tmp;
318 struct nouveau_bo *nvbo; 321 struct nouveau_bo *nvbo;
@@ -329,17 +332,24 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
329 332
330 list_del(&nvbo->entry); 333 list_del(&nvbo->entry);
331 nvbo->reserved_by = NULL; 334 nvbo->reserved_by = NULL;
332 ttm_bo_unreserve(&nvbo->bo); 335 ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
333 drm_gem_object_unreference_unlocked(nvbo->gem); 336 drm_gem_object_unreference_unlocked(nvbo->gem);
334 } 337 }
335} 338}
336 339
337static void 340static void
338validate_fini(struct validate_op *op, struct nouveau_fence* fence) 341validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
339{ 342{
340 validate_fini_list(&op->vram_list, fence); 343 validate_fini_list(&op->vram_list, fence, &op->ticket);
341 validate_fini_list(&op->gart_list, fence); 344 validate_fini_list(&op->gart_list, fence, &op->ticket);
342 validate_fini_list(&op->both_list, fence); 345 validate_fini_list(&op->both_list, fence, &op->ticket);
346}
347
348static void
349validate_fini(struct validate_op *op, struct nouveau_fence *fence)
350{
351 validate_fini_no_ticket(op, fence);
352 ww_acquire_fini(&op->ticket);
343} 353}
344 354
345static int 355static int
@@ -349,13 +359,11 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
349{ 359{
350 struct nouveau_cli *cli = nouveau_cli(file_priv); 360 struct nouveau_cli *cli = nouveau_cli(file_priv);
351 struct drm_device *dev = chan->drm->dev; 361 struct drm_device *dev = chan->drm->dev;
352 struct nouveau_drm *drm = nouveau_drm(dev);
353 uint32_t sequence;
354 int trycnt = 0; 362 int trycnt = 0;
355 int ret, i; 363 int ret, i;
356 struct nouveau_bo *res_bo = NULL; 364 struct nouveau_bo *res_bo = NULL;
357 365
358 sequence = atomic_add_return(1, &drm->ttm.validate_sequence); 366 ww_acquire_init(&op->ticket, &reservation_ww_class);
359retry: 367retry:
360 if (++trycnt > 100000) { 368 if (++trycnt > 100000) {
361 NV_ERROR(cli, "%s failed and gave up.\n", __func__); 369 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
@@ -370,6 +378,7 @@ retry:
370 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 378 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
371 if (!gem) { 379 if (!gem) {
372 NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle); 380 NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
381 ww_acquire_done(&op->ticket);
373 validate_fini(op, NULL); 382 validate_fini(op, NULL);
374 return -ENOENT; 383 return -ENOENT;
375 } 384 }
@@ -384,21 +393,23 @@ retry:
384 NV_ERROR(cli, "multiple instances of buffer %d on " 393 NV_ERROR(cli, "multiple instances of buffer %d on "
385 "validation list\n", b->handle); 394 "validation list\n", b->handle);
386 drm_gem_object_unreference_unlocked(gem); 395 drm_gem_object_unreference_unlocked(gem);
396 ww_acquire_done(&op->ticket);
387 validate_fini(op, NULL); 397 validate_fini(op, NULL);
388 return -EINVAL; 398 return -EINVAL;
389 } 399 }
390 400
391 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); 401 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
392 if (ret) { 402 if (ret) {
393 validate_fini(op, NULL); 403 validate_fini_no_ticket(op, NULL);
394 if (unlikely(ret == -EAGAIN)) { 404 if (unlikely(ret == -EDEADLK)) {
395 sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
396 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, 405 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
397 sequence); 406 &op->ticket);
398 if (!ret) 407 if (!ret)
399 res_bo = nvbo; 408 res_bo = nvbo;
400 } 409 }
401 if (unlikely(ret)) { 410 if (unlikely(ret)) {
411 ww_acquire_done(&op->ticket);
412 ww_acquire_fini(&op->ticket);
402 drm_gem_object_unreference_unlocked(gem); 413 drm_gem_object_unreference_unlocked(gem);
403 if (ret != -ERESTARTSYS) 414 if (ret != -ERESTARTSYS)
404 NV_ERROR(cli, "fail reserve\n"); 415 NV_ERROR(cli, "fail reserve\n");
@@ -422,6 +433,7 @@ retry:
422 NV_ERROR(cli, "invalid valid domains: 0x%08x\n", 433 NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
423 b->valid_domains); 434 b->valid_domains);
424 list_add_tail(&nvbo->entry, &op->both_list); 435 list_add_tail(&nvbo->entry, &op->both_list);
436 ww_acquire_done(&op->ticket);
425 validate_fini(op, NULL); 437 validate_fini(op, NULL);
426 return -EINVAL; 438 return -EINVAL;
427 } 439 }
@@ -429,6 +441,7 @@ retry:
429 goto retry; 441 goto retry;
430 } 442 }
431 443
444 ww_acquire_done(&op->ticket);
432 return 0; 445 return 0;
433} 446}
434 447
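
The nouveau_gem.c changes above retire the global validate_sequence counter in favour of a per-submission ww_acquire_ctx ticket; the old -EAGAIN retry signal becomes -EDEADLK, meaning this thread lost the wound/wait protocol and must fall back to the slowpath. A condensed sketch of the ticket lifecycle as validate_init()/validate_fini() now use it (the buffers list and nvbo are placeholders, and the real code restarts the whole list after the slowpath):

    ww_acquire_init(&ticket, &reservation_ww_class);

    list_for_each_entry(nvbo, &buffers, entry) {
        ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &ticket);
        if (ret == -EDEADLK)    /* wounded: wait for the winner, then retry */
            ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, &ticket);
        if (ret)
            break;              /* caller unreserves holders, then ww_acquire_fini() */
    }
    ww_acquire_done(&ticket);   /* acquire phase over; no further locks */

    /* ... validate placements, emit the pushbuf, attach the fence ... */

    list_for_each_entry(nvbo, &buffers, entry)
        ttm_bo_unreserve_ticket(&nvbo->bo, &ticket);
    ww_acquire_fini(&ticket);
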
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 8d7a3f0aeb86..502e4290aa8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -36,6 +36,7 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
36 struct drm_file *); 36 struct drm_file *);
37 37
38extern int nouveau_gem_prime_pin(struct drm_gem_object *); 38extern int nouveau_gem_prime_pin(struct drm_gem_object *);
39extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
39extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); 40extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
40extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( 41extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
41 struct drm_device *, size_t size, struct sg_table *); 42 struct drm_device *, size_t size, struct sg_table *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index f53e10874cae..e90468d5e5c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -84,7 +84,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
84int nouveau_gem_prime_pin(struct drm_gem_object *obj) 84int nouveau_gem_prime_pin(struct drm_gem_object *obj)
85{ 85{
86 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 86 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
87 int ret = 0; 87 int ret;
88 88
89 /* pin buffer into GTT */ 89 /* pin buffer into GTT */
90 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT); 90 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
@@ -93,3 +93,10 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
93 93
94 return 0; 94 return 0;
95} 95}
96
97void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
98{
99 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
100
101 nouveau_bo_unpin(nvbo);
102}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d0382f7e86c8..19e3757291fb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -393,9 +393,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
393 return ret; 393 return ret;
394 } 394 }
395 395
396 drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), 396 drm->ttm.mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 1),
397 pci_resource_len(dev->pdev, 1), 397 pci_resource_len(dev->pdev, 1));
398 DRM_MTRR_WC);
399 398
400 /* GART init */ 399 /* GART init */
401 if (drm->agp.stat != ENABLED) { 400 if (drm->agp.stat != ENABLED) {
@@ -428,10 +427,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
428 427
429 nouveau_ttm_global_release(drm); 428 nouveau_ttm_global_release(drm);
430 429
431 if (drm->ttm.mtrr >= 0) { 430 arch_phys_wc_del(drm->ttm.mtrr);
432 drm_mtrr_del(drm->ttm.mtrr, 431 drm->ttm.mtrr = 0;
433 pci_resource_start(drm->dev->pdev, 1),
434 pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
435 drm->ttm.mtrr = -1;
436 }
437} 432}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 09f65dc3d2c8..20c41e73d448 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -1,7 +1,7 @@
1 1
2config DRM_OMAP 2config DRM_OMAP
3 tristate "OMAP DRM" 3 tristate "OMAP DRM"
4 depends on DRM && !CONFIG_FB_OMAP2 4 depends on DRM
5 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM 5 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
6 depends on OMAP2_DSS 6 depends on OMAP2_DSS
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 79b200aee18a..ef161ea982e6 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -253,10 +253,6 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
253 NULL, NULL); 253 NULL, NULL);
254} 254}
255 255
256static void omap_crtc_load_lut(struct drm_crtc *crtc)
257{
258}
259
260static void vblank_cb(void *arg) 256static void vblank_cb(void *arg)
261{ 257{
262 struct drm_crtc *crtc = arg; 258 struct drm_crtc *crtc = arg;
@@ -366,7 +362,6 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
366 .prepare = omap_crtc_prepare, 362 .prepare = omap_crtc_prepare,
367 .commit = omap_crtc_commit, 363 .commit = omap_crtc_commit,
368 .mode_set_base = omap_crtc_mode_set_base, 364 .mode_set_base = omap_crtc_mode_set_base,
369 .load_lut = omap_crtc_load_lut,
370}; 365};
371 366
372const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc) 367const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b11ce609fcc2..002988d09021 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -281,21 +281,7 @@ fail:
281 return ret; 281 return ret;
282} 282}
283 283
284static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
285 u16 red, u16 green, u16 blue, int regno)
286{
287 DBG("fbdev: set gamma");
288}
289
290static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
291 u16 *red, u16 *green, u16 *blue, int regno)
292{
293 DBG("fbdev: get gamma");
294}
295
296static struct drm_fb_helper_funcs omap_fb_helper_funcs = { 284static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
297 .gamma_set = omap_crtc_fb_gamma_set,
298 .gamma_get = omap_crtc_fb_gamma_get,
299 .fb_probe = omap_fbdev_create, 285 .fb_probe = omap_fbdev_create,
300}; 286};
301 287
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index be7cd97a0db0..4fcca8d42796 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -136,44 +136,21 @@ static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
136 kunmap(pages[page_num]); 136 kunmap(pages[page_num]);
137} 137}
138 138
139/*
140 * TODO maybe we can split up drm_gem_mmap to avoid duplicating
141 * some here.. or at least have a drm_dmabuf_mmap helper.
142 */
143static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, 139static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
144 struct vm_area_struct *vma) 140 struct vm_area_struct *vma)
145{ 141{
146 struct drm_gem_object *obj = buffer->priv; 142 struct drm_gem_object *obj = buffer->priv;
143 struct drm_device *dev = obj->dev;
147 int ret = 0; 144 int ret = 0;
148 145
149 if (WARN_ON(!obj->filp)) 146 if (WARN_ON(!obj->filp))
150 return -EINVAL; 147 return -EINVAL;
151 148
152 /* Check for valid size. */ 149 mutex_lock(&dev->struct_mutex);
153 if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) { 150 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
154 ret = -EINVAL; 151 mutex_unlock(&dev->struct_mutex);
155 goto out_unlock; 152 if (ret < 0)
156 } 153 return ret;
157
158 if (!obj->dev->driver->gem_vm_ops) {
159 ret = -EINVAL;
160 goto out_unlock;
161 }
162
163 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
164 vma->vm_ops = obj->dev->driver->gem_vm_ops;
165 vma->vm_private_data = obj;
166 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
167
168 /* Take a ref for this mapping of the object, so that the fault
169 * handler can dereference the mmap offset's pointer to the object.
170 * This reference is cleaned up by the corresponding vm_close
171 * (which should happen whether the vma was created by this call, or
172 * by a vm_open due to mremap or partial unmap or whatever).
173 */
174 vma->vm_ops->open(vma);
175
176out_unlock:
177 154
178 return omap_gem_mmap_obj(obj, vma); 155 return omap_gem_mmap_obj(obj, vma);
179} 156}
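
The omapdrm dma-buf mmap above now defers to the drm_gem_mmap_obj() core helper instead of open-coding the vma setup, exactly what the removed TODO asked for. For reference, a sketch of what such a helper centralizes, reconstructed from the deleted lines (the real core implementation may differ in detail):

    static int sketch_gem_mmap_obj(struct drm_gem_object *obj,
                                   unsigned long obj_size,
                                   struct vm_area_struct *vma)
    {
        if (obj_size < vma->vm_end - vma->vm_start)
            return -EINVAL;     /* mapping larger than the object */
        if (!obj->dev->driver->gem_vm_ops)
            return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a reference for this mapping; the matching vm_close drops
         * it, whether the vma came from here or from mremap/partial unmap. */
        vma->vm_ops->open(vma);
        return 0;
    }
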
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index f86771481317..93c2f2cceb51 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -49,6 +49,11 @@ void qxl_ring_free(struct qxl_ring *ring)
49 kfree(ring); 49 kfree(ring);
50} 50}
51 51
52void qxl_ring_init_hdr(struct qxl_ring *ring)
53{
54 ring->ring->header.notify_on_prod = ring->n_elements;
55}
56
52struct qxl_ring * 57struct qxl_ring *
53qxl_ring_create(struct qxl_ring_header *header, 58qxl_ring_create(struct qxl_ring_header *header,
54 int element_size, 59 int element_size,
@@ -69,7 +74,7 @@ qxl_ring_create(struct qxl_ring_header *header,
69 ring->prod_notify = prod_notify; 74 ring->prod_notify = prod_notify;
70 ring->push_event = push_event; 75 ring->push_event = push_event;
71 if (set_prod_notify) 76 if (set_prod_notify)
72 header->notify_on_prod = ring->n_elements; 77 qxl_ring_init_hdr(ring);
73 spin_lock_init(&ring->lock); 78 spin_lock_init(&ring->lock);
74 return ring; 79 return ring;
75} 80}
@@ -87,7 +92,7 @@ static int qxl_check_header(struct qxl_ring *ring)
87 return ret; 92 return ret;
88} 93}
89 94
90static int qxl_check_idle(struct qxl_ring *ring) 95int qxl_check_idle(struct qxl_ring *ring)
91{ 96{
92 int ret; 97 int ret;
93 struct qxl_ring_header *header = &(ring->ring->header); 98 struct qxl_ring_header *header = &(ring->ring->header);
@@ -375,8 +380,8 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
375 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC); 380 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
376} 381}
377 382
378void qxl_io_create_primary(struct qxl_device *qdev, unsigned width, 383void qxl_io_create_primary(struct qxl_device *qdev,
379 unsigned height, unsigned offset, struct qxl_bo *bo) 384 unsigned offset, struct qxl_bo *bo)
380{ 385{
381 struct qxl_surface_create *create; 386 struct qxl_surface_create *create;
382 387
@@ -384,8 +389,8 @@ void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
384 qdev->ram_header); 389 qdev->ram_header);
385 create = &qdev->ram_header->create_surface; 390 create = &qdev->ram_header->create_surface;
386 create->format = bo->surf.format; 391 create->format = bo->surf.format;
387 create->width = width; 392 create->width = bo->surf.width;
388 create->height = height; 393 create->height = bo->surf.height;
389 create->stride = bo->surf.stride; 394 create->stride = bo->surf.stride;
390 create->mem = qxl_bo_physical_address(qdev, bo, offset); 395 create->mem = qxl_bo_physical_address(qdev, bo, offset);
391 396
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 823d29e926ec..f76f5dd7bfc4 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -30,53 +30,9 @@
30#include "qxl_object.h" 30#include "qxl_object.h"
31#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32 32
33static void qxl_crtc_set_to_mode(struct qxl_device *qdev, 33static bool qxl_head_enabled(struct qxl_head *head)
34 struct drm_connector *connector,
35 struct qxl_head *head)
36{ 34{
37 struct drm_device *dev = connector->dev; 35 return head->width && head->height;
38 struct drm_display_mode *mode, *t;
39 int width = head->width;
40 int height = head->height;
41
42 if (width < 320 || height < 240) {
43 qxl_io_log(qdev, "%s: bad head: %dx%d", width, height);
44 width = 1024;
45 height = 768;
46 }
47 if (width * height * 4 > 16*1024*1024) {
48 width = 1024;
49 height = 768;
50 }
51 /* TODO: go over regular modes and removed preferred? */
52 list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
53 drm_mode_remove(connector, mode);
54 mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
55 mode->type |= DRM_MODE_TYPE_PREFERRED;
56 mode->status = MODE_OK;
57 drm_mode_probed_add(connector, mode);
58 qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
59}
60
61void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
62{
63 struct drm_connector *connector;
64 int i;
65 struct drm_device *dev = qdev->ddev;
66
67 i = 0;
68 qxl_io_log(qdev, "%s: %d, %d\n", __func__,
69 dev->mode_config.num_connector,
70 qdev->monitors_config->count);
71 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
72 if (i > qdev->monitors_config->count) {
73 /* crtc will be reported as disabled */
74 continue;
75 }
76 qxl_crtc_set_to_mode(qdev, connector,
77 &qdev->monitors_config->heads[i]);
78 ++i;
79 }
80} 36}
81 37
82void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) 38void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
@@ -106,7 +62,6 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
106 int num_monitors; 62 int num_monitors;
107 uint32_t crc; 63 uint32_t crc;
108 64
109 BUG_ON(!qdev->monitors_config);
110 num_monitors = qdev->rom->client_monitors_config.count; 65 num_monitors = qdev->rom->client_monitors_config.count;
111 crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, 66 crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
112 sizeof(qdev->rom->client_monitors_config)); 67 sizeof(qdev->rom->client_monitors_config));
@@ -117,8 +72,8 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
117 return 1; 72 return 1;
118 } 73 }
119 if (num_monitors > qdev->monitors_config->max_allowed) { 74 if (num_monitors > qdev->monitors_config->max_allowed) {
120 DRM_INFO("client monitors list will be truncated: %d < %d\n", 75 DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
121 qdev->monitors_config->max_allowed, num_monitors); 76 qdev->monitors_config->max_allowed, num_monitors);
122 num_monitors = qdev->monitors_config->max_allowed; 77 num_monitors = qdev->monitors_config->max_allowed;
123 } else { 78 } else {
124 num_monitors = qdev->rom->client_monitors_config.count; 79 num_monitors = qdev->rom->client_monitors_config.count;
@@ -132,18 +87,15 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
132 &qdev->rom->client_monitors_config.heads[i]; 87 &qdev->rom->client_monitors_config.heads[i];
133 struct qxl_head *client_head = 88 struct qxl_head *client_head =
134 &qdev->client_monitors_config->heads[i]; 89 &qdev->client_monitors_config->heads[i];
135 struct qxl_head *head = &qdev->monitors_config->heads[i]; 90 client_head->x = c_rect->left;
136 client_head->x = head->x = c_rect->left; 91 client_head->y = c_rect->top;
137 client_head->y = head->y = c_rect->top; 92 client_head->width = c_rect->right - c_rect->left;
138 client_head->width = head->width = 93 client_head->height = c_rect->bottom - c_rect->top;
139 c_rect->right - c_rect->left; 94 client_head->surface_id = 0;
140 client_head->height = head->height = 95 client_head->id = i;
141 c_rect->bottom - c_rect->top; 96 client_head->flags = 0;
142 client_head->surface_id = head->surface_id = 0; 97 DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
143 client_head->id = head->id = i; 98 client_head->x, client_head->y);
144 client_head->flags = head->flags = 0;
145 QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
146 head->x, head->y);
147 } 99 }
148 return 0; 100 return 0;
149} 101}
@@ -155,10 +107,7 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
155 qxl_io_log(qdev, "failed crc check for client_monitors_config," 107 qxl_io_log(qdev, "failed crc check for client_monitors_config,"
156 " retrying\n"); 108 " retrying\n");
157 } 109 }
158 qxl_crtc_set_from_monitors_config(qdev); 110 drm_helper_hpd_irq_event(qdev->ddev);
159 /* fire off a uevent and let userspace tell us what to do */
160 qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
161 drm_sysfs_hotplug_event(qdev->ddev);
162} 111}
163 112
164static int qxl_add_monitors_config_modes(struct drm_connector *connector) 113static int qxl_add_monitors_config_modes(struct drm_connector *connector)
@@ -170,9 +119,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector)
170 struct drm_display_mode *mode = NULL; 119 struct drm_display_mode *mode = NULL;
171 struct qxl_head *head; 120 struct qxl_head *head;
172 121
173 if (!qdev->monitors_config) 122 if (!qdev->client_monitors_config)
174 return 0; 123 return 0;
175 head = &qdev->monitors_config->heads[h]; 124 head = &qdev->client_monitors_config->heads[h];
176 125
177 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, 126 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
178 false); 127 false);
@@ -222,12 +171,6 @@ static int qxl_add_common_modes(struct drm_connector *connector)
222 return i - 1; 171 return i - 1;
223} 172}
224 173
225static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
226 u16 *blue, uint32_t start, uint32_t size)
227{
228 /* TODO */
229}
230
231static void qxl_crtc_destroy(struct drm_crtc *crtc) 174static void qxl_crtc_destroy(struct drm_crtc *crtc)
232{ 175{
233 struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); 176 struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
@@ -255,11 +198,11 @@ qxl_hide_cursor(struct qxl_device *qdev)
255 qxl_release_unreserve(qdev, release); 198 qxl_release_unreserve(qdev, release);
256} 199}
257 200
258static int qxl_crtc_cursor_set(struct drm_crtc *crtc, 201static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
259 struct drm_file *file_priv, 202 struct drm_file *file_priv,
260 uint32_t handle, 203 uint32_t handle,
261 uint32_t width, 204 uint32_t width,
262 uint32_t height) 205 uint32_t height, int32_t hot_x, int32_t hot_y)
263{ 206{
264 struct drm_device *dev = crtc->dev; 207 struct drm_device *dev = crtc->dev;
265 struct qxl_device *qdev = dev->dev_private; 208 struct qxl_device *qdev = dev->dev_private;
@@ -315,8 +258,8 @@ static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
315 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; 258 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
316 cursor->header.width = 64; 259 cursor->header.width = 64;
317 cursor->header.height = 64; 260 cursor->header.height = 64;
318 cursor->header.hot_spot_x = 0; 261 cursor->header.hot_spot_x = hot_x;
319 cursor->header.hot_spot_y = 0; 262 cursor->header.hot_spot_y = hot_y;
320 cursor->data_size = size; 263 cursor->data_size = size;
321 cursor->chunk.next_chunk = 0; 264 cursor->chunk.next_chunk = 0;
322 cursor->chunk.prev_chunk = 0; 265 cursor->chunk.prev_chunk = 0;
@@ -397,9 +340,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
397 340
398 341
399static const struct drm_crtc_funcs qxl_crtc_funcs = { 342static const struct drm_crtc_funcs qxl_crtc_funcs = {
400 .cursor_set = qxl_crtc_cursor_set, 343 .cursor_set2 = qxl_crtc_cursor_set2,
401 .cursor_move = qxl_crtc_cursor_move, 344 .cursor_move = qxl_crtc_cursor_move,
402 .gamma_set = qxl_crtc_gamma_set,
403 .set_config = drm_crtc_helper_set_config, 345 .set_config = drm_crtc_helper_set_config,
404 .destroy = qxl_crtc_destroy, 346 .destroy = qxl_crtc_destroy,
405}; 347};
@@ -506,7 +448,7 @@ qxl_send_monitors_config(struct qxl_device *qdev)
506 for (i = 0 ; i < qdev->monitors_config->count ; ++i) { 448 for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
507 struct qxl_head *head = &qdev->monitors_config->heads[i]; 449 struct qxl_head *head = &qdev->monitors_config->heads[i];
508 450
509 if (head->y > 8192 || head->y < head->x || 451 if (head->y > 8192 || head->x > 8192 ||
510 head->width > 8192 || head->height > 8192) { 452 head->width > 8192 || head->height > 8192) {
511 DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", 453 DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
512 i, head->width, head->height, 454 i, head->width, head->height,
@@ -517,16 +459,19 @@ qxl_send_monitors_config(struct qxl_device *qdev)
 	qxl_io_monitors_config(qdev);
 }
 
-static void qxl_monitors_config_set_single(struct qxl_device *qdev,
-					   unsigned x, unsigned y,
-					   unsigned width, unsigned height)
+static void qxl_monitors_config_set(struct qxl_device *qdev,
+				    int index,
+				    unsigned x, unsigned y,
+				    unsigned width, unsigned height,
+				    unsigned surf_id)
 {
-	DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
-	qdev->monitors_config->count = 1;
-	qdev->monitors_config->heads[0].x = x;
-	qdev->monitors_config->heads[0].y = y;
-	qdev->monitors_config->heads[0].width = width;
-	qdev->monitors_config->heads[0].height = height;
+	DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y);
+	qdev->monitors_config->heads[index].x = x;
+	qdev->monitors_config->heads[index].y = y;
+	qdev->monitors_config->heads[index].width = width;
+	qdev->monitors_config->heads[index].height = height;
+	qdev->monitors_config->heads[index].surface_id = surf_id;
+
 }
 
 static int qxl_crtc_mode_set(struct drm_crtc *crtc,
@@ -540,10 +485,11 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 	struct qxl_mode *m = (void *)mode->private;
 	struct qxl_framebuffer *qfb;
 	struct qxl_bo *bo, *old_bo = NULL;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
 	uint32_t width, height, base_offset;
 	bool recreate_primary = false;
 	int ret;
-
+	int surf_id;
 	if (!crtc->fb) {
 		DRM_DEBUG_KMS("No FB bound\n");
 		return 0;
@@ -567,7 +513,8 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 		  adjusted_mode->hdisplay,
 		  adjusted_mode->vdisplay);
 
-	recreate_primary = true;
+	if (qcrtc->index == 0)
+		recreate_primary = true;
 
 	width = mode->hdisplay;
 	height = mode->vdisplay;
@@ -588,8 +535,11 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 			 "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
 			 width, height, bo->surf.width,
 			 bo->surf.height, bo->surf.stride, bo->surf.format);
-		qxl_io_create_primary(qdev, width, height, base_offset, bo);
+		qxl_io_create_primary(qdev, base_offset, bo);
 		bo->is_primary = true;
+		surf_id = 0;
+	} else {
+		surf_id = bo->surface_id;
 	}
 
 	if (old_bo && old_bo != bo) {
@@ -599,11 +549,9 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 		qxl_bo_unreserve(old_bo);
 	}
 
-	if (qdev->monitors_config->count == 0) {
-		qxl_monitors_config_set_single(qdev, x, y,
-					       mode->hdisplay,
-					       mode->vdisplay);
-	}
+	qxl_monitors_config_set(qdev, qcrtc->index, x, y,
+				mode->hdisplay,
+				mode->vdisplay, surf_id);
 	return 0;
 }
 
@@ -619,21 +567,36 @@ static void qxl_crtc_commit(struct drm_crtc *crtc)
 	DRM_DEBUG("\n");
 }
 
-static void qxl_crtc_load_lut(struct drm_crtc *crtc)
+static void qxl_crtc_disable(struct drm_crtc *crtc)
 {
-	DRM_DEBUG("\n");
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	if (crtc->fb) {
+		struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->fb);
+		struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
+		int ret;
+		ret = qxl_bo_reserve(bo, false);
+		qxl_bo_unpin(bo);
+		qxl_bo_unreserve(bo);
+		crtc->fb = NULL;
+	}
+
+	qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0);
+
+	qxl_send_monitors_config(qdev);
 }
 
 static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
 	.dpms = qxl_crtc_dpms,
+	.disable = qxl_crtc_disable,
 	.mode_fixup = qxl_crtc_mode_fixup,
 	.mode_set = qxl_crtc_mode_set,
 	.prepare = qxl_crtc_prepare,
 	.commit = qxl_crtc_commit,
-	.load_lut = qxl_crtc_load_lut,
 };
 
-static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
+static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
 {
 	struct qxl_crtc *qxl_crtc;
 
@@ -642,7 +605,7 @@ static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
 		return -ENOMEM;
 
 	drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
-
+	qxl_crtc->index = crtc_id;
 	drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
 	drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
 	return 0;
@@ -670,18 +633,13 @@ static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
 					struct drm_encoder *encoder)
 {
 	int i;
+	struct qxl_output *output = drm_encoder_to_qxl_output(encoder);
 	struct qxl_head *head;
 	struct drm_display_mode *mode;
 
 	BUG_ON(!encoder);
 	/* TODO: ugly, do better */
-	for (i = 0 ; (encoder->possible_crtcs != (1 << i)) && i < 32; ++i)
-		;
-	if (encoder->possible_crtcs != (1 << i)) {
-		DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
-			  encoder->possible_crtcs);
-		return;
-	}
+	i = output->index;
 	if (!qdev->monitors_config ||
 	    qdev->monitors_config->max_allowed <= i) {
 		DRM_ERROR(
@@ -699,7 +657,6 @@ static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
 		DRM_DEBUG("missing for multiple monitors: no head holes\n");
 	head = &qdev->monitors_config->heads[i];
 	head->id = i;
-	head->surface_id = 0;
 	if (encoder->crtc->enabled) {
 		mode = &encoder->crtc->mode;
 		head->width = mode->hdisplay;
@@ -714,8 +671,8 @@ static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
 		head->x = 0;
 		head->y = 0;
 	}
-	DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
-		  i, head->x, head->y, head->width, head->height);
+	DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n",
+		      i, head->x, head->y, head->width, head->height, qdev->monitors_config->count);
 	head->flags = 0;
 	/* TODO - somewhere else to call this for multiple monitors
 	 * (config_commit?) */
@@ -810,8 +767,9 @@ static enum drm_connector_status qxl_conn_detect(
 
 	/* The first monitor is always connected */
 	connected = (output->index == 0) ||
-		    (qdev->monitors_config &&
-		     qdev->monitors_config->count > output->index);
+		    (qdev->client_monitors_config &&
+		     qdev->client_monitors_config->count > output->index &&
+		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
 
 	DRM_DEBUG("\n");
 	return connected ? connector_status_connected
@@ -875,6 +833,8 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
 			 DRM_MODE_ENCODER_VIRTUAL);
 
+	/* we get HPD via client monitors config */
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	encoder->possible_crtcs = 1 << num_output;
 	drm_mode_connector_attach_encoder(&qxl_output->base,
 					  &qxl_output->enc);
@@ -914,16 +874,14 @@ static const struct drm_mode_config_funcs qxl_mode_funcs = {
 	.fb_create = qxl_user_framebuffer_create,
 };
 
-int qxl_modeset_init(struct qxl_device *qdev)
+int qxl_create_monitors_object(struct qxl_device *qdev)
 {
-	int i;
 	int ret;
 	struct drm_gem_object *gobj;
-	int max_allowed = QXL_NUM_OUTPUTS;
+	int max_allowed = qxl_num_crtc;
 	int monitors_config_size = sizeof(struct qxl_monitors_config) +
 		max_allowed * sizeof(struct qxl_head);
 
-	drm_mode_config_init(qdev->ddev);
 	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
 				    QXL_GEM_DOMAIN_VRAM,
 				    false, false, NULL, &gobj);
@@ -932,13 +890,59 @@ int qxl_modeset_init(struct qxl_device *qdev)
 		return -ENOMEM;
 	}
 	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_reserve(qdev->monitors_config_bo, false);
+	if (ret)
+		return ret;
+
+	ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
+	if (ret) {
+		qxl_bo_unreserve(qdev->monitors_config_bo);
+		return ret;
+	}
+
+	qxl_bo_unreserve(qdev->monitors_config_bo);
+
 	qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+
 	qdev->monitors_config = qdev->monitors_config_bo->kptr;
 	qdev->ram_header->monitors_config =
 		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
 
 	memset(qdev->monitors_config, 0, monitors_config_size);
 	qdev->monitors_config->max_allowed = max_allowed;
+	return 0;
+}
+
+int qxl_destroy_monitors_object(struct qxl_device *qdev)
+{
+	int ret;
+
+	qdev->monitors_config = NULL;
+	qdev->ram_header->monitors_config = 0;
+
+	qxl_bo_kunmap(qdev->monitors_config_bo);
+	ret = qxl_bo_reserve(qdev->monitors_config_bo, false);
+	if (ret)
+		return ret;
+
+	qxl_bo_unpin(qdev->monitors_config_bo);
+	qxl_bo_unreserve(qdev->monitors_config_bo);
+
+	qxl_bo_unref(&qdev->monitors_config_bo);
+	return 0;
+}
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+	int i;
+	int ret;
+
+	drm_mode_config_init(qdev->ddev);
+
+	ret = qxl_create_monitors_object(qdev);
+	if (ret)
+		return ret;
 
 	qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
 
@@ -949,7 +953,7 @@ int qxl_modeset_init(struct qxl_device *qdev)
 	qdev->ddev->mode_config.max_height = 8192;
 
 	qdev->ddev->mode_config.fb_base = qdev->vram_base;
-	for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
+	for (i = 0 ; i < qxl_num_crtc; ++i) {
 		qdev_crtc_init(qdev->ddev, i);
 		qdev_output_init(qdev->ddev, i);
 	}
@@ -966,6 +970,8 @@ int qxl_modeset_init(struct qxl_device *qdev)
 void qxl_modeset_fini(struct qxl_device *qdev)
 {
 	qxl_fbdev_fini(qdev);
+
+	qxl_destroy_monitors_object(qdev);
 	if (qdev->mode_info.mode_config_initialized) {
 		drm_mode_config_cleanup(qdev->ddev);
 		qdev->mode_info.mode_config_initialized = false;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index aa291d8a98a2..df0b577a6608 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -33,8 +33,9 @@
 
 #include "drmP.h"
 #include "drm/drm.h"
-
+#include "drm_crtc_helper.h"
 #include "qxl_drv.h"
+#include "qxl_object.h"
 
 extern int qxl_max_ioctls;
 static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
@@ -47,10 +48,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int qxl_modeset = -1;
+int qxl_num_crtc = 4;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, qxl_modeset, int, 0400);
 
+MODULE_PARM_DESC(num_heads, "Number of virtual crtcs to expose (default 4)");
+module_param_named(num_heads, qxl_num_crtc, int, 0400);
+
 static struct drm_driver qxl_driver;
 static struct pci_driver qxl_pci_driver;
 
@@ -73,13 +78,6 @@ qxl_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static struct pci_driver qxl_pci_driver = {
-	.name = DRIVER_NAME,
-	.id_table = pciidlist,
-	.probe = qxl_pci_probe,
-	.remove = qxl_pci_remove,
-};
-
 static const struct file_operations qxl_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -90,6 +88,130 @@ static const struct file_operations qxl_fops = {
 	.mmap = qxl_mmap,
 };
 
+static int qxl_drm_freeze(struct drm_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	drm_kms_helper_poll_disable(dev);
+
+	console_lock();
+	qxl_fbdev_set_suspend(qdev, 1);
+	console_unlock();
+
+	/* unpin the front buffers */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		if (crtc->enabled)
+			(*crtc_funcs->disable)(crtc);
+	}
+
+	qxl_destroy_monitors_object(qdev);
+	qxl_surf_evict(qdev);
+	qxl_vram_evict(qdev);
+
+	while (!qxl_check_idle(qdev->command_ring));
+	while (!qxl_check_idle(qdev->release_ring))
+		qxl_queue_garbage_collect(qdev, 1);
+
+	pci_save_state(pdev);
+
+	return 0;
+}
+
+static int qxl_drm_resume(struct drm_device *dev, bool thaw)
+{
+	struct qxl_device *qdev = dev->dev_private;
+
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	if (!thaw) {
+		qxl_reinit_memslots(qdev);
+		qxl_ring_init_hdr(qdev->release_ring);
+	}
+
+	qxl_create_monitors_object(qdev);
+	drm_helper_resume_force_mode(dev);
+
+	console_lock();
+	qxl_fbdev_set_suspend(qdev, 0);
+	console_unlock();
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int qxl_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = qxl_drm_freeze(drm_dev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+static int qxl_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pci_enable_device(pdev)) {
+		return -EIO;
+	}
+
+	return qxl_drm_resume(drm_dev, false);
+}
+
+static int qxl_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return qxl_drm_resume(drm_dev, true);
+}
+
+static int qxl_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return qxl_drm_freeze(drm_dev);
+}
+
+static int qxl_pm_restore(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct qxl_device *qdev = drm_dev->dev_private;
+
+	qxl_io_reset(qdev);
+	return qxl_drm_resume(drm_dev, false);
+}
+
+static const struct dev_pm_ops qxl_pm_ops = {
+	.suspend = qxl_pm_suspend,
+	.resume = qxl_pm_resume,
+	.freeze = qxl_pm_freeze,
+	.thaw = qxl_pm_thaw,
+	.poweroff = qxl_pm_freeze,
+	.restore = qxl_pm_restore,
+};
+static struct pci_driver qxl_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = qxl_pci_probe,
+	.remove = qxl_pci_remove,
+	.driver.pm = &qxl_pm_ops,
+};
+
 static struct drm_driver qxl_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET |
 			   DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 43d06ab28a21..aacb791464a3 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -55,11 +55,10 @@
 #define DRIVER_MINOR 1
 #define DRIVER_PATCHLEVEL 0
 
-#define QXL_NUM_OUTPUTS 1
-
 #define QXL_DEBUGFS_MAX_COMPONENTS 32
 
 extern int qxl_log_level;
+extern int qxl_num_crtc;
 
 enum {
 	QXL_INFO_LEVEL = 1,
@@ -139,6 +138,7 @@ struct qxl_reloc_list {
 
 struct qxl_crtc {
 	struct drm_crtc base;
+	int index;
 	int cur_x;
 	int cur_y;
 };
@@ -156,7 +156,7 @@ struct qxl_framebuffer {
 
 #define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
 #define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
-#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
 #define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
 
 struct qxl_mman {
@@ -331,6 +331,10 @@ void qxl_modeset_fini(struct qxl_device *qdev);
 int qxl_bo_init(struct qxl_device *qdev);
 void qxl_bo_fini(struct qxl_device *qdev);
 
+void qxl_reinit_memslots(struct qxl_device *qdev);
+int qxl_surf_evict(struct qxl_device *qdev);
+int qxl_vram_evict(struct qxl_device *qdev);
+
 struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
 				 int element_size,
 				 int n_elements,
@@ -338,6 +342,8 @@ struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
 				 bool set_prod_notify,
 				 wait_queue_head_t *push_event);
 void qxl_ring_free(struct qxl_ring *ring);
+void qxl_ring_init_hdr(struct qxl_ring *ring);
+int qxl_check_idle(struct qxl_ring *ring);
 
 static inline void *
 qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
@@ -365,6 +371,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev);
 int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
 				  struct drm_file *file_priv,
 				  uint32_t *handle);
+void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
 
 /* qxl_display.c */
 int
@@ -374,6 +381,8 @@ qxl_framebuffer_init(struct drm_device *dev,
 		     struct drm_gem_object *obj);
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
 void qxl_send_monitors_config(struct qxl_device *qdev);
+int qxl_create_monitors_object(struct qxl_device *qdev);
+int qxl_destroy_monitors_object(struct qxl_device *qdev);
 
 /* used by qxl_debugfs only */
 void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
@@ -435,7 +444,7 @@ void qxl_update_screen(struct qxl_device *qxl);
 /* qxl io operations (qxl_cmd.c) */
 
 void qxl_io_create_primary(struct qxl_device *qdev,
-			   unsigned width, unsigned height, unsigned offset,
+			   unsigned offset,
 			   struct qxl_bo *bo);
 void qxl_io_destroy_primary(struct qxl_device *qdev);
 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
@@ -528,6 +537,7 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
 
 /* qxl_fb.c */
 int qxl_fb_init(struct qxl_device *qdev);
+bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
 
 int qxl_debugfs_add_files(struct qxl_device *qdev,
 			  struct drm_info_list *files,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index b3c51275df5c..76f39d88d684 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -520,10 +520,6 @@ static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
 }
 
 static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
-	/* TODO
-	.gamma_set = qxl_crtc_fb_gamma_set,
-	.gamma_get = qxl_crtc_fb_gamma_get,
-	*/
 	.fb_probe = qxl_fb_find_or_create_single,
 };
 
@@ -542,7 +538,7 @@ int qxl_fbdev_init(struct qxl_device *qdev)
 	qfbdev->helper.funcs = &qxl_fb_helper_funcs;
 
 	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
-				 1 /* num_crtc - QXL supports just 1 */,
+				 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
 				 QXLFB_CONN_LIMIT);
 	if (ret) {
 		kfree(qfbdev);
@@ -564,4 +560,14 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
 	qdev->mode_info.qfbdev = NULL;
 }
 
+void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
+{
+	fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
+}
 
+bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
+{
+	if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
+		return true;
+	return false;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index a4b71b25fa53..6ba49d9922f2 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -183,6 +183,12 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 	/* TODO copy slow path code from i915 */
 	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
 	unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+
+	{
+		struct qxl_drawable *draw = fb_cmd;
+
+		draw->mm_time = qdev->rom->mm_clock;
+	}
 	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
 	if (unwritten) {
 		DRM_ERROR("got unwritten %d\n", unwritten);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e27ce2a907cf..9e8da9ee9731 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -26,6 +26,7 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
+#include <drm/drm_crtc_helper.h>
 #include <linux/io-mapping.h>
 
 int qxl_log_level;
@@ -72,21 +73,28 @@ static bool qxl_check_device(struct qxl_device *qdev)
 	return true;
 }
 
+static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
+			  struct qxl_memslot *slot)
+{
+	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
+	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
+	qxl_io_memslot_add(qdev, slot_index);
+}
+
 static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
 	unsigned long start_phys_addr, unsigned long end_phys_addr)
 {
 	uint64_t high_bits;
 	struct qxl_memslot *slot;
 	uint8_t slot_index;
-	struct qxl_ram_header *ram_header = qdev->ram_header;
 
 	slot_index = qdev->rom->slots_start + slot_index_offset;
 	slot = &qdev->mem_slots[slot_index];
 	slot->start_phys_addr = start_phys_addr;
 	slot->end_phys_addr = end_phys_addr;
-	ram_header->mem_slot.mem_start = slot->start_phys_addr;
-	ram_header->mem_slot.mem_end = slot->end_phys_addr;
-	qxl_io_memslot_add(qdev, slot_index);
+
+	setup_hw_slot(qdev, slot_index, slot);
+
 	slot->generation = qdev->rom->slot_generation;
 	high_bits = slot_index << qdev->slot_gen_bits;
 	high_bits |= slot->generation;
@@ -95,6 +103,12 @@ static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
 	return slot_index;
 }
 
+void qxl_reinit_memslots(struct qxl_device *qdev)
+{
+	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
+	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
+}
+
 static void qxl_gc_work(struct work_struct *work)
 {
 	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
@@ -294,6 +308,8 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out;
 	}
 
+	drm_kms_helper_poll_init(qdev->ddev);
+
 	return 0;
 out:
 	kfree(qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index d9b12e7bc6e1..1191fe7788c9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -363,3 +363,13 @@ int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
 		return ret;
 	return 0;
 }
+
+int qxl_surf_evict(struct qxl_device *qdev)
+{
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+}
+
+int qxl_vram_evict(struct qxl_device *qdev)
+{
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b4fd89fbd8b7..ee7ad79ce781 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -57,11 +57,6 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 	return bo->tbo.num_pages << PAGE_SHIFT;
 }
 
-static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
-{
-	return !!atomic_read(&bo->tbo.reserved);
-}
-
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
 	return bo->tbo.addr_space_offset;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 86c5e3611892..c3df52c1a60c 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,10 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
 	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
 	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-	si_blit_shaders.o radeon_prime.o radeon_uvd.o
+	si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
+	r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
+	rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
+	trinity_smc.o ni_dpm.o si_smc.o si_dpm.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index ca4b038050d2..06192698bd96 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -69,6 +69,8 @@
 #define ENCODER_OBJECT_ID_ALMOND 0x22
 #define ENCODER_OBJECT_ID_TRAVIS 0x23
 #define ENCODER_OBJECT_ID_NUTMEG 0x22
+#define ENCODER_OBJECT_ID_HDMI_ANX9805 0x26
+
 /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -86,6 +88,8 @@
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
 #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
+#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
 
@@ -364,6 +368,14 @@
 						 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
 						 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
 
+#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+						 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+						 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+						 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+						 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
+
 #define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
 						 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 						 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
@@ -392,6 +404,10 @@
 						 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 						 ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
 
+#define ENCODER_HDMI_ANX9805_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+						 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+						 ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
@@ -461,6 +477,14 @@
 						  GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
 						  CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+						  GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+						  CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+						  GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+						  CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
 						  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 						  CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -473,6 +497,10 @@
 						  GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
 						  CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
 
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+						  GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+						  CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
 						  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 						  CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
@@ -541,6 +569,18 @@
 						  GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
 						  CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
 
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+						  GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+						  CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+						  GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+						  CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+						  GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+						  CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
 						  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 						  CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 0ee573743de9..16b120c3f144 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -74,6 +74,8 @@
 #define ATOM_PPLL2 1
 #define ATOM_DCPLL 2
 #define ATOM_PPLL0 2
+#define ATOM_PPLL3 3
+
 #define ATOM_EXT_PLL1 8
 #define ATOM_EXT_PLL2 9
 #define ATOM_EXT_CLOCK 10
@@ -259,7 +261,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT AdjustDisplayPll; //Atomic Table, used by various SW componentes.
   USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
   USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
-  USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
+  USHORT SetUniphyInstance; //Atomic Table, only used by Bios
   USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
   USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
   USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -271,7 +273,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
   USHORT PatchMCSetting; //only used by BIOS
   USHORT MC_SEQ_Control; //only used by BIOS
-  USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+  USHORT Gfx_Harvesting; //Atomic Table, Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting
   USHORT EnableScaler; //Atomic Table, used only by Bios
   USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
   USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -328,7 +330,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
 #define UNIPHYTransmitterControl DIG1TransmitterControl
 #define LVTMATransmitterControl DIG2TransmitterControl
 #define SetCRTC_DPM_State GetConditionalGoldenSetting
-#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+#define ASIC_StaticPwrMgtStatusChange SetUniphyInstance
 #define HPDInterruptService ReadHWAssistedI2CStatus
 #define EnableVGA_Access GetSCLKOverMCLKRatio
 #define EnableYUV GetDispObjectInfo
@@ -338,7 +340,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
 #define TMDSAEncoderControl PatchMCSetting
 #define LVDSEncoderControl MC_SEQ_Control
 #define LCD1OutputControl HW_Misc_Operation
-
+#define TV1OutputControl Gfx_Harvesting
 
 typedef struct _ATOM_MASTER_COMMAND_TABLE
 {
@@ -478,11 +480,11 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
 typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
 {
 #if ATOM_BIG_ENDIAN
-  ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
+  ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
   ULONG ulClock:24; //Input= target clock, output = actual clock
 #else
   ULONG ulClock:24; //Input= target clock, output = actual clock
-  ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
+  ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
 #endif
 }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
 
@@ -504,6 +506,32 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
   UCHAR ucReserved;
 }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
 
+
+typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+  ULONG ulReserved[2];
+}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
+
+//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
+#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f
+#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00
+#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01
+
+typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider
+  ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter: PLL FB divider
+  UCHAR ucPllRefDiv; //Output Parameter: PLL ref divider
+  UCHAR ucPllPostDiv; //Output Parameter: PLL post divider
+  UCHAR ucPllCntlFlag; //Output Flags: control flag
+  UCHAR ucReserved;
+}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
+
+//ucPllCntlFlag
+#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
+
+
 // ucInputFlag
 #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
 
@@ -1686,6 +1714,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
 #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
 #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
 #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
+#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
 
 typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
 {
@@ -2102,6 +2131,17 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
 }DVO_ENCODER_CONTROL_PARAMETERS_V3;
 #define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
 
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+{
+  USHORT usPixelClock;
+  UCHAR ucDVOConfig;
+  UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR ucBitPerColor; //please refer to definition of PANEL_xBIT_PER_COLOR
+  UCHAR ucReseved[3];
+}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+
+
 //ucTableFormatRevision=1
 //ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for
 // bit1=0: non-coherent mode
@@ -2165,7 +2205,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
 #define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
 
 #define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
 
 typedef struct _SET_VOLTAGE_PARAMETERS
@@ -2200,15 +2240,20 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
 //SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
 #define ATOM_SET_VOLTAGE 0 //Set voltage Level
 #define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
-#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
-#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
-#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID
+#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase, only for SVID/PVID regulator
+#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used from SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID, not used for SetVoltage v1.4
+#define ATOM_GET_LEAKAGE_ID 8 //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4
 
 // define vitual voltage id in usVoltageLevel
 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
 #define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
 #define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
 #define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
+#define ATOM_VIRTUAL_VOLTAGE_ID4 0xff05
+#define ATOM_VIRTUAL_VOLTAGE_ID5 0xff06
+#define ATOM_VIRTUAL_VOLTAGE_ID6 0xff07
+#define ATOM_VIRTUAL_VOLTAGE_ID7 0xff08
 
 typedef struct _SET_VOLTAGE_PS_ALLOCATION
 {
@@ -2628,7 +2673,8 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
   ULONG ulFirmwareRevision;
   ULONG ulDefaultEngineClock; //In 10Khz unit
   ULONG ulDefaultMemoryClock; //In 10Khz unit
-  ULONG ulReserved[2];
+  ULONG ulSPLL_OutputFreq; //In 10Khz unit
+  ULONG ulGPUPLL_OutputFreq; //In 10Khz unit
   ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
   ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
   ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
@@ -3813,6 +3859,12 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
   UCHAR ucGPIO_ID;
 }ATOM_GPIO_PIN_ASSIGNMENT;
 
+//ucGPIO_ID pre-define id for multiple usage
+//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC swithing feature is enable
+#define PP_AC_DC_SWITCH_GPIO_PINID 60
+//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
+#define VDDC_VRHOT_GPIO_PINID 61
+
 typedef struct _ATOM_GPIO_PIN_LUT
 {
   ATOM_COMMON_TABLE_HEADER sHeader;
@@ -4074,17 +4126,19 @@ typedef struct _EXT_DISPLAY_PATH
 
 //usCaps
 #define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
+#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02
 
 typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
 {
   ATOM_COMMON_TABLE_HEADER sHeader;
   UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
   EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
   UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
   UCHAR uc3DStereoPinId; // use for eDP panel
   UCHAR ucRemoteDisplayConfig;
   UCHAR uceDPToLVDSRxId;
-  UCHAR Reserved[4]; // for potential expansion
+  UCHAR ucFixDPVoltageSwing; // usCaps[1]=1, this indicate DP_LANE_SET value
+  UCHAR Reserved[3]; // for potential expansion
 }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
 //Related definitions, all records are different but they have a commond header
@@ -4416,6 +4470,13 @@ typedef struct _ATOM_VOLTAGE_CONTROL
 #define VOLTAGE_CONTROL_ID_CHL822x 0x08
 #define VOLTAGE_CONTROL_ID_VT1586M 0x09
 #define VOLTAGE_CONTROL_ID_UP1637 0x0A
+#define VOLTAGE_CONTROL_ID_CHL8214 0x0B
+#define VOLTAGE_CONTROL_ID_UP1801 0x0C
+#define VOLTAGE_CONTROL_ID_ST6788A 0x0D
+#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E
+#define VOLTAGE_CONTROL_ID_AD527x 0x0F
+#define VOLTAGE_CONTROL_ID_NCP81022 0x10
+#define VOLTAGE_CONTROL_ID_LTC2635 0x11
 
 typedef struct _ATOM_VOLTAGE_OBJECT
 {
@@ -4458,6 +4519,15 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
   USHORT usSize; //Size of Object
 }ATOM_VOLTAGE_OBJECT_HEADER_V3;
 
+// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
+#define VOLTAGE_OBJ_GPIO_LUT 0 //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+
 typedef struct _VOLTAGE_LUT_ENTRY_V2
 {
   ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
@@ -4473,7 +4543,7 @@ typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
 
 typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
 {
-  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
   UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
   UCHAR ucVoltageControlI2cLine;
   UCHAR ucVoltageControlAddress;
@@ -4484,7 +4554,7 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
 
 typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
 {
-  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
   UCHAR ucVoltageGpioCntlId; // default is 0 which indicate control through CG VID mode
   UCHAR ucGpioEntryNum; // indiate the entry numbers of Votlage/Gpio value Look up table
   UCHAR ucPhaseDelay; // phase delay in unit of micro second
@@ -4495,7 +4565,7 @@ typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
 
 typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
 {
-  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = 0x10/0x11/0x12
   UCHAR ucLeakageCntlId; // default is 0
   UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
   UCHAR ucReserved[2];
@@ -4503,10 +4573,26 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4503 LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1]; 4573 LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
4504}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3; 4574}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
4505 4575
4576
4577typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
4578{
4579 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
4580// 14:7 - PSI0_VID
4581// 6   - PSI0_EN
4582// 5   - PSI1
4583// 4:2 - load line slope trim.
4584// 1:0 - offset trim.
4585 USHORT usLoadLine_PSI;
4586// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
4587 UCHAR ucReserved[2];
4588 ULONG ulReserved;
4589}ATOM_SVID2_VOLTAGE_OBJECT_V3;
4590
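The usLoadLine_PSI comments above describe a packed bit layout. A minimal decode sketch for that layout (the macro names are illustrative, not part of atombios.h):

/* Illustrative decode helpers for usLoadLine_PSI; bit positions follow
 * the layout documented above. These macros are not from atombios.h. */
#define SVID2_PSI0_VID(v)       (((v) >> 7) & 0xFF) /* bits 14:7 */
#define SVID2_PSI0_EN(v)        (((v) >> 6) & 0x01) /* bit 6 */
#define SVID2_PSI1(v)           (((v) >> 5) & 0x01) /* bit 5 */
#define SVID2_LL_SLOPE_TRIM(v)  (((v) >> 2) & 0x07) /* bits 4:2 */
#define SVID2_OFFSET_TRIM(v)    ((v) & 0x03)        /* bits 1:0 */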
4506typedef union _ATOM_VOLTAGE_OBJECT_V3{ 4591typedef union _ATOM_VOLTAGE_OBJECT_V3{
4507 ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj; 4592 ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
4508 ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj; 4593 ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
4509 ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj; 4594 ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
4595 ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
4510}ATOM_VOLTAGE_OBJECT_V3; 4596}ATOM_VOLTAGE_OBJECT_V3;
4511 4597
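Every member of the union starts with ATOM_VOLTAGE_OBJECT_HEADER_V3, so a parser selects the member from the voltage mode recorded in the header. A hedged dispatch sketch, assuming the header carries a ucVoltageMode byte as the per-struct "voltage mode = ..." comments imply:

/* Sketch: pick the union member from the common header's voltage mode.
 * The mode values are the VOLTAGE_OBJ_* defines added above. */
static void parse_voltage_object(ATOM_VOLTAGE_OBJECT_V3 *obj, UCHAR mode)
{
	(void)obj;
	switch (mode) {
	case VOLTAGE_OBJ_GPIO_LUT:
	case VOLTAGE_OBJ_PHASE_LUT:
		/* interpret as obj->asGpioVoltageObj */
		break;
	case VOLTAGE_OBJ_VR_I2C_INIT_SEQ:
		/* interpret as obj->asI2cVoltageObj */
		break;
	case VOLTAGE_OBJ_SVID2:
		/* interpret as obj->asSVID2Obj */
		break;
	case VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT:
	case VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT:
	case VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT:
		/* interpret as obj->asLeakageObj */
		break;
	}
}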
4512typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 4598typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
@@ -4536,6 +4622,21 @@ typedef struct _ATOM_ASIC_PROFILING_INFO
4536 ATOM_ASIC_PROFILE_VOLTAGE asVoltage; 4622 ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
4537}ATOM_ASIC_PROFILING_INFO; 4623}ATOM_ASIC_PROFILING_INFO;
4538 4624
4625typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
4626{
4627 ATOM_COMMON_TABLE_HEADER asHeader;
4628 UCHAR ucLeakageBinNum; // indicate the entry number of LeakageId/Voltage Lut table
4629 USHORT usLeakageBinArrayOffset; // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher)
4630
4631 UCHAR ucElbVDDC_Num;
4632 USHORT usElbVDDC_IdArrayOffset; // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 )
4633 USHORT usElbVDDC_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
4634
4635 UCHAR ucElbVDDCI_Num;
4636 USHORT usElbVDDCI_IdArrayOffset; // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 )
4637 USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
4638}ATOM_ASIC_PROFILING_INFO_V2_1;
4639
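The *ArrayOffset members above are byte offsets into the table rather than embedded arrays. A sketch of resolving one of them, assuming the usual AtomBIOS convention that offsets are relative to the table start (the helper is illustrative):

/* Sketch: resolve an offset field to a pointer inside the table.
 * Verify the offset base against the VBIOS in question. */
static USHORT *profiling_leakage_bins(ATOM_ASIC_PROFILING_INFO_V2_1 *t)
{
	return (USHORT *)((UCHAR *)t + t->usLeakageBinArrayOffset);
}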
4539typedef struct _ATOM_POWER_SOURCE_OBJECT 4640typedef struct _ATOM_POWER_SOURCE_OBJECT
4540{ 4641{
4541 UCHAR ucPwrSrcId; // Power source 4642 UCHAR ucPwrSrcId; // Power source
@@ -4652,6 +4753,8 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
4652#define SYS_INFO_LVDSMISC__888_BPC 0x04 4753#define SYS_INFO_LVDSMISC__888_BPC 0x04
4653#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08 4754#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
4654#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10 4755#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
4756// new since Trinity
4757#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN 0x20
4655 4758
4656// not used any more 4759// not used any more
4657#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04 4760#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
@@ -4752,6 +4855,29 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
4752 ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo; 4855 ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
4753 ULONG ulPowerplayTable[128]; 4856 ULONG ulPowerplayTable[128];
4754}ATOM_FUSION_SYSTEM_INFO_V1; 4857}ATOM_FUSION_SYSTEM_INFO_V1;
4858
4859
4860typedef struct _ATOM_TDP_CONFIG_BITS
4861{
4862#if ATOM_BIG_ENDIAN
4863 ULONG uReserved:2;
4864 ULONG uTDP_Value:14; // Original TDP value in tens of milliwatts
4865 ULONG uCTDP_Value:14; // Override value in tens of milliwatts
4866 ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
4867#else
4868 ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
4869 ULONG uCTDP_Value:14; // Override value in tens of milliwatts
4870 ULONG uTDP_Value:14; // Original TDP value in tens of milliwatts
4871 ULONG uReserved:2;
4872#endif
4873}ATOM_TDP_CONFIG_BITS;
4874
4875typedef union _ATOM_TDP_CONFIG
4876{
4877 ATOM_TDP_CONFIG_BITS TDP_config;
4878 ULONG TDP_config_all;
4879}ATOM_TDP_CONFIG;
4880
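Both TDP fields are stored in tens of milliwatts, so a 45 W TDP is stored as 4500. A small illustrative decoder (the helper is not part of this header):

/* Sketch: effective TDP in milliwatts. uCTDP_Enable is non-zero when a
 * cTDP override is in effect, per the bitfield comments above. */
static ULONG effective_tdp_mw(ATOM_TDP_CONFIG cfg)
{
	ULONG units = cfg.TDP_config.uCTDP_Enable ?
		      cfg.TDP_config.uCTDP_Value : cfg.TDP_config.uTDP_Value;
	return units * 10; /* tens of milliwatts -> milliwatts */
}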
4755/********************************************************************************************************************** 4881/**********************************************************************************************************************
4756 ATOM_FUSION_SYSTEM_INFO_V1 Description 4882 ATOM_FUSION_SYSTEM_INFO_V1 Description
4757sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition. 4883sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
@@ -4784,7 +4910,8 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4784 UCHAR ucMemoryType; 4910 UCHAR ucMemoryType;
4785 UCHAR ucUMAChannelNumber; 4911 UCHAR ucUMAChannelNumber;
4786 UCHAR strVBIOSMsg[40]; 4912 UCHAR strVBIOSMsg[40];
4787 ULONG ulReserved[20]; 4913 ATOM_TDP_CONFIG asTdpConfig;
4914 ULONG ulReserved[19];
4788 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; 4915 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
4789 ULONG ulGMCRestoreResetTime; 4916 ULONG ulGMCRestoreResetTime;
4790 ULONG ulMinimumNClk; 4917 ULONG ulMinimumNClk;
@@ -4809,7 +4936,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4809 USHORT GnbTdpLimit; 4936 USHORT GnbTdpLimit;
4810 USHORT usMaxLVDSPclkFreqInSingleLink; 4937 USHORT usMaxLVDSPclkFreqInSingleLink;
4811 UCHAR ucLvdsMisc; 4938 UCHAR ucLvdsMisc;
4812 UCHAR ucLVDSReserved; 4939 UCHAR ucTravisLVDSVolAdjust;
4813 UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms; 4940 UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
4814 UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms; 4941 UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
4815 UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms; 4942 UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
@@ -4817,7 +4944,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4817 UCHAR ucLVDSOffToOnDelay_in4Ms; 4944 UCHAR ucLVDSOffToOnDelay_in4Ms;
4818 UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms; 4945 UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
4819 UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms; 4946 UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
4820 UCHAR ucLVDSReserved1; 4947 UCHAR ucMinAllowedBL_Level;
4821 ULONG ulLCDBitDepthControlVal; 4948 ULONG ulLCDBitDepthControlVal;
4822 ULONG ulNbpStateMemclkFreq[4]; 4949 ULONG ulNbpStateMemclkFreq[4];
4823 USHORT usNBP2Voltage; 4950 USHORT usNBP2Voltage;
@@ -4846,6 +4973,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
4846#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 4973#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
4847#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02 4974#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
4848#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08 4975#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
4976#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
4849 4977
4850/********************************************************************************************************************** 4978/**********************************************************************************************************************
4851 ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description 4979 ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
@@ -4945,6 +5073,9 @@ ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 pan
4945 [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color 5073 [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
4946 [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used 5074 [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
4947 [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low ) 5075 [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
5076 [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
5077ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1, the platform SBIOS wants to override TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
5078 value to program the Travis register LVDS_CTRL_4
4948ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ). 5079ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
4949 =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON. 5080 =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
4950 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5081 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
@@ -4964,18 +5095,241 @@ ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms.
4964 =0 means to use VBIOS default delay which is 125 ( 500ms ). 5095 =0 means to use VBIOS default delay which is 125 ( 500ms ).
4965 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5096 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
4966 5097
4967ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active. 5098ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
5099 LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
4968 =0 means to use VBIOS default delay which is 0 ( 0ms ). 5100 =0 means to use VBIOS default delay which is 0 ( 0ms ).
4969 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5101 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
4970 5102
4971ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off. 5103ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
5104 LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
4972 =0 means to use VBIOS default delay which is 0 ( 0ms ). 5105 =0 means to use VBIOS default delay which is 0 ( 0ms ).
4973 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. 5106 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
4974 5107
5108ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer-platform-specific parameter. By default it is 0.
5109
4975ulNbpStateMemclkFreq[4]: system memory clock frequency in units of 10kHz in different NB P-states. 5110ulNbpStateMemclkFreq[4]: system memory clock frequency in units of 10kHz in different NB P-states.
4976 5111
4977**********************************************************************************************************************/ 5112**********************************************************************************************************************/
4978 5113
5114// this IntegratedSystemInfoTable is used for Kaveri & Kabini APUs
5115typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
5116{
5117 ATOM_COMMON_TABLE_HEADER sHeader;
5118 ULONG ulBootUpEngineClock;
5119 ULONG ulDentistVCOFreq;
5120 ULONG ulBootUpUMAClock;
5121 ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
5122 ULONG ulBootUpReqDisplayVector;
5123 ULONG ulVBIOSMisc;
5124 ULONG ulGPUCapInfo;
5125 ULONG ulDISP_CLK2Freq;
5126 USHORT usRequestedPWMFreqInHz;
5127 UCHAR ucHtcTmpLmt;
5128 UCHAR ucHtcHystLmt;
5129 ULONG ulReserved2;
5130 ULONG ulSystemConfig;
5131 ULONG ulCPUCapInfo;
5132 ULONG ulReserved3;
5133 USHORT usGPUReservedSysMemSize;
5134 USHORT usExtDispConnInfoOffset;
5135 USHORT usPanelRefreshRateRange;
5136 UCHAR ucMemoryType;
5137 UCHAR ucUMAChannelNumber;
5138 UCHAR strVBIOSMsg[40];
5139 ATOM_TDP_CONFIG asTdpConfig;
5140 ULONG ulReserved[19];
5141 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
5142 ULONG ulGMCRestoreResetTime;
5143 ULONG ulReserved4;
5144 ULONG ulIdleNClk;
5145 ULONG ulDDR_DLL_PowerUpTime;
5146 ULONG ulDDR_PLL_PowerUpTime;
5147 USHORT usPCIEClkSSPercentage;
5148 USHORT usPCIEClkSSType;
5149 USHORT usLvdsSSPercentage;
5150 USHORT usLvdsSSpreadRateIn10Hz;
5151 USHORT usHDMISSPercentage;
5152 USHORT usHDMISSpreadRateIn10Hz;
5153 USHORT usDVISSPercentage;
5154 USHORT usDVISSpreadRateIn10Hz;
5155 ULONG ulGPUReservedSysMemBaseAddrLo;
5156 ULONG ulGPUReservedSysMemBaseAddrHi;
5157 ULONG ulReserved5[3];
5158 USHORT usMaxLVDSPclkFreqInSingleLink;
5159 UCHAR ucLvdsMisc;
5160 UCHAR ucTravisLVDSVolAdjust;
5161 UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
5162 UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
5163 UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
5164 UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
5165 UCHAR ucLVDSOffToOnDelay_in4Ms;
5166 UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
5167 UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
5168 UCHAR ucMinAllowedBL_Level;
5169 ULONG ulLCDBitDepthControlVal;
5170 ULONG ulNbpStateMemclkFreq[4];
5171 ULONG ulReserved6;
5172 ULONG ulNbpStateNClkFreq[4];
5173 USHORT usNBPStateVoltage[4];
5174 USHORT usBootUpNBVoltage;
5175 USHORT usReserved2;
5176 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
5177}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
5178
5179/**********************************************************************************************************************
5180 ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
5181ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, then VBIOS uses a pre-defined bootup engine clock
5182ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
5183ulBootUpUMAClock: System memory boot up clock frequency in 10kHz unit.
5184sDISPCLK_Voltage: Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
5185
5186ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
5187 ATOM_DEVICE_CRT1_SUPPORT 0x0001
5188 ATOM_DEVICE_DFP1_SUPPORT 0x0008
5189 ATOM_DEVICE_DFP6_SUPPORT 0x0040
5190 ATOM_DEVICE_DFP2_SUPPORT 0x0080
5191 ATOM_DEVICE_DFP3_SUPPORT 0x0200
5192 ATOM_DEVICE_DFP4_SUPPORT 0x0400
5193 ATOM_DEVICE_DFP5_SUPPORT 0x0800
5194 ATOM_DEVICE_LCD1_SUPPORT 0x0002
5195
5196ulVBIOSMisc: Miscellaneous flags for VBIOS requirements and interface
5197 bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
5198 =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
5199 bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
5200 =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
5201 bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
5202 =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
5203 bit[3]=0: VBIOS fast boot is disabled
5204 =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection on every set mode if an LCD panel is connected and the LID is open)
5205
5206ulGPUCapInfo: bit[0~2]= Reserved
5207 bit[3]=0: Enable AUX HW mode detection logic
5208 =1: Disable AUX HW mode detection logic
5209 bit[4]=0: Disable DFS bypass feature
5210 =1: Enable DFS bypass feature
5211
5212usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
5213 Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
5214
5215 When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
5216 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
5217 VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
5218 Changing BL using VBIOS function is functional in both driver and non-driver present environment;
5219 and enabling VariBri under the driver environment from PP table is optional.
5220
5221 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
5222 that BL control from GPU is expected.
5223 VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
5224 Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
5225 it is platform dependent,
5226 and enabling VariBri under the driver environment from the PP table is optional.
5227
5228ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter HTC_active state.
5229ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
5230 The threshold-off value to exit the HTC_active state is the threshold-on value minus ucHtcHystLmt.
5231
5232ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
5233 =1: PCIE Power Gating Enabled
5234 Bit[1]=0: DDR-DLL shut-down feature disabled.
5235 1: DDR-DLL shut-down feature enabled.
5236 Bit[2]=0: DDR-PLL Power down feature disabled.
5237 1: DDR-PLL Power down feature enabled.
5238 Bit[3]=0: GNB DPM is disabled
5239 =1: GNB DPM is enabled
5240ulCPUCapInfo: TBD
5241
5242usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
5243usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
5244 to indicate a range.
5245 SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
5246 SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
5247 SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
5248 SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
5249
5250ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved.
5251ucUMAChannelNumber: System memory channel numbers.
5252
5253strVBIOSMsg[40]: VBIOS boot up customized message string
5254
5255sAvail_SCLK[5]: Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high
5256
5257ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
5258ulIdleNClk: NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
5259ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
5260ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
5261
5262usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
5263usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
5264usLvdsSSPercentage: LVDS panel ( not including eDP ) Spread Spectrum Percentage in units of 0.01%; =0, use VBIOS default setting.
5265usLvdsSSpreadRateIn10Hz: LVDS panel ( not including eDP ) Spread Spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
5266usHDMISSPercentage: HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting.
5267usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
5268usDVISSPercentage: DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting.
5269usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
5270
5271usGPUReservedSysMemSize: Reserved system memory size for ACP engine in APU GNB, units in MB. 0/2/4MB based on CMOS options, current default could be 0MB. KV only, not on KB.
5272ulGPUReservedSysMemBaseAddrLo: Low 32 bits base address to the reserved system memory.
5273ulGPUReservedSysMemBaseAddrHi: High 32 bits base address to the reserved system memory.
5274
5275usMaxLVDSPclkFreqInSingleLink: Max pixel clock of the LVDS panel in single link; if =0, VBIOS uses the default threshold, currently 85MHz
5276ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
5277 [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
5278 [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
5279 [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
5280 [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
5281 [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
5282ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1, the platform SBIOS wants to override TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
5283 value to program the Travis register LVDS_CTRL_4
5284ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
5285 LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
5286 =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
5287 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5288ucLVDSPwrOnDEtoVARY_BL_in4Ms:
5289 LVDS power up sequence time in unit of 4ms; time delay from DE ( data enable ) active to Vary Brightness enable signal active ( VARY_BL ).
5290 =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
5291 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5292ucLVDSPwrOffVARY_BLtoDE_in4Ms:
5293 LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable ( VARY_BL ) signal off to data enable ( DE ) signal off.
5294 =0 means use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
5295 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5296ucLVDSPwrOffDEtoDIGON_in4Ms:
5297 LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC ( DIGON ) off.
5298 =0 means use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
5299 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5300ucLVDSOffToOnDelay_in4Ms:
5301 LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
5302 =0 means to use VBIOS default delay which is 125 ( 500ms ).
5303 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5304ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
5305 LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
5306 =0 means to use VBIOS default delay which is 0 ( 0ms ).
5307 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5308
5309ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
5310 LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
5311 =0 means to use VBIOS default delay which is 0 ( 0ms ).
5312 This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5313ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer-platform-specific parameter. By default it is 0.
5314
5315ulLCDBitDepthControlVal: GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
5316
5317ulNbpStateMemclkFreq[4]: system memory clock frequency in units of 10kHz in different NB P-States (P0, P1, P2 & P3).
5318ulNbpStateNClkFreq[4]: NB P-State NClk frequency in different NB P-State
5319usNBPStateVoltage[4]: NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage
5320usBootUpNBVoltage: NB P-State voltage during boot up before driver loaded
5321sExtDispConnInfo: Display connector information table provided to VBIOS
5322
5323**********************************************************************************************************************/
5324
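Per the description above, the clock fields are in 10 kHz units, the spread-spectrum percentages in 0.01 % units, and usRequestedPWMFreqInHz doubles as the GPU-backlight-control flag. A hedged conversion sketch (the helper name is illustrative):

/* Sketch: convert a handful of V1_8 fields to conventional units,
 * following the field descriptions above. */
static void v1_8_units_sketch(ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info)
{
	ULONG eng_clk_khz = info->ulBootUpEngineClock * 10; /* 10kHz -> kHz */
	ULONG uma_clk_khz = info->ulBootUpUMAClock * 10;
	/* 0 means the backlight is not GPU controlled */
	int gpu_controls_bl = info->usRequestedPWMFreqInHz != 0;
	ULONG lvds_ss_hundredths = info->usLvdsSSPercentage; /* 0.01% units */
	(void)eng_clk_khz; (void)uma_clk_khz;
	(void)gpu_controls_bl; (void)lvds_ss_hundredths;
}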
5325// this table is used for Kaveri/Kabini APUs
5326typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
5327{
5328 ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
5329 ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
5330}ATOM_FUSION_SYSTEM_INFO_V2;
5331
5332
4979/**************************************************************************/ 5333/**************************************************************************/
4980// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design 5334// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
4981//Memory SS Info Table 5335//Memory SS Info Table
@@ -5026,22 +5380,24 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
5026 5380
5027//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type. 5381//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
5028//SS is not required or enabled if a match is not found. 5382//SS is not required or enabled if a match is not found.
5029#define ASIC_INTERNAL_MEMORY_SS 1 5383#define ASIC_INTERNAL_MEMORY_SS 1
5030#define ASIC_INTERNAL_ENGINE_SS 2 5384#define ASIC_INTERNAL_ENGINE_SS 2
5031#define ASIC_INTERNAL_UVD_SS 3 5385#define ASIC_INTERNAL_UVD_SS 3
5032#define ASIC_INTERNAL_SS_ON_TMDS 4 5386#define ASIC_INTERNAL_SS_ON_TMDS 4
5033#define ASIC_INTERNAL_SS_ON_HDMI 5 5387#define ASIC_INTERNAL_SS_ON_HDMI 5
5034#define ASIC_INTERNAL_SS_ON_LVDS 6 5388#define ASIC_INTERNAL_SS_ON_LVDS 6
5035#define ASIC_INTERNAL_SS_ON_DP 7 5389#define ASIC_INTERNAL_SS_ON_DP 7
5036#define ASIC_INTERNAL_SS_ON_DCPLL 8 5390#define ASIC_INTERNAL_SS_ON_DCPLL 8
5037#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9 5391#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
5038#define ASIC_INTERNAL_VCE_SS 10 5392#define ASIC_INTERNAL_VCE_SS 10
5393#define ASIC_INTERNAL_GPUPLL_SS 11
5394
5039 5395
5040typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 5396typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
5041{ 5397{
5042 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency ( VCO ), in units of 10kHz 5398 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency ( VCO ), in units of 10kHz
5043 //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) 5399 //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
5044 USHORT usSpreadSpectrumPercentage; //in unit of 0.01% 5400 USHORT usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
5045 USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq 5401 USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
5046 UCHAR ucClockIndication; //Indicate which clock source needs SS 5402 UCHAR ucClockIndication; //Indicate which clock source needs SS
5047 UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS 5403 UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
@@ -5079,6 +5435,11 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
5079 UCHAR ucReserved[2]; 5435 UCHAR ucReserved[2];
5080}ATOM_ASIC_SS_ASSIGNMENT_V3; 5436}ATOM_ASIC_SS_ASSIGNMENT_V3;
5081 5437
5438//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
5439#define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
5440#define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
5441#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10
5442
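Bit 4 of ucSpreadSpectrumMode selects whether usSpreadSpectrumPercentage is in 0.01 % or 0.001 % units. A divider sketch (the helper is illustrative):

/* Sketch: pick the percentage divider from ucSpreadSpectrumMode bit4
 * (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK). */
static int ss_percentage_divider(UCHAR mode)
{
	return (mode & SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK) ? 1000 : 100;
}
/* e.g. percent = (float)usSpreadSpectrumPercentage / divider */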
5082typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 5443typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
5083{ 5444{
5084 ATOM_COMMON_TABLE_HEADER sHeader; 5445 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5719,6 +6080,7 @@ typedef struct _INDIRECT_IO_ACCESS
5719#define INDIRECT_IO_PCIE 3 6080#define INDIRECT_IO_PCIE 3
5720#define INDIRECT_IO_PCIEP 4 6081#define INDIRECT_IO_PCIEP 4
5721#define INDIRECT_IO_NBMISC 5 6082#define INDIRECT_IO_NBMISC 5
6083#define INDIRECT_IO_SMU 5
5722 6084
5723#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ 6085#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
5724#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE 6086#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
@@ -5730,6 +6092,8 @@ typedef struct _INDIRECT_IO_ACCESS
5730#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE 6092#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
5731#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ 6093#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
5732#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE 6094#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
6095#define INDIRECT_IO_SMU_READ INDIRECT_IO_SMU | INDIRECT_READ
6096#define INDIRECT_IO_SMU_WRITE INDIRECT_IO_SMU | INDIRECT_WRITE
5733 6097
5734typedef struct _ATOM_OEM_INFO 6098typedef struct _ATOM_OEM_INFO
5735{ 6099{
@@ -5875,6 +6239,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
5875#define _64Mx32 0x43 6239#define _64Mx32 0x43
5876#define _128Mx8 0x51 6240#define _128Mx8 0x51
5877#define _128Mx16 0x52 6241#define _128Mx16 0x52
6242#define _128Mx32 0x53
5878#define _256Mx8 0x61 6243#define _256Mx8 0x61
5879#define _256Mx16 0x62 6244#define _256Mx16 0x62
5880 6245
@@ -5893,6 +6258,8 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
5893#define PROMOS MOSEL 6258#define PROMOS MOSEL
5894#define KRETON INFINEON 6259#define KRETON INFINEON
5895#define ELIXIR NANYA 6260#define ELIXIR NANYA
6261#define MEZZA ELPIDA
6262
5896 6263
5897/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// 6264/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
5898 6265
@@ -6625,6 +6992,10 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
6625 ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only 6992 ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
6626}ATOM_DISP_OUT_INFO_V3; 6993}ATOM_DISP_OUT_INFO_V3;
6627 6994
6995//ucDispCaps
6996#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL 0x01
6997#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED 0x02
6998
6628typedef enum CORE_REF_CLK_SOURCE{ 6999typedef enum CORE_REF_CLK_SOURCE{
6629 CLOCK_SRC_XTALIN=0, 7000 CLOCK_SRC_XTALIN=0,
6630 CLOCK_SRC_XO_IN=1, 7001 CLOCK_SRC_XO_IN=1,
@@ -6829,6 +7200,17 @@ typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
6829 USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings 7200 USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
6830}DIG_TRANSMITTER_INFO_HEADER_V3_1; 7201}DIG_TRANSMITTER_INFO_HEADER_V3_1;
6831 7202
7203typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{
7204 ATOM_COMMON_TABLE_HEADER sHeader;
7205 USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
7206 USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with non-DP mode Analog Setting's register Info
7207 USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with non-DP mode Analog Setting for each link clock range
7208 USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
7209 USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
7210 USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info
7211 USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
7212}DIG_TRANSMITTER_INFO_HEADER_V3_2;
7213
6832typedef struct _CLOCK_CONDITION_REGESTER_INFO{ 7214typedef struct _CLOCK_CONDITION_REGESTER_INFO{
6833 USHORT usRegisterIndex; 7215 USHORT usRegisterIndex;
6834 UCHAR ucStartBit; 7216 UCHAR ucStartBit;
@@ -6852,12 +7234,24 @@ typedef struct _PHY_CONDITION_REG_VAL{
6852 ULONG ulRegVal; 7234 ULONG ulRegVal;
6853}PHY_CONDITION_REG_VAL; 7235}PHY_CONDITION_REG_VAL;
6854 7236
7237typedef struct _PHY_CONDITION_REG_VAL_V2{
7238 ULONG ulCondition;
7239 UCHAR ucCondition2;
7240 ULONG ulRegVal;
7241}PHY_CONDITION_REG_VAL_V2;
7242
6855typedef struct _PHY_CONDITION_REG_INFO{ 7243typedef struct _PHY_CONDITION_REG_INFO{
6856 USHORT usRegIndex; 7244 USHORT usRegIndex;
6857 USHORT usSize; 7245 USHORT usSize;
6858 PHY_CONDITION_REG_VAL asRegVal[1]; 7246 PHY_CONDITION_REG_VAL asRegVal[1];
6859}PHY_CONDITION_REG_INFO; 7247}PHY_CONDITION_REG_INFO;
6860 7248
7249typedef struct _PHY_CONDITION_REG_INFO_V2{
7250 USHORT usRegIndex;
7251 USHORT usSize;
7252 PHY_CONDITION_REG_VAL_V2 asRegVal[1];
7253}PHY_CONDITION_REG_INFO_V2;
7254
6861typedef struct _PHY_ANALOG_SETTING_INFO{ 7255typedef struct _PHY_ANALOG_SETTING_INFO{
6862 UCHAR ucEncodeMode; 7256 UCHAR ucEncodeMode;
6863 UCHAR ucPhySel; 7257 UCHAR ucPhySel;
@@ -6865,6 +7259,25 @@ typedef struct _PHY_ANALOG_SETTING_INFO{
6865 PHY_CONDITION_REG_INFO asAnalogSetting[1]; 7259 PHY_CONDITION_REG_INFO asAnalogSetting[1];
6866}PHY_ANALOG_SETTING_INFO; 7260}PHY_ANALOG_SETTING_INFO;
6867 7261
7262typedef struct _PHY_ANALOG_SETTING_INFO_V2{
7263 UCHAR ucEncodeMode;
7264 UCHAR ucPhySel;
7265 USHORT usSize;
7266 PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1];
7267}PHY_ANALOG_SETTING_INFO_V2;
7268
7269typedef struct _GFX_HAVESTING_PARAMETERS {
7270 UCHAR ucGfxBlkId; //GFX blk id to be harvested, like CU, RB or PRIM
7271 UCHAR ucReserved; //reserved
7272 UCHAR ucActiveUnitNumPerSH; //requested active CU/RB/PRIM number per shader array
7273 UCHAR ucMaxUnitNumPerSH; //max CU/RB/PRIM number per shader array
7274} GFX_HAVESTING_PARAMETERS;
7275
7276//ucGfxBlkId
7277#define GFX_HARVESTING_CU_ID 0
7278#define GFX_HARVESTING_RB_ID 1
7279#define GFX_HARVESTING_PRIM_ID 2
7280
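A hedged example of filling GFX_HAVESTING_PARAMETERS to request CU harvesting; the counts are made up for illustration, and the VBIOS command table that consumes this struct is not part of this header:

/* Sketch: request 7 of 8 CUs active per shader array. */
GFX_HAVESTING_PARAMETERS harvest = {
	.ucGfxBlkId           = GFX_HARVESTING_CU_ID,
	.ucReserved           = 0,
	.ucActiveUnitNumPerSH = 7,
	.ucMaxUnitNumPerSH    = 8,
};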
6868/****************************************************************************/ 7281/****************************************************************************/
6869//Portion VI: Definitions for vbios MC scratch registers that the driver used 7282//Portion VI: Definitions for vbios MC scratch registers that the driver used
6870/****************************************************************************/ 7283/****************************************************************************/
@@ -6875,8 +7288,17 @@ typedef struct _PHY_ANALOG_SETTING_INFO{
6875#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000 7288#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
6876#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000 7289#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
6877#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000 7290#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
7291#define MC_MISC0__MEMORY_TYPE__HBM 0x60000000
6878#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000 7292#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
6879 7293
7294#define ATOM_MEM_TYPE_DDR_STRING "DDR"
7295#define ATOM_MEM_TYPE_DDR2_STRING "DDR2"
7296#define ATOM_MEM_TYPE_GDDR3_STRING "GDDR3"
7297#define ATOM_MEM_TYPE_GDDR4_STRING "GDDR4"
7298#define ATOM_MEM_TYPE_GDDR5_STRING "GDDR5"
7299#define ATOM_MEM_TYPE_HBM_STRING "HBM"
7300#define ATOM_MEM_TYPE_DDR3_STRING "DDR3"
7301
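The strings pair with the MC_MISC0__MEMORY_TYPE__* values above. A lookup sketch covering only the types visible here (the function and the top-nibble mask are illustrative assumptions):

/* Sketch: translate the MC_MISC0 memory-type field into its name string. */
static const char *mem_type_name(ULONG mc_misc0)
{
	switch (mc_misc0 & 0xF0000000) {
	case MC_MISC0__MEMORY_TYPE__GDDR3: return ATOM_MEM_TYPE_GDDR3_STRING;
	case MC_MISC0__MEMORY_TYPE__GDDR4: return ATOM_MEM_TYPE_GDDR4_STRING;
	case MC_MISC0__MEMORY_TYPE__GDDR5: return ATOM_MEM_TYPE_GDDR5_STRING;
	case MC_MISC0__MEMORY_TYPE__HBM:   return ATOM_MEM_TYPE_HBM_STRING;
	case MC_MISC0__MEMORY_TYPE__DDR3:  return ATOM_MEM_TYPE_DDR3_STRING;
	default:                           return "unknown";
	}
}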
6880/****************************************************************************/ 7302/****************************************************************************/
6881//Portion VI: Definitions being obsolete 7303//Portion VI: Definitions being obsolete
6882/****************************************************************************/ 7304/****************************************************************************/
@@ -7274,6 +7696,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
7274#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 7696#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
7275#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16 7697#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
7276#define ATOM_PP_THERMALCONTROLLER_LM96163 17 7698#define ATOM_PP_THERMALCONTROLLER_LM96163 17
7699#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
7277 7700
7278// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. 7701// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
7279// We probably should reserve the bit 0x80 for this use. 7702// We probably should reserve the bit 0x80 for this use.
@@ -7316,6 +7739,8 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
7316 // Add extra system parameters here, always adjust size to include all fields. 7739 // Add extra system parameters here, always adjust size to include all fields.
7317 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table 7740 USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
7318 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table 7741 USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
7742 USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
7743 USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
7319} ATOM_PPLIB_EXTENDEDHEADER; 7744} ATOM_PPLIB_EXTENDEDHEADER;
7320 7745
7321//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps 7746//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
@@ -7337,7 +7762,10 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
7337#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. 7762#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
7338#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. 7763#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
7339#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support BACO state. 7764#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support BACO state.
7340 7765#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver support the new CAC voltage table.
7766#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver support reverting GPIO5 polarity.
7767#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver support thermal2GPIO17.
7768#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support a configurable VR HOT GPIO.
7341 7769
7342typedef struct _ATOM_PPLIB_POWERPLAYTABLE 7770typedef struct _ATOM_PPLIB_POWERPLAYTABLE
7343{ 7771{
@@ -7398,7 +7826,7 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
7398 USHORT usVddcDependencyOnMCLKOffset; 7826 USHORT usVddcDependencyOnMCLKOffset;
7399 USHORT usMaxClockVoltageOnDCOffset; 7827 USHORT usMaxClockVoltageOnDCOffset;
7400 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table 7828 USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
7401 USHORT usReserved; 7829 USHORT usMvddDependencyOnMCLKOffset;
7402} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; 7830} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
7403 7831
7404typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 7832typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
@@ -7563,6 +7991,17 @@ typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
7563 7991
7564} ATOM_PPLIB_SI_CLOCK_INFO; 7992} ATOM_PPLIB_SI_CLOCK_INFO;
7565 7993
7994typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
7995{
7996 USHORT usEngineClockLow;
7997 UCHAR ucEngineClockHigh;
7998
7999 USHORT usMemoryClockLow;
8000 UCHAR ucMemoryClockHigh;
8001
8002 UCHAR ucPCIEGen;
8003 USHORT usPCIELane;
8004} ATOM_PPLIB_CI_CLOCK_INFO;
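As in the other *_CLOCK_INFO structs, each clock is split into a 16-bit low part and an 8-bit high part, giving a 24-bit value in 10 kHz units. A reconstruction sketch (byte-order conversion such as le16_to_cpu is omitted; the helper is illustrative):

/* Sketch: rebuild the 24-bit engine clock (10kHz units) from the
 * low/high split used by ATOM_PPLIB_CI_CLOCK_INFO. */
static ULONG ci_engine_clock_10khz(const ATOM_PPLIB_CI_CLOCK_INFO *ci)
{
	return ((ULONG)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
}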
7566 8005
7567typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO 8006typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
7568 8007
@@ -7680,8 +8119,8 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
7680 8119
7681typedef struct _ATOM_PPLIB_CAC_Leakage_Record 8120typedef struct _ATOM_PPLIB_CAC_Leakage_Record
7682{ 8121{
7683 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations 8122 USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value.
7684 ULONG ulLeakageValue; 8123 ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standardized VDDC value.
7685}ATOM_PPLIB_CAC_Leakage_Record; 8124}ATOM_PPLIB_CAC_Leakage_Record;
7686 8125
7687typedef struct _ATOM_PPLIB_CAC_Leakage_Table 8126typedef struct _ATOM_PPLIB_CAC_Leakage_Table
@@ -7796,6 +8235,42 @@ typedef struct _ATOM_PPLIB_UVD_Table
7796// ATOM_PPLIB_UVD_State_Table states; 8235// ATOM_PPLIB_UVD_State_Table states;
7797}ATOM_PPLIB_UVD_Table; 8236}ATOM_PPLIB_UVD_Table;
7798 8237
8238
8239typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
8240{
8241 USHORT usVoltage;
8242 USHORT usSAMClockLow;
8243 UCHAR ucSAMClockHigh;
8244}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
8245
8246typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
8247 UCHAR numEntries;
8248 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
8249}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
8250
8251typedef struct _ATOM_PPLIB_SAMU_Table
8252{
8253 UCHAR revid;
8254 ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
8255}ATOM_PPLIB_SAMU_Table;
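entries[1] is the usual AtomBIOS variable-length-array idiom; numEntries records the real count. An iteration sketch (the walker is illustrative):

/* Sketch: walk the SAMU clock/voltage limit records. */
static void walk_samu_limits(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *t)
{
	UCHAR i;
	for (i = 0; i < t->numEntries; i++) {
		ATOM_PPLIB_SAMClk_Voltage_Limit_Record *r = &t->entries[i];
		/* r->usVoltage, and the 24-bit clock:
		 * ((ULONG)r->ucSAMClockHigh << 16) | r->usSAMClockLow */
		(void)r;
	}
}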
8256
8257#define ATOM_PPM_A_A 1
8258#define ATOM_PPM_A_I 2
8259typedef struct _ATOM_PPLIB_PPM_Table
8260{
8261 UCHAR ucRevId;
8262 UCHAR ucPpmDesign; //A+I or A+A
8263 USHORT usCpuCoreNumber;
8264 ULONG ulPlatformTDP;
8265 ULONG ulSmallACPlatformTDP;
8266 ULONG ulPlatformTDC;
8267 ULONG ulSmallACPlatformTDC;
8268 ULONG ulApuTDP;
8269 ULONG ulDGpuTDP;
8270 ULONG ulDGpuUlvPower;
8271 ULONG ulTjmax;
8272} ATOM_PPLIB_PPM_Table;
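The PPM table is reached through usPPMTableOffset in ATOM_PPLIB_EXTENDEDHEADER, added earlier in this diff. A lookup sketch, assuming the offset is relative to the start of the powerplay table (the usual convention; the helper is illustrative and byte-order conversion is omitted):

/* Sketch: locate the PPM table from the extended header; a zero offset
 * means the table is not present. */
static ATOM_PPLIB_PPM_Table *get_ppm_table(void *pp_table_base,
					   ATOM_PPLIB_EXTENDEDHEADER *ext)
{
	if (!ext->usPPMTableOffset)
		return NULL;
	return (ATOM_PPLIB_PPM_Table *)
		((UCHAR *)pp_table_base + ext->usPPMTableOffset);
}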
8273
7799/**************************************************************************/ 8274/**************************************************************************/
7800 8275
7801 8276
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d5df8fd10217..c7ad4b930850 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -555,7 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 if (rdev->family < CHIP_RV770) 555 if (rdev->family < CHIP_RV770)
556 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 556 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
557 /* use frac fb div on APUs */ 557 /* use frac fb div on APUs */
558 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) 558 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
559 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 559 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
560 /* use frac fb div on RS780/RS880 */ 560 /* use frac fb div on RS780/RS880 */
561 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 561 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
@@ -743,7 +743,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
743 * SetPixelClock provides the dividers 743 * SetPixelClock provides the dividers
744 */ 744 */
745 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); 745 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
746 if (ASIC_IS_DCE61(rdev)) 746 if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
747 args.v6.ucPpll = ATOM_EXT_PLL1; 747 args.v6.ucPpll = ATOM_EXT_PLL1;
748 else if (ASIC_IS_DCE6(rdev)) 748 else if (ASIC_IS_DCE6(rdev))
749 args.v6.ucPpll = ATOM_PPLL0; 749 args.v6.ucPpll = ATOM_PPLL0;
@@ -1143,7 +1143,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_TAHITI) 1146 if (rdev->family >= CHIP_BONAIRE)
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1147 tmp = rdev->config.si.tile_config; 1149 tmp = rdev->config.si.tile_config;
1148 else if (rdev->family >= CHIP_CAYMAN) 1150 else if (rdev->family >= CHIP_CAYMAN)
1149 tmp = rdev->config.cayman.tile_config; 1151 tmp = rdev->config.cayman.tile_config;
@@ -1170,11 +1172,29 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1170 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1171 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
1172 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); 1174 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
1175 if (rdev->family >= CHIP_BONAIRE) {
1176 /* XXX need to know more about the surface tiling mode */
1177 fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING);
1178 }
1173 } else if (tiling_flags & RADEON_TILING_MICRO) 1179 } else if (tiling_flags & RADEON_TILING_MICRO)
1174 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1175 1181
1176 if ((rdev->family == CHIP_TAHITI) || 1182 if (rdev->family >= CHIP_BONAIRE) {
1177 (rdev->family == CHIP_PITCAIRN)) 1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
1184 u32 num_rb = rdev->config.cik.max_backends_per_se;
1185 if (num_pipe_configs > 8)
1186 num_pipe_configs = 8;
1187 if (num_pipe_configs == 8)
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN))
1178 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1179 else if (rdev->family == CHIP_VERDE) 1199 else if (rdev->family == CHIP_VERDE)
1180 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1200 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
@@ -1224,8 +1244,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1224 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); 1244 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
1225 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1245 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1226 1246
1227 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1247 if (rdev->family >= CHIP_BONAIRE)
1228 target_fb->height); 1248 WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1249 target_fb->height);
1250 else
1251 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1252 target_fb->height);
1229 x &= ~3; 1253 x &= ~3;
1230 y &= ~1; 1254 y &= ~1;
1231 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1255 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1597,6 +1621,12 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1597 * 1621 *
1598 * Asic specific PLL information 1622 * Asic specific PLL information
1599 * 1623 *
1624 * DCE 8.x
1625 * KB/KV
1626 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
1627 * CI
1628 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1629 *
1600 * DCE 6.1 1630 * DCE 6.1
1601 * - PPLL2 is only available to UNIPHYA (both DP and non-DP) 1631 * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
1602 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP) 1632 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
@@ -1623,7 +1653,47 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1623 u32 pll_in_use; 1653 u32 pll_in_use;
1624 int pll; 1654 int pll;
1625 1655
1626 if (ASIC_IS_DCE61(rdev)) { 1656 if (ASIC_IS_DCE8(rdev)) {
1657 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1658 if (rdev->clock.dp_extclk)
1659 /* skip PPLL programming if using ext clock */
1660 return ATOM_PPLL_INVALID;
1661 else {
1662 /* use the same PPLL for all DP monitors */
1663 pll = radeon_get_shared_dp_ppll(crtc);
1664 if (pll != ATOM_PPLL_INVALID)
1665 return pll;
1666 }
1667 } else {
1668 /* use the same PPLL for all monitors with the same clock */
1669 pll = radeon_get_shared_nondp_ppll(crtc);
1670 if (pll != ATOM_PPLL_INVALID)
1671 return pll;
1672 }
1673 /* otherwise, pick one of the plls */
1674 if ((rdev->family == CHIP_KAVERI) ||
1675 (rdev->family == CHIP_KABINI)) {
1676 /* KB/KV has PPLL1 and PPLL2 */
1677 pll_in_use = radeon_get_pll_use_mask(crtc);
1678 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1679 return ATOM_PPLL2;
1680 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1681 return ATOM_PPLL1;
1682 DRM_ERROR("unable to allocate a PPLL\n");
1683 return ATOM_PPLL_INVALID;
1684 } else {
1685 /* CI has PPLL0, PPLL1, and PPLL2 */
1686 pll_in_use = radeon_get_pll_use_mask(crtc);
1687 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1688 return ATOM_PPLL2;
1689 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1690 return ATOM_PPLL1;
1691 if (!(pll_in_use & (1 << ATOM_PPLL0)))
1692 return ATOM_PPLL0;
1693 DRM_ERROR("unable to allocate a PPLL\n");
1694 return ATOM_PPLL_INVALID;
1695 }
1696 } else if (ASIC_IS_DCE61(rdev)) {
1627 struct radeon_encoder_atom_dig *dig = 1697 struct radeon_encoder_atom_dig *dig =
1628 radeon_encoder->enc_priv; 1698 radeon_encoder->enc_priv;
1629 1699
@@ -1861,7 +1931,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1861 break; 1931 break;
1862 case ATOM_PPLL0: 1932 case ATOM_PPLL0:
1863 /* disable the ppll */ 1933 /* disable the ppll */
1864 if (ASIC_IS_DCE61(rdev)) 1934 if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE))
1865 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1935 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1866 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 1936 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
1867 break; 1937 break;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 8406c8251fbf..092275d53d4a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -186,6 +186,13 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
186 u8 backlight_level; 186 u8 backlight_level;
187 char bl_name[16]; 187 char bl_name[16];
188 188
189 /* Mac laptops with multiple GPUs use the gmux driver for backlight
190 * so don't register a backlight device
191 */
192 if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
193 (rdev->pdev->device == 0x6741))
194 return;
195
189 if (!radeon_encoder->enc_priv) 196 if (!radeon_encoder->enc_priv)
190 return; 197 return;
191 198
@@ -296,6 +303,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
296 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 303 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
297 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 304 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
298 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 305 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
306 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
299 return true; 307 return true;
300 default: 308 default:
301 return false; 309 return false;
@@ -479,11 +487,11 @@ static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
479 } 487 }
480} 488}
481 489
482
483union dvo_encoder_control { 490union dvo_encoder_control {
484 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; 491 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
485 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; 492 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
486 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; 493 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
494 DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
487}; 495};
488 496
489void 497void
@@ -533,6 +541,13 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
533 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 541 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
534 args.dvo_v3.ucDVOConfig = 0; /* XXX */ 542 args.dvo_v3.ucDVOConfig = 0; /* XXX */
535 break; 543 break;
544 case 4:
545 /* DCE8 */
546 args.dvo_v4.ucAction = action;
547 args.dvo_v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
548 args.dvo_v4.ucDVOConfig = 0; /* XXX */
549 args.dvo_v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
550 break;
536 default: 551 default:
537 DRM_ERROR("Unknown table version %d, %d\n", frev, crev); 552 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
538 break; 553 break;
@@ -915,10 +930,14 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
915 args.v4.ucLaneNum = 4; 930 args.v4.ucLaneNum = 4;
916 931
917 if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) { 932 if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
918 if (dp_clock == 270000) 933 if (dp_clock == 540000)
919 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
920 else if (dp_clock == 540000)
921 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; 934 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
935 else if (dp_clock == 324000)
936 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
937 else if (dp_clock == 270000)
938 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
939 else
940 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
922 } 941 }
923 args.v4.acConfig.ucDigSel = dig->dig_encoder; 942 args.v4.acConfig.ucDigSel = dig->dig_encoder;
924 args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder); 943 args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
@@ -1012,6 +1031,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1012 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1031 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
@@ -1271,6 +1291,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 		else
 			args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
 		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
+		break;
 	}
 	if (is_dp)
 		args.v5.ucLaneNum = dp_lane_count;
@@ -1735,6 +1758,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		radeon_atom_encoder_dpms_dig(encoder, mode);
 		break;
@@ -1872,6 +1896,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		dig = radeon_encoder->enc_priv;
 		switch (dig->dig_encoder) {
@@ -1893,6 +1918,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 		case 5:
 			args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
 			break;
+		case 6:
+			args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+			break;
 		}
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -1955,7 +1983,13 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
 	/* set scaler clears this on some chips */
 	if (ASIC_IS_AVIVO(rdev) &&
 	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
-		if (ASIC_IS_DCE4(rdev)) {
+		if (ASIC_IS_DCE8(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       CIK_INTERLEAVE_EN);
+			else
+				WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else if (ASIC_IS_DCE4(rdev)) {
 			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
 				       EVERGREEN_INTERLEAVE_EN);
@@ -2002,6 +2036,9 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
 		else
 			return 4;
 		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		return 6;
+		break;
 		}
 	} else if (ASIC_IS_DCE4(rdev)) {
 		/* DCE4/5 */
@@ -2086,6 +2123,7 @@ radeon_atom_encoder_init(struct radeon_device *rdev)
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
 		break;
@@ -2130,6 +2168,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* handled in dpms */
 		break;
@@ -2395,6 +2434,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* handled in dpms */
 		break;
@@ -2626,6 +2666,7 @@ radeon_add_atom_encoder(struct drm_device *dev,
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 			radeon_encoder->rmx_type = RMX_FULL;
 			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
new file mode 100644
index 000000000000..f072660c7665
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -0,0 +1,2737 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "btcd.h"
28#include "r600_dpm.h"
29#include "cypress_dpm.h"
30#include "btc_dpm.h"
31#include "atom.h"
32
33#define MC_CG_ARB_FREQ_F0 0x0a
34#define MC_CG_ARB_FREQ_F1 0x0b
35#define MC_CG_ARB_FREQ_F2 0x0c
36#define MC_CG_ARB_FREQ_F3 0x0d
37
38#define MC_CG_SEQ_DRAMCONF_S0 0x05
39#define MC_CG_SEQ_DRAMCONF_S1 0x06
40#define MC_CG_SEQ_YCLK_SUSPEND 0x04
41#define MC_CG_SEQ_YCLK_RESUME 0x0a
42
43#define SMC_RAM_END 0x8000
44
45#ifndef BTC_MGCG_SEQUENCE
46#define BTC_MGCG_SEQUENCE 300
47
48struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
49struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
50struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
51
52
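/* Each *_default/_enable/_disable table below is a flat array of
 * (register offset, value, mask) triples; btc_program_mgcg_hw_sequence()
 * later in this file applies each triple as a masked read-modify-write. */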
53//********* BARTS **************//
54static const u32 barts_cgcg_cgls_default[] =
55{
56 /* Register, Value, Mask bits */
57 0x000008f8, 0x00000010, 0xffffffff,
58 0x000008fc, 0x00000000, 0xffffffff,
59 0x000008f8, 0x00000011, 0xffffffff,
60 0x000008fc, 0x00000000, 0xffffffff,
61 0x000008f8, 0x00000012, 0xffffffff,
62 0x000008fc, 0x00000000, 0xffffffff,
63 0x000008f8, 0x00000013, 0xffffffff,
64 0x000008fc, 0x00000000, 0xffffffff,
65 0x000008f8, 0x00000014, 0xffffffff,
66 0x000008fc, 0x00000000, 0xffffffff,
67 0x000008f8, 0x00000015, 0xffffffff,
68 0x000008fc, 0x00000000, 0xffffffff,
69 0x000008f8, 0x00000016, 0xffffffff,
70 0x000008fc, 0x00000000, 0xffffffff,
71 0x000008f8, 0x00000017, 0xffffffff,
72 0x000008fc, 0x00000000, 0xffffffff,
73 0x000008f8, 0x00000018, 0xffffffff,
74 0x000008fc, 0x00000000, 0xffffffff,
75 0x000008f8, 0x00000019, 0xffffffff,
76 0x000008fc, 0x00000000, 0xffffffff,
77 0x000008f8, 0x0000001a, 0xffffffff,
78 0x000008fc, 0x00000000, 0xffffffff,
79 0x000008f8, 0x0000001b, 0xffffffff,
80 0x000008fc, 0x00000000, 0xffffffff,
81 0x000008f8, 0x00000020, 0xffffffff,
82 0x000008fc, 0x00000000, 0xffffffff,
83 0x000008f8, 0x00000021, 0xffffffff,
84 0x000008fc, 0x00000000, 0xffffffff,
85 0x000008f8, 0x00000022, 0xffffffff,
86 0x000008fc, 0x00000000, 0xffffffff,
87 0x000008f8, 0x00000023, 0xffffffff,
88 0x000008fc, 0x00000000, 0xffffffff,
89 0x000008f8, 0x00000024, 0xffffffff,
90 0x000008fc, 0x00000000, 0xffffffff,
91 0x000008f8, 0x00000025, 0xffffffff,
92 0x000008fc, 0x00000000, 0xffffffff,
93 0x000008f8, 0x00000026, 0xffffffff,
94 0x000008fc, 0x00000000, 0xffffffff,
95 0x000008f8, 0x00000027, 0xffffffff,
96 0x000008fc, 0x00000000, 0xffffffff,
97 0x000008f8, 0x00000028, 0xffffffff,
98 0x000008fc, 0x00000000, 0xffffffff,
99 0x000008f8, 0x00000029, 0xffffffff,
100 0x000008fc, 0x00000000, 0xffffffff,
101 0x000008f8, 0x0000002a, 0xffffffff,
102 0x000008fc, 0x00000000, 0xffffffff,
103 0x000008f8, 0x0000002b, 0xffffffff,
104 0x000008fc, 0x00000000, 0xffffffff
105};
106#define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32))
107
108static const u32 barts_cgcg_cgls_disable[] =
109{
110 0x000008f8, 0x00000010, 0xffffffff,
111 0x000008fc, 0xffffffff, 0xffffffff,
112 0x000008f8, 0x00000011, 0xffffffff,
113 0x000008fc, 0xffffffff, 0xffffffff,
114 0x000008f8, 0x00000012, 0xffffffff,
115 0x000008fc, 0xffffffff, 0xffffffff,
116 0x000008f8, 0x00000013, 0xffffffff,
117 0x000008fc, 0xffffffff, 0xffffffff,
118 0x000008f8, 0x00000014, 0xffffffff,
119 0x000008fc, 0xffffffff, 0xffffffff,
120 0x000008f8, 0x00000015, 0xffffffff,
121 0x000008fc, 0xffffffff, 0xffffffff,
122 0x000008f8, 0x00000016, 0xffffffff,
123 0x000008fc, 0xffffffff, 0xffffffff,
124 0x000008f8, 0x00000017, 0xffffffff,
125 0x000008fc, 0xffffffff, 0xffffffff,
126 0x000008f8, 0x00000018, 0xffffffff,
127 0x000008fc, 0xffffffff, 0xffffffff,
128 0x000008f8, 0x00000019, 0xffffffff,
129 0x000008fc, 0xffffffff, 0xffffffff,
130 0x000008f8, 0x0000001a, 0xffffffff,
131 0x000008fc, 0xffffffff, 0xffffffff,
132 0x000008f8, 0x0000001b, 0xffffffff,
133 0x000008fc, 0xffffffff, 0xffffffff,
134 0x000008f8, 0x00000020, 0xffffffff,
135 0x000008fc, 0x00000000, 0xffffffff,
136 0x000008f8, 0x00000021, 0xffffffff,
137 0x000008fc, 0x00000000, 0xffffffff,
138 0x000008f8, 0x00000022, 0xffffffff,
139 0x000008fc, 0x00000000, 0xffffffff,
140 0x000008f8, 0x00000023, 0xffffffff,
141 0x000008fc, 0x00000000, 0xffffffff,
142 0x000008f8, 0x00000024, 0xffffffff,
143 0x000008fc, 0x00000000, 0xffffffff,
144 0x000008f8, 0x00000025, 0xffffffff,
145 0x000008fc, 0x00000000, 0xffffffff,
146 0x000008f8, 0x00000026, 0xffffffff,
147 0x000008fc, 0x00000000, 0xffffffff,
148 0x000008f8, 0x00000027, 0xffffffff,
149 0x000008fc, 0x00000000, 0xffffffff,
150 0x000008f8, 0x00000028, 0xffffffff,
151 0x000008fc, 0x00000000, 0xffffffff,
152 0x000008f8, 0x00000029, 0xffffffff,
153 0x000008fc, 0x00000000, 0xffffffff,
154 0x000008f8, 0x0000002a, 0xffffffff,
155 0x000008fc, 0x00000000, 0xffffffff,
156 0x000008f8, 0x0000002b, 0xffffffff,
157 0x000008fc, 0x00000000, 0xffffffff,
158 0x00000644, 0x000f7912, 0x001f4180,
159 0x00000644, 0x000f3812, 0x001f4180
160};
161#define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32))
162
163static const u32 barts_cgcg_cgls_enable[] =
164{
165 /* 0x0000c124, 0x84180000, 0x00180000, */
166 0x00000644, 0x000f7892, 0x001f4080,
167 0x000008f8, 0x00000010, 0xffffffff,
168 0x000008fc, 0x00000000, 0xffffffff,
169 0x000008f8, 0x00000011, 0xffffffff,
170 0x000008fc, 0x00000000, 0xffffffff,
171 0x000008f8, 0x00000012, 0xffffffff,
172 0x000008fc, 0x00000000, 0xffffffff,
173 0x000008f8, 0x00000013, 0xffffffff,
174 0x000008fc, 0x00000000, 0xffffffff,
175 0x000008f8, 0x00000014, 0xffffffff,
176 0x000008fc, 0x00000000, 0xffffffff,
177 0x000008f8, 0x00000015, 0xffffffff,
178 0x000008fc, 0x00000000, 0xffffffff,
179 0x000008f8, 0x00000016, 0xffffffff,
180 0x000008fc, 0x00000000, 0xffffffff,
181 0x000008f8, 0x00000017, 0xffffffff,
182 0x000008fc, 0x00000000, 0xffffffff,
183 0x000008f8, 0x00000018, 0xffffffff,
184 0x000008fc, 0x00000000, 0xffffffff,
185 0x000008f8, 0x00000019, 0xffffffff,
186 0x000008fc, 0x00000000, 0xffffffff,
187 0x000008f8, 0x0000001a, 0xffffffff,
188 0x000008fc, 0x00000000, 0xffffffff,
189 0x000008f8, 0x0000001b, 0xffffffff,
190 0x000008fc, 0x00000000, 0xffffffff,
191 0x000008f8, 0x00000020, 0xffffffff,
192 0x000008fc, 0xffffffff, 0xffffffff,
193 0x000008f8, 0x00000021, 0xffffffff,
194 0x000008fc, 0xffffffff, 0xffffffff,
195 0x000008f8, 0x00000022, 0xffffffff,
196 0x000008fc, 0xffffffff, 0xffffffff,
197 0x000008f8, 0x00000023, 0xffffffff,
198 0x000008fc, 0xffffffff, 0xffffffff,
199 0x000008f8, 0x00000024, 0xffffffff,
200 0x000008fc, 0xffffffff, 0xffffffff,
201 0x000008f8, 0x00000025, 0xffffffff,
202 0x000008fc, 0xffffffff, 0xffffffff,
203 0x000008f8, 0x00000026, 0xffffffff,
204 0x000008fc, 0xffffffff, 0xffffffff,
205 0x000008f8, 0x00000027, 0xffffffff,
206 0x000008fc, 0xffffffff, 0xffffffff,
207 0x000008f8, 0x00000028, 0xffffffff,
208 0x000008fc, 0xffffffff, 0xffffffff,
209 0x000008f8, 0x00000029, 0xffffffff,
210 0x000008fc, 0xffffffff, 0xffffffff,
211 0x000008f8, 0x0000002a, 0xffffffff,
212 0x000008fc, 0xffffffff, 0xffffffff,
213 0x000008f8, 0x0000002b, 0xffffffff,
214 0x000008fc, 0xffffffff, 0xffffffff
215};
216#define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32))
217
218static const u32 barts_mgcg_default[] =
219{
220 0x0000802c, 0xc0000000, 0xffffffff,
221 0x00005448, 0x00000100, 0xffffffff,
222 0x000055e4, 0x00600100, 0xffffffff,
223 0x0000160c, 0x00000100, 0xffffffff,
224 0x0000c164, 0x00000100, 0xffffffff,
225 0x00008a18, 0x00000100, 0xffffffff,
226 0x0000897c, 0x06000100, 0xffffffff,
227 0x00008b28, 0x00000100, 0xffffffff,
228 0x00009144, 0x00000100, 0xffffffff,
229 0x00009a60, 0x00000100, 0xffffffff,
230 0x00009868, 0x00000100, 0xffffffff,
231 0x00008d58, 0x00000100, 0xffffffff,
232 0x00009510, 0x00000100, 0xffffffff,
233 0x0000949c, 0x00000100, 0xffffffff,
234 0x00009654, 0x00000100, 0xffffffff,
235 0x00009030, 0x00000100, 0xffffffff,
236 0x00009034, 0x00000100, 0xffffffff,
237 0x00009038, 0x00000100, 0xffffffff,
238 0x0000903c, 0x00000100, 0xffffffff,
239 0x00009040, 0x00000100, 0xffffffff,
240 0x0000a200, 0x00000100, 0xffffffff,
241 0x0000a204, 0x00000100, 0xffffffff,
242 0x0000a208, 0x00000100, 0xffffffff,
243 0x0000a20c, 0x00000100, 0xffffffff,
244 0x0000977c, 0x00000100, 0xffffffff,
245 0x00003f80, 0x00000100, 0xffffffff,
246 0x0000a210, 0x00000100, 0xffffffff,
247 0x0000a214, 0x00000100, 0xffffffff,
248 0x000004d8, 0x00000100, 0xffffffff,
249 0x00009784, 0x00000100, 0xffffffff,
250 0x00009698, 0x00000100, 0xffffffff,
251 0x000004d4, 0x00000200, 0xffffffff,
252 0x000004d0, 0x00000000, 0xffffffff,
253 0x000030cc, 0x00000100, 0xffffffff,
254 0x0000d0c0, 0xff000100, 0xffffffff,
255 0x0000802c, 0x40000000, 0xffffffff,
256 0x0000915c, 0x00010000, 0xffffffff,
257 0x00009160, 0x00030002, 0xffffffff,
258 0x00009164, 0x00050004, 0xffffffff,
259 0x00009168, 0x00070006, 0xffffffff,
260 0x00009178, 0x00070000, 0xffffffff,
261 0x0000917c, 0x00030002, 0xffffffff,
262 0x00009180, 0x00050004, 0xffffffff,
263 0x0000918c, 0x00010006, 0xffffffff,
264 0x00009190, 0x00090008, 0xffffffff,
265 0x00009194, 0x00070000, 0xffffffff,
266 0x00009198, 0x00030002, 0xffffffff,
267 0x0000919c, 0x00050004, 0xffffffff,
268 0x000091a8, 0x00010006, 0xffffffff,
269 0x000091ac, 0x00090008, 0xffffffff,
270 0x000091b0, 0x00070000, 0xffffffff,
271 0x000091b4, 0x00030002, 0xffffffff,
272 0x000091b8, 0x00050004, 0xffffffff,
273 0x000091c4, 0x00010006, 0xffffffff,
274 0x000091c8, 0x00090008, 0xffffffff,
275 0x000091cc, 0x00070000, 0xffffffff,
276 0x000091d0, 0x00030002, 0xffffffff,
277 0x000091d4, 0x00050004, 0xffffffff,
278 0x000091e0, 0x00010006, 0xffffffff,
279 0x000091e4, 0x00090008, 0xffffffff,
280 0x000091e8, 0x00000000, 0xffffffff,
281 0x000091ec, 0x00070000, 0xffffffff,
282 0x000091f0, 0x00030002, 0xffffffff,
283 0x000091f4, 0x00050004, 0xffffffff,
284 0x00009200, 0x00010006, 0xffffffff,
285 0x00009204, 0x00090008, 0xffffffff,
286 0x00009208, 0x00070000, 0xffffffff,
287 0x0000920c, 0x00030002, 0xffffffff,
288 0x00009210, 0x00050004, 0xffffffff,
289 0x0000921c, 0x00010006, 0xffffffff,
290 0x00009220, 0x00090008, 0xffffffff,
291 0x00009224, 0x00070000, 0xffffffff,
292 0x00009228, 0x00030002, 0xffffffff,
293 0x0000922c, 0x00050004, 0xffffffff,
294 0x00009238, 0x00010006, 0xffffffff,
295 0x0000923c, 0x00090008, 0xffffffff,
296 0x00009294, 0x00000000, 0xffffffff,
297 0x0000802c, 0x40010000, 0xffffffff,
298 0x0000915c, 0x00010000, 0xffffffff,
299 0x00009160, 0x00030002, 0xffffffff,
300 0x00009164, 0x00050004, 0xffffffff,
301 0x00009168, 0x00070006, 0xffffffff,
302 0x00009178, 0x00070000, 0xffffffff,
303 0x0000917c, 0x00030002, 0xffffffff,
304 0x00009180, 0x00050004, 0xffffffff,
305 0x0000918c, 0x00010006, 0xffffffff,
306 0x00009190, 0x00090008, 0xffffffff,
307 0x00009194, 0x00070000, 0xffffffff,
308 0x00009198, 0x00030002, 0xffffffff,
309 0x0000919c, 0x00050004, 0xffffffff,
310 0x000091a8, 0x00010006, 0xffffffff,
311 0x000091ac, 0x00090008, 0xffffffff,
312 0x000091b0, 0x00070000, 0xffffffff,
313 0x000091b4, 0x00030002, 0xffffffff,
314 0x000091b8, 0x00050004, 0xffffffff,
315 0x000091c4, 0x00010006, 0xffffffff,
316 0x000091c8, 0x00090008, 0xffffffff,
317 0x000091cc, 0x00070000, 0xffffffff,
318 0x000091d0, 0x00030002, 0xffffffff,
319 0x000091d4, 0x00050004, 0xffffffff,
320 0x000091e0, 0x00010006, 0xffffffff,
321 0x000091e4, 0x00090008, 0xffffffff,
322 0x000091e8, 0x00000000, 0xffffffff,
323 0x000091ec, 0x00070000, 0xffffffff,
324 0x000091f0, 0x00030002, 0xffffffff,
325 0x000091f4, 0x00050004, 0xffffffff,
326 0x00009200, 0x00010006, 0xffffffff,
327 0x00009204, 0x00090008, 0xffffffff,
328 0x00009208, 0x00070000, 0xffffffff,
329 0x0000920c, 0x00030002, 0xffffffff,
330 0x00009210, 0x00050004, 0xffffffff,
331 0x0000921c, 0x00010006, 0xffffffff,
332 0x00009220, 0x00090008, 0xffffffff,
333 0x00009224, 0x00070000, 0xffffffff,
334 0x00009228, 0x00030002, 0xffffffff,
335 0x0000922c, 0x00050004, 0xffffffff,
336 0x00009238, 0x00010006, 0xffffffff,
337 0x0000923c, 0x00090008, 0xffffffff,
338 0x00009294, 0x00000000, 0xffffffff,
339 0x0000802c, 0xc0000000, 0xffffffff,
340 0x000008f8, 0x00000010, 0xffffffff,
341 0x000008fc, 0x00000000, 0xffffffff,
342 0x000008f8, 0x00000011, 0xffffffff,
343 0x000008fc, 0x00000000, 0xffffffff,
344 0x000008f8, 0x00000012, 0xffffffff,
345 0x000008fc, 0x00000000, 0xffffffff,
346 0x000008f8, 0x00000013, 0xffffffff,
347 0x000008fc, 0x00000000, 0xffffffff,
348 0x000008f8, 0x00000014, 0xffffffff,
349 0x000008fc, 0x00000000, 0xffffffff,
350 0x000008f8, 0x00000015, 0xffffffff,
351 0x000008fc, 0x00000000, 0xffffffff,
352 0x000008f8, 0x00000016, 0xffffffff,
353 0x000008fc, 0x00000000, 0xffffffff,
354 0x000008f8, 0x00000017, 0xffffffff,
355 0x000008fc, 0x00000000, 0xffffffff,
356 0x000008f8, 0x00000018, 0xffffffff,
357 0x000008fc, 0x00000000, 0xffffffff,
358 0x000008f8, 0x00000019, 0xffffffff,
359 0x000008fc, 0x00000000, 0xffffffff,
360 0x000008f8, 0x0000001a, 0xffffffff,
361 0x000008fc, 0x00000000, 0xffffffff,
362 0x000008f8, 0x0000001b, 0xffffffff,
363 0x000008fc, 0x00000000, 0xffffffff
364};
365#define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32))
366
367static const u32 barts_mgcg_disable[] =
368{
369 0x0000802c, 0xc0000000, 0xffffffff,
370 0x000008f8, 0x00000000, 0xffffffff,
371 0x000008fc, 0xffffffff, 0xffffffff,
372 0x000008f8, 0x00000001, 0xffffffff,
373 0x000008fc, 0xffffffff, 0xffffffff,
374 0x000008f8, 0x00000002, 0xffffffff,
375 0x000008fc, 0xffffffff, 0xffffffff,
376 0x000008f8, 0x00000003, 0xffffffff,
377 0x000008fc, 0xffffffff, 0xffffffff,
378 0x00009150, 0x00600000, 0xffffffff
379};
380#define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32))
381
382static const u32 barts_mgcg_enable[] =
383{
384 0x0000802c, 0xc0000000, 0xffffffff,
385 0x000008f8, 0x00000000, 0xffffffff,
386 0x000008fc, 0x00000000, 0xffffffff,
387 0x000008f8, 0x00000001, 0xffffffff,
388 0x000008fc, 0x00000000, 0xffffffff,
389 0x000008f8, 0x00000002, 0xffffffff,
390 0x000008fc, 0x00000000, 0xffffffff,
391 0x000008f8, 0x00000003, 0xffffffff,
392 0x000008fc, 0x00000000, 0xffffffff,
393 0x00009150, 0x81944000, 0xffffffff
394};
395#define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32))
396
397//********* CAICOS **************//
398static const u32 caicos_cgcg_cgls_default[] =
399{
400 0x000008f8, 0x00000010, 0xffffffff,
401 0x000008fc, 0x00000000, 0xffffffff,
402 0x000008f8, 0x00000011, 0xffffffff,
403 0x000008fc, 0x00000000, 0xffffffff,
404 0x000008f8, 0x00000012, 0xffffffff,
405 0x000008fc, 0x00000000, 0xffffffff,
406 0x000008f8, 0x00000013, 0xffffffff,
407 0x000008fc, 0x00000000, 0xffffffff,
408 0x000008f8, 0x00000014, 0xffffffff,
409 0x000008fc, 0x00000000, 0xffffffff,
410 0x000008f8, 0x00000015, 0xffffffff,
411 0x000008fc, 0x00000000, 0xffffffff,
412 0x000008f8, 0x00000016, 0xffffffff,
413 0x000008fc, 0x00000000, 0xffffffff,
414 0x000008f8, 0x00000017, 0xffffffff,
415 0x000008fc, 0x00000000, 0xffffffff,
416 0x000008f8, 0x00000018, 0xffffffff,
417 0x000008fc, 0x00000000, 0xffffffff,
418 0x000008f8, 0x00000019, 0xffffffff,
419 0x000008fc, 0x00000000, 0xffffffff,
420 0x000008f8, 0x0000001a, 0xffffffff,
421 0x000008fc, 0x00000000, 0xffffffff,
422 0x000008f8, 0x0000001b, 0xffffffff,
423 0x000008fc, 0x00000000, 0xffffffff,
424 0x000008f8, 0x00000020, 0xffffffff,
425 0x000008fc, 0x00000000, 0xffffffff,
426 0x000008f8, 0x00000021, 0xffffffff,
427 0x000008fc, 0x00000000, 0xffffffff,
428 0x000008f8, 0x00000022, 0xffffffff,
429 0x000008fc, 0x00000000, 0xffffffff,
430 0x000008f8, 0x00000023, 0xffffffff,
431 0x000008fc, 0x00000000, 0xffffffff,
432 0x000008f8, 0x00000024, 0xffffffff,
433 0x000008fc, 0x00000000, 0xffffffff,
434 0x000008f8, 0x00000025, 0xffffffff,
435 0x000008fc, 0x00000000, 0xffffffff,
436 0x000008f8, 0x00000026, 0xffffffff,
437 0x000008fc, 0x00000000, 0xffffffff,
438 0x000008f8, 0x00000027, 0xffffffff,
439 0x000008fc, 0x00000000, 0xffffffff,
440 0x000008f8, 0x00000028, 0xffffffff,
441 0x000008fc, 0x00000000, 0xffffffff,
442 0x000008f8, 0x00000029, 0xffffffff,
443 0x000008fc, 0x00000000, 0xffffffff,
444 0x000008f8, 0x0000002a, 0xffffffff,
445 0x000008fc, 0x00000000, 0xffffffff,
446 0x000008f8, 0x0000002b, 0xffffffff,
447 0x000008fc, 0x00000000, 0xffffffff
448};
449#define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32))
450
451static const u32 caicos_cgcg_cgls_disable[] =
452{
453 0x000008f8, 0x00000010, 0xffffffff,
454 0x000008fc, 0xffffffff, 0xffffffff,
455 0x000008f8, 0x00000011, 0xffffffff,
456 0x000008fc, 0xffffffff, 0xffffffff,
457 0x000008f8, 0x00000012, 0xffffffff,
458 0x000008fc, 0xffffffff, 0xffffffff,
459 0x000008f8, 0x00000013, 0xffffffff,
460 0x000008fc, 0xffffffff, 0xffffffff,
461 0x000008f8, 0x00000014, 0xffffffff,
462 0x000008fc, 0xffffffff, 0xffffffff,
463 0x000008f8, 0x00000015, 0xffffffff,
464 0x000008fc, 0xffffffff, 0xffffffff,
465 0x000008f8, 0x00000016, 0xffffffff,
466 0x000008fc, 0xffffffff, 0xffffffff,
467 0x000008f8, 0x00000017, 0xffffffff,
468 0x000008fc, 0xffffffff, 0xffffffff,
469 0x000008f8, 0x00000018, 0xffffffff,
470 0x000008fc, 0xffffffff, 0xffffffff,
471 0x000008f8, 0x00000019, 0xffffffff,
472 0x000008fc, 0xffffffff, 0xffffffff,
473 0x000008f8, 0x0000001a, 0xffffffff,
474 0x000008fc, 0xffffffff, 0xffffffff,
475 0x000008f8, 0x0000001b, 0xffffffff,
476 0x000008fc, 0xffffffff, 0xffffffff,
477 0x000008f8, 0x00000020, 0xffffffff,
478 0x000008fc, 0x00000000, 0xffffffff,
479 0x000008f8, 0x00000021, 0xffffffff,
480 0x000008fc, 0x00000000, 0xffffffff,
481 0x000008f8, 0x00000022, 0xffffffff,
482 0x000008fc, 0x00000000, 0xffffffff,
483 0x000008f8, 0x00000023, 0xffffffff,
484 0x000008fc, 0x00000000, 0xffffffff,
485 0x000008f8, 0x00000024, 0xffffffff,
486 0x000008fc, 0x00000000, 0xffffffff,
487 0x000008f8, 0x00000025, 0xffffffff,
488 0x000008fc, 0x00000000, 0xffffffff,
489 0x000008f8, 0x00000026, 0xffffffff,
490 0x000008fc, 0x00000000, 0xffffffff,
491 0x000008f8, 0x00000027, 0xffffffff,
492 0x000008fc, 0x00000000, 0xffffffff,
493 0x000008f8, 0x00000028, 0xffffffff,
494 0x000008fc, 0x00000000, 0xffffffff,
495 0x000008f8, 0x00000029, 0xffffffff,
496 0x000008fc, 0x00000000, 0xffffffff,
497 0x000008f8, 0x0000002a, 0xffffffff,
498 0x000008fc, 0x00000000, 0xffffffff,
499 0x000008f8, 0x0000002b, 0xffffffff,
500 0x000008fc, 0x00000000, 0xffffffff,
501 0x00000644, 0x000f7912, 0x001f4180,
502 0x00000644, 0x000f3812, 0x001f4180
503};
504#define CAICOS_CGCG_CGLS_DISABLE_LENGTH sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32))
505
506static const u32 caicos_cgcg_cgls_enable[] =
507{
508 /* 0x0000c124, 0x84180000, 0x00180000, */
509 0x00000644, 0x000f7892, 0x001f4080,
510 0x000008f8, 0x00000010, 0xffffffff,
511 0x000008fc, 0x00000000, 0xffffffff,
512 0x000008f8, 0x00000011, 0xffffffff,
513 0x000008fc, 0x00000000, 0xffffffff,
514 0x000008f8, 0x00000012, 0xffffffff,
515 0x000008fc, 0x00000000, 0xffffffff,
516 0x000008f8, 0x00000013, 0xffffffff,
517 0x000008fc, 0x00000000, 0xffffffff,
518 0x000008f8, 0x00000014, 0xffffffff,
519 0x000008fc, 0x00000000, 0xffffffff,
520 0x000008f8, 0x00000015, 0xffffffff,
521 0x000008fc, 0x00000000, 0xffffffff,
522 0x000008f8, 0x00000016, 0xffffffff,
523 0x000008fc, 0x00000000, 0xffffffff,
524 0x000008f8, 0x00000017, 0xffffffff,
525 0x000008fc, 0x00000000, 0xffffffff,
526 0x000008f8, 0x00000018, 0xffffffff,
527 0x000008fc, 0x00000000, 0xffffffff,
528 0x000008f8, 0x00000019, 0xffffffff,
529 0x000008fc, 0x00000000, 0xffffffff,
530 0x000008f8, 0x0000001a, 0xffffffff,
531 0x000008fc, 0x00000000, 0xffffffff,
532 0x000008f8, 0x0000001b, 0xffffffff,
533 0x000008fc, 0x00000000, 0xffffffff,
534 0x000008f8, 0x00000020, 0xffffffff,
535 0x000008fc, 0xffffffff, 0xffffffff,
536 0x000008f8, 0x00000021, 0xffffffff,
537 0x000008fc, 0xffffffff, 0xffffffff,
538 0x000008f8, 0x00000022, 0xffffffff,
539 0x000008fc, 0xffffffff, 0xffffffff,
540 0x000008f8, 0x00000023, 0xffffffff,
541 0x000008fc, 0xffffffff, 0xffffffff,
542 0x000008f8, 0x00000024, 0xffffffff,
543 0x000008fc, 0xffffffff, 0xffffffff,
544 0x000008f8, 0x00000025, 0xffffffff,
545 0x000008fc, 0xffffffff, 0xffffffff,
546 0x000008f8, 0x00000026, 0xffffffff,
547 0x000008fc, 0xffffffff, 0xffffffff,
548 0x000008f8, 0x00000027, 0xffffffff,
549 0x000008fc, 0xffffffff, 0xffffffff,
550 0x000008f8, 0x00000028, 0xffffffff,
551 0x000008fc, 0xffffffff, 0xffffffff,
552 0x000008f8, 0x00000029, 0xffffffff,
553 0x000008fc, 0xffffffff, 0xffffffff,
554 0x000008f8, 0x0000002a, 0xffffffff,
555 0x000008fc, 0xffffffff, 0xffffffff,
556 0x000008f8, 0x0000002b, 0xffffffff,
557 0x000008fc, 0xffffffff, 0xffffffff
558};
559#define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32))
560
561static const u32 caicos_mgcg_default[] =
562{
563 0x0000802c, 0xc0000000, 0xffffffff,
564 0x00005448, 0x00000100, 0xffffffff,
565 0x000055e4, 0x00600100, 0xffffffff,
566 0x0000160c, 0x00000100, 0xffffffff,
567 0x0000c164, 0x00000100, 0xffffffff,
568 0x00008a18, 0x00000100, 0xffffffff,
569 0x0000897c, 0x06000100, 0xffffffff,
570 0x00008b28, 0x00000100, 0xffffffff,
571 0x00009144, 0x00000100, 0xffffffff,
572 0x00009a60, 0x00000100, 0xffffffff,
573 0x00009868, 0x00000100, 0xffffffff,
574 0x00008d58, 0x00000100, 0xffffffff,
575 0x00009510, 0x00000100, 0xffffffff,
576 0x0000949c, 0x00000100, 0xffffffff,
577 0x00009654, 0x00000100, 0xffffffff,
578 0x00009030, 0x00000100, 0xffffffff,
579 0x00009034, 0x00000100, 0xffffffff,
580 0x00009038, 0x00000100, 0xffffffff,
581 0x0000903c, 0x00000100, 0xffffffff,
582 0x00009040, 0x00000100, 0xffffffff,
583 0x0000a200, 0x00000100, 0xffffffff,
584 0x0000a204, 0x00000100, 0xffffffff,
585 0x0000a208, 0x00000100, 0xffffffff,
586 0x0000a20c, 0x00000100, 0xffffffff,
587 0x0000977c, 0x00000100, 0xffffffff,
588 0x00003f80, 0x00000100, 0xffffffff,
589 0x0000a210, 0x00000100, 0xffffffff,
590 0x0000a214, 0x00000100, 0xffffffff,
591 0x000004d8, 0x00000100, 0xffffffff,
592 0x00009784, 0x00000100, 0xffffffff,
593 0x00009698, 0x00000100, 0xffffffff,
594 0x000004d4, 0x00000200, 0xffffffff,
595 0x000004d0, 0x00000000, 0xffffffff,
596 0x000030cc, 0x00000100, 0xffffffff,
597 0x0000d0c0, 0xff000100, 0xffffffff,
598 0x0000915c, 0x00010000, 0xffffffff,
599 0x00009160, 0x00030002, 0xffffffff,
600 0x00009164, 0x00050004, 0xffffffff,
601 0x00009168, 0x00070006, 0xffffffff,
602 0x00009178, 0x00070000, 0xffffffff,
603 0x0000917c, 0x00030002, 0xffffffff,
604 0x00009180, 0x00050004, 0xffffffff,
605 0x0000918c, 0x00010006, 0xffffffff,
606 0x00009190, 0x00090008, 0xffffffff,
607 0x00009194, 0x00070000, 0xffffffff,
608 0x00009198, 0x00030002, 0xffffffff,
609 0x0000919c, 0x00050004, 0xffffffff,
610 0x000091a8, 0x00010006, 0xffffffff,
611 0x000091ac, 0x00090008, 0xffffffff,
612 0x000091e8, 0x00000000, 0xffffffff,
613 0x00009294, 0x00000000, 0xffffffff,
614 0x000008f8, 0x00000010, 0xffffffff,
615 0x000008fc, 0x00000000, 0xffffffff,
616 0x000008f8, 0x00000011, 0xffffffff,
617 0x000008fc, 0x00000000, 0xffffffff,
618 0x000008f8, 0x00000012, 0xffffffff,
619 0x000008fc, 0x00000000, 0xffffffff,
620 0x000008f8, 0x00000013, 0xffffffff,
621 0x000008fc, 0x00000000, 0xffffffff,
622 0x000008f8, 0x00000014, 0xffffffff,
623 0x000008fc, 0x00000000, 0xffffffff,
624 0x000008f8, 0x00000015, 0xffffffff,
625 0x000008fc, 0x00000000, 0xffffffff,
626 0x000008f8, 0x00000016, 0xffffffff,
627 0x000008fc, 0x00000000, 0xffffffff,
628 0x000008f8, 0x00000017, 0xffffffff,
629 0x000008fc, 0x00000000, 0xffffffff,
630 0x000008f8, 0x00000018, 0xffffffff,
631 0x000008fc, 0x00000000, 0xffffffff,
632 0x000008f8, 0x00000019, 0xffffffff,
633 0x000008fc, 0x00000000, 0xffffffff,
634 0x000008f8, 0x0000001a, 0xffffffff,
635 0x000008fc, 0x00000000, 0xffffffff,
636 0x000008f8, 0x0000001b, 0xffffffff,
637 0x000008fc, 0x00000000, 0xffffffff
638};
639#define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32))
640
641static const u32 caicos_mgcg_disable[] =
642{
643 0x0000802c, 0xc0000000, 0xffffffff,
644 0x000008f8, 0x00000000, 0xffffffff,
645 0x000008fc, 0xffffffff, 0xffffffff,
646 0x000008f8, 0x00000001, 0xffffffff,
647 0x000008fc, 0xffffffff, 0xffffffff,
648 0x000008f8, 0x00000002, 0xffffffff,
649 0x000008fc, 0xffffffff, 0xffffffff,
650 0x000008f8, 0x00000003, 0xffffffff,
651 0x000008fc, 0xffffffff, 0xffffffff,
652 0x00009150, 0x00600000, 0xffffffff
653};
654#define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32))
655
656static const u32 caicos_mgcg_enable[] =
657{
658 0x0000802c, 0xc0000000, 0xffffffff,
659 0x000008f8, 0x00000000, 0xffffffff,
660 0x000008fc, 0x00000000, 0xffffffff,
661 0x000008f8, 0x00000001, 0xffffffff,
662 0x000008fc, 0x00000000, 0xffffffff,
663 0x000008f8, 0x00000002, 0xffffffff,
664 0x000008fc, 0x00000000, 0xffffffff,
665 0x000008f8, 0x00000003, 0xffffffff,
666 0x000008fc, 0x00000000, 0xffffffff,
667 0x00009150, 0x46944040, 0xffffffff
668};
669#define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32))
670
671//********* TURKS **************//
672static const u32 turks_cgcg_cgls_default[] =
673{
674 0x000008f8, 0x00000010, 0xffffffff,
675 0x000008fc, 0x00000000, 0xffffffff,
676 0x000008f8, 0x00000011, 0xffffffff,
677 0x000008fc, 0x00000000, 0xffffffff,
678 0x000008f8, 0x00000012, 0xffffffff,
679 0x000008fc, 0x00000000, 0xffffffff,
680 0x000008f8, 0x00000013, 0xffffffff,
681 0x000008fc, 0x00000000, 0xffffffff,
682 0x000008f8, 0x00000014, 0xffffffff,
683 0x000008fc, 0x00000000, 0xffffffff,
684 0x000008f8, 0x00000015, 0xffffffff,
685 0x000008fc, 0x00000000, 0xffffffff,
686 0x000008f8, 0x00000016, 0xffffffff,
687 0x000008fc, 0x00000000, 0xffffffff,
688 0x000008f8, 0x00000017, 0xffffffff,
689 0x000008fc, 0x00000000, 0xffffffff,
690 0x000008f8, 0x00000018, 0xffffffff,
691 0x000008fc, 0x00000000, 0xffffffff,
692 0x000008f8, 0x00000019, 0xffffffff,
693 0x000008fc, 0x00000000, 0xffffffff,
694 0x000008f8, 0x0000001a, 0xffffffff,
695 0x000008fc, 0x00000000, 0xffffffff,
696 0x000008f8, 0x0000001b, 0xffffffff,
697 0x000008fc, 0x00000000, 0xffffffff,
698 0x000008f8, 0x00000020, 0xffffffff,
699 0x000008fc, 0x00000000, 0xffffffff,
700 0x000008f8, 0x00000021, 0xffffffff,
701 0x000008fc, 0x00000000, 0xffffffff,
702 0x000008f8, 0x00000022, 0xffffffff,
703 0x000008fc, 0x00000000, 0xffffffff,
704 0x000008f8, 0x00000023, 0xffffffff,
705 0x000008fc, 0x00000000, 0xffffffff,
706 0x000008f8, 0x00000024, 0xffffffff,
707 0x000008fc, 0x00000000, 0xffffffff,
708 0x000008f8, 0x00000025, 0xffffffff,
709 0x000008fc, 0x00000000, 0xffffffff,
710 0x000008f8, 0x00000026, 0xffffffff,
711 0x000008fc, 0x00000000, 0xffffffff,
712 0x000008f8, 0x00000027, 0xffffffff,
713 0x000008fc, 0x00000000, 0xffffffff,
714 0x000008f8, 0x00000028, 0xffffffff,
715 0x000008fc, 0x00000000, 0xffffffff,
716 0x000008f8, 0x00000029, 0xffffffff,
717 0x000008fc, 0x00000000, 0xffffffff,
718 0x000008f8, 0x0000002a, 0xffffffff,
719 0x000008fc, 0x00000000, 0xffffffff,
720 0x000008f8, 0x0000002b, 0xffffffff,
721 0x000008fc, 0x00000000, 0xffffffff
722};
723#define TURKS_CGCG_CGLS_DEFAULT_LENGTH sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32))
724
725static const u32 turks_cgcg_cgls_disable[] =
726{
727 0x000008f8, 0x00000010, 0xffffffff,
728 0x000008fc, 0xffffffff, 0xffffffff,
729 0x000008f8, 0x00000011, 0xffffffff,
730 0x000008fc, 0xffffffff, 0xffffffff,
731 0x000008f8, 0x00000012, 0xffffffff,
732 0x000008fc, 0xffffffff, 0xffffffff,
733 0x000008f8, 0x00000013, 0xffffffff,
734 0x000008fc, 0xffffffff, 0xffffffff,
735 0x000008f8, 0x00000014, 0xffffffff,
736 0x000008fc, 0xffffffff, 0xffffffff,
737 0x000008f8, 0x00000015, 0xffffffff,
738 0x000008fc, 0xffffffff, 0xffffffff,
739 0x000008f8, 0x00000016, 0xffffffff,
740 0x000008fc, 0xffffffff, 0xffffffff,
741 0x000008f8, 0x00000017, 0xffffffff,
742 0x000008fc, 0xffffffff, 0xffffffff,
743 0x000008f8, 0x00000018, 0xffffffff,
744 0x000008fc, 0xffffffff, 0xffffffff,
745 0x000008f8, 0x00000019, 0xffffffff,
746 0x000008fc, 0xffffffff, 0xffffffff,
747 0x000008f8, 0x0000001a, 0xffffffff,
748 0x000008fc, 0xffffffff, 0xffffffff,
749 0x000008f8, 0x0000001b, 0xffffffff,
750 0x000008fc, 0xffffffff, 0xffffffff,
751 0x000008f8, 0x00000020, 0xffffffff,
752 0x000008fc, 0x00000000, 0xffffffff,
753 0x000008f8, 0x00000021, 0xffffffff,
754 0x000008fc, 0x00000000, 0xffffffff,
755 0x000008f8, 0x00000022, 0xffffffff,
756 0x000008fc, 0x00000000, 0xffffffff,
757 0x000008f8, 0x00000023, 0xffffffff,
758 0x000008fc, 0x00000000, 0xffffffff,
759 0x000008f8, 0x00000024, 0xffffffff,
760 0x000008fc, 0x00000000, 0xffffffff,
761 0x000008f8, 0x00000025, 0xffffffff,
762 0x000008fc, 0x00000000, 0xffffffff,
763 0x000008f8, 0x00000026, 0xffffffff,
764 0x000008fc, 0x00000000, 0xffffffff,
765 0x000008f8, 0x00000027, 0xffffffff,
766 0x000008fc, 0x00000000, 0xffffffff,
767 0x000008f8, 0x00000028, 0xffffffff,
768 0x000008fc, 0x00000000, 0xffffffff,
769 0x000008f8, 0x00000029, 0xffffffff,
770 0x000008fc, 0x00000000, 0xffffffff,
771 0x000008f8, 0x0000002a, 0xffffffff,
772 0x000008fc, 0x00000000, 0xffffffff,
773 0x000008f8, 0x0000002b, 0xffffffff,
774 0x000008fc, 0x00000000, 0xffffffff,
775 0x00000644, 0x000f7912, 0x001f4180,
776 0x00000644, 0x000f3812, 0x001f4180
777};
778#define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32))
779
780static const u32 turks_cgcg_cgls_enable[] =
781{
782 /* 0x0000c124, 0x84180000, 0x00180000, */
783 0x00000644, 0x000f7892, 0x001f4080,
784 0x000008f8, 0x00000010, 0xffffffff,
785 0x000008fc, 0x00000000, 0xffffffff,
786 0x000008f8, 0x00000011, 0xffffffff,
787 0x000008fc, 0x00000000, 0xffffffff,
788 0x000008f8, 0x00000012, 0xffffffff,
789 0x000008fc, 0x00000000, 0xffffffff,
790 0x000008f8, 0x00000013, 0xffffffff,
791 0x000008fc, 0x00000000, 0xffffffff,
792 0x000008f8, 0x00000014, 0xffffffff,
793 0x000008fc, 0x00000000, 0xffffffff,
794 0x000008f8, 0x00000015, 0xffffffff,
795 0x000008fc, 0x00000000, 0xffffffff,
796 0x000008f8, 0x00000016, 0xffffffff,
797 0x000008fc, 0x00000000, 0xffffffff,
798 0x000008f8, 0x00000017, 0xffffffff,
799 0x000008fc, 0x00000000, 0xffffffff,
800 0x000008f8, 0x00000018, 0xffffffff,
801 0x000008fc, 0x00000000, 0xffffffff,
802 0x000008f8, 0x00000019, 0xffffffff,
803 0x000008fc, 0x00000000, 0xffffffff,
804 0x000008f8, 0x0000001a, 0xffffffff,
805 0x000008fc, 0x00000000, 0xffffffff,
806 0x000008f8, 0x0000001b, 0xffffffff,
807 0x000008fc, 0x00000000, 0xffffffff,
808 0x000008f8, 0x00000020, 0xffffffff,
809 0x000008fc, 0xffffffff, 0xffffffff,
810 0x000008f8, 0x00000021, 0xffffffff,
811 0x000008fc, 0xffffffff, 0xffffffff,
812 0x000008f8, 0x00000022, 0xffffffff,
813 0x000008fc, 0xffffffff, 0xffffffff,
814 0x000008f8, 0x00000023, 0xffffffff,
815 0x000008fc, 0xffffffff, 0xffffffff,
816 0x000008f8, 0x00000024, 0xffffffff,
817 0x000008fc, 0xffffffff, 0xffffffff,
818 0x000008f8, 0x00000025, 0xffffffff,
819 0x000008fc, 0xffffffff, 0xffffffff,
820 0x000008f8, 0x00000026, 0xffffffff,
821 0x000008fc, 0xffffffff, 0xffffffff,
822 0x000008f8, 0x00000027, 0xffffffff,
823 0x000008fc, 0xffffffff, 0xffffffff,
824 0x000008f8, 0x00000028, 0xffffffff,
825 0x000008fc, 0xffffffff, 0xffffffff,
826 0x000008f8, 0x00000029, 0xffffffff,
827 0x000008fc, 0xffffffff, 0xffffffff,
828 0x000008f8, 0x0000002a, 0xffffffff,
829 0x000008fc, 0xffffffff, 0xffffffff,
830 0x000008f8, 0x0000002b, 0xffffffff,
831 0x000008fc, 0xffffffff, 0xffffffff
832};
833#define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32))
834
835 // Turks MGCG (medium grain clock gating) sequences
836static const u32 turks_mgcg_default[] =
837{
838 0x0000802c, 0xc0000000, 0xffffffff,
839 0x00005448, 0x00000100, 0xffffffff,
840 0x000055e4, 0x00600100, 0xffffffff,
841 0x0000160c, 0x00000100, 0xffffffff,
842 0x0000c164, 0x00000100, 0xffffffff,
843 0x00008a18, 0x00000100, 0xffffffff,
844 0x0000897c, 0x06000100, 0xffffffff,
845 0x00008b28, 0x00000100, 0xffffffff,
846 0x00009144, 0x00000100, 0xffffffff,
847 0x00009a60, 0x00000100, 0xffffffff,
848 0x00009868, 0x00000100, 0xffffffff,
849 0x00008d58, 0x00000100, 0xffffffff,
850 0x00009510, 0x00000100, 0xffffffff,
851 0x0000949c, 0x00000100, 0xffffffff,
852 0x00009654, 0x00000100, 0xffffffff,
853 0x00009030, 0x00000100, 0xffffffff,
854 0x00009034, 0x00000100, 0xffffffff,
855 0x00009038, 0x00000100, 0xffffffff,
856 0x0000903c, 0x00000100, 0xffffffff,
857 0x00009040, 0x00000100, 0xffffffff,
858 0x0000a200, 0x00000100, 0xffffffff,
859 0x0000a204, 0x00000100, 0xffffffff,
860 0x0000a208, 0x00000100, 0xffffffff,
861 0x0000a20c, 0x00000100, 0xffffffff,
862 0x0000977c, 0x00000100, 0xffffffff,
863 0x00003f80, 0x00000100, 0xffffffff,
864 0x0000a210, 0x00000100, 0xffffffff,
865 0x0000a214, 0x00000100, 0xffffffff,
866 0x000004d8, 0x00000100, 0xffffffff,
867 0x00009784, 0x00000100, 0xffffffff,
868 0x00009698, 0x00000100, 0xffffffff,
869 0x000004d4, 0x00000200, 0xffffffff,
870 0x000004d0, 0x00000000, 0xffffffff,
871 0x000030cc, 0x00000100, 0xffffffff,
872 0x0000d0c0, 0x00000100, 0xffffffff,
873 0x0000915c, 0x00010000, 0xffffffff,
874 0x00009160, 0x00030002, 0xffffffff,
875 0x00009164, 0x00050004, 0xffffffff,
876 0x00009168, 0x00070006, 0xffffffff,
877 0x00009178, 0x00070000, 0xffffffff,
878 0x0000917c, 0x00030002, 0xffffffff,
879 0x00009180, 0x00050004, 0xffffffff,
880 0x0000918c, 0x00010006, 0xffffffff,
881 0x00009190, 0x00090008, 0xffffffff,
882 0x00009194, 0x00070000, 0xffffffff,
883 0x00009198, 0x00030002, 0xffffffff,
884 0x0000919c, 0x00050004, 0xffffffff,
885 0x000091a8, 0x00010006, 0xffffffff,
886 0x000091ac, 0x00090008, 0xffffffff,
887 0x000091b0, 0x00070000, 0xffffffff,
888 0x000091b4, 0x00030002, 0xffffffff,
889 0x000091b8, 0x00050004, 0xffffffff,
890 0x000091c4, 0x00010006, 0xffffffff,
891 0x000091c8, 0x00090008, 0xffffffff,
892 0x000091cc, 0x00070000, 0xffffffff,
893 0x000091d0, 0x00030002, 0xffffffff,
894 0x000091d4, 0x00050004, 0xffffffff,
895 0x000091e0, 0x00010006, 0xffffffff,
896 0x000091e4, 0x00090008, 0xffffffff,
897 0x000091e8, 0x00000000, 0xffffffff,
898 0x000091ec, 0x00070000, 0xffffffff,
899 0x000091f0, 0x00030002, 0xffffffff,
900 0x000091f4, 0x00050004, 0xffffffff,
901 0x00009200, 0x00010006, 0xffffffff,
902 0x00009204, 0x00090008, 0xffffffff,
903 0x00009208, 0x00070000, 0xffffffff,
904 0x0000920c, 0x00030002, 0xffffffff,
905 0x00009210, 0x00050004, 0xffffffff,
906 0x0000921c, 0x00010006, 0xffffffff,
907 0x00009220, 0x00090008, 0xffffffff,
908 0x00009294, 0x00000000, 0xffffffff,
909 0x000008f8, 0x00000010, 0xffffffff,
910 0x000008fc, 0x00000000, 0xffffffff,
911 0x000008f8, 0x00000011, 0xffffffff,
912 0x000008fc, 0x00000000, 0xffffffff,
913 0x000008f8, 0x00000012, 0xffffffff,
914 0x000008fc, 0x00000000, 0xffffffff,
915 0x000008f8, 0x00000013, 0xffffffff,
916 0x000008fc, 0x00000000, 0xffffffff,
917 0x000008f8, 0x00000014, 0xffffffff,
918 0x000008fc, 0x00000000, 0xffffffff,
919 0x000008f8, 0x00000015, 0xffffffff,
920 0x000008fc, 0x00000000, 0xffffffff,
921 0x000008f8, 0x00000016, 0xffffffff,
922 0x000008fc, 0x00000000, 0xffffffff,
923 0x000008f8, 0x00000017, 0xffffffff,
924 0x000008fc, 0x00000000, 0xffffffff,
925 0x000008f8, 0x00000018, 0xffffffff,
926 0x000008fc, 0x00000000, 0xffffffff,
927 0x000008f8, 0x00000019, 0xffffffff,
928 0x000008fc, 0x00000000, 0xffffffff,
929 0x000008f8, 0x0000001a, 0xffffffff,
930 0x000008fc, 0x00000000, 0xffffffff,
931 0x000008f8, 0x0000001b, 0xffffffff,
932 0x000008fc, 0x00000000, 0xffffffff
933};
934#define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32))
935
936static const u32 turks_mgcg_disable[] =
937{
938 0x0000802c, 0xc0000000, 0xffffffff,
939 0x000008f8, 0x00000000, 0xffffffff,
940 0x000008fc, 0xffffffff, 0xffffffff,
941 0x000008f8, 0x00000001, 0xffffffff,
942 0x000008fc, 0xffffffff, 0xffffffff,
943 0x000008f8, 0x00000002, 0xffffffff,
944 0x000008fc, 0xffffffff, 0xffffffff,
945 0x000008f8, 0x00000003, 0xffffffff,
946 0x000008fc, 0xffffffff, 0xffffffff,
947 0x00009150, 0x00600000, 0xffffffff
948};
949#define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32))
950
951static const u32 turks_mgcg_enable[] =
952{
953 0x0000802c, 0xc0000000, 0xffffffff,
954 0x000008f8, 0x00000000, 0xffffffff,
955 0x000008fc, 0x00000000, 0xffffffff,
956 0x000008f8, 0x00000001, 0xffffffff,
957 0x000008fc, 0x00000000, 0xffffffff,
958 0x000008f8, 0x00000002, 0xffffffff,
959 0x000008fc, 0x00000000, 0xffffffff,
960 0x000008f8, 0x00000003, 0xffffffff,
961 0x000008fc, 0x00000000, 0xffffffff,
962 0x00009150, 0x6e944000, 0xffffffff
963};
964#define TURKS_MGCG_ENABLE_LENGTH sizeof(turks_mgcg_enable) / (3 * sizeof(u32))
965
966#endif
967
968#ifndef BTC_SYSLS_SEQUENCE
969#define BTC_SYSLS_SEQUENCE 100
970
971
972//********* BARTS **************//
973static const u32 barts_sysls_default[] =
974{
975 /* Register, Value, Mask bits */
976 0x000055e8, 0x00000000, 0xffffffff,
977 0x0000d0bc, 0x00000000, 0xffffffff,
978 0x000015c0, 0x000c1401, 0xffffffff,
979 0x0000264c, 0x000c0400, 0xffffffff,
980 0x00002648, 0x000c0400, 0xffffffff,
981 0x00002650, 0x000c0400, 0xffffffff,
982 0x000020b8, 0x000c0400, 0xffffffff,
983 0x000020bc, 0x000c0400, 0xffffffff,
984 0x000020c0, 0x000c0c80, 0xffffffff,
985 0x0000f4a0, 0x000000c0, 0xffffffff,
986 0x0000f4a4, 0x00680fff, 0xffffffff,
987 0x000004c8, 0x00000001, 0xffffffff,
988 0x000064ec, 0x00000000, 0xffffffff,
989 0x00000c7c, 0x00000000, 0xffffffff,
990 0x00006dfc, 0x00000000, 0xffffffff
991};
992#define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32))
993
994static const u32 barts_sysls_disable[] =
995{
996 0x000055e8, 0x00000000, 0xffffffff,
997 0x0000d0bc, 0x00000000, 0xffffffff,
998 0x000015c0, 0x00041401, 0xffffffff,
999 0x0000264c, 0x00040400, 0xffffffff,
1000 0x00002648, 0x00040400, 0xffffffff,
1001 0x00002650, 0x00040400, 0xffffffff,
1002 0x000020b8, 0x00040400, 0xffffffff,
1003 0x000020bc, 0x00040400, 0xffffffff,
1004 0x000020c0, 0x00040c80, 0xffffffff,
1005 0x0000f4a0, 0x000000c0, 0xffffffff,
1006 0x0000f4a4, 0x00680000, 0xffffffff,
1007 0x000004c8, 0x00000001, 0xffffffff,
1008 0x000064ec, 0x00007ffd, 0xffffffff,
1009 0x00000c7c, 0x0000ff00, 0xffffffff,
1010 0x00006dfc, 0x0000007f, 0xffffffff
1011};
1012#define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32))
1013
1014static const u32 barts_sysls_enable[] =
1015{
1016 0x000055e8, 0x00000001, 0xffffffff,
1017 0x0000d0bc, 0x00000100, 0xffffffff,
1018 0x000015c0, 0x000c1401, 0xffffffff,
1019 0x0000264c, 0x000c0400, 0xffffffff,
1020 0x00002648, 0x000c0400, 0xffffffff,
1021 0x00002650, 0x000c0400, 0xffffffff,
1022 0x000020b8, 0x000c0400, 0xffffffff,
1023 0x000020bc, 0x000c0400, 0xffffffff,
1024 0x000020c0, 0x000c0c80, 0xffffffff,
1025 0x0000f4a0, 0x000000c0, 0xffffffff,
1026 0x0000f4a4, 0x00680fff, 0xffffffff,
1027 0x000004c8, 0x00000000, 0xffffffff,
1028 0x000064ec, 0x00000000, 0xffffffff,
1029 0x00000c7c, 0x00000000, 0xffffffff,
1030 0x00006dfc, 0x00000000, 0xffffffff
1031};
1032#define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32))
1033
1034//********* CAICOS **************//
1035static const u32 caicos_sysls_default[] =
1036{
1037 0x000055e8, 0x00000000, 0xffffffff,
1038 0x0000d0bc, 0x00000000, 0xffffffff,
1039 0x000015c0, 0x000c1401, 0xffffffff,
1040 0x0000264c, 0x000c0400, 0xffffffff,
1041 0x00002648, 0x000c0400, 0xffffffff,
1042 0x00002650, 0x000c0400, 0xffffffff,
1043 0x000020b8, 0x000c0400, 0xffffffff,
1044 0x000020bc, 0x000c0400, 0xffffffff,
1045 0x0000f4a0, 0x000000c0, 0xffffffff,
1046 0x0000f4a4, 0x00680fff, 0xffffffff,
1047 0x000004c8, 0x00000001, 0xffffffff,
1048 0x000064ec, 0x00000000, 0xffffffff,
1049 0x00000c7c, 0x00000000, 0xffffffff,
1050 0x00006dfc, 0x00000000, 0xffffffff
1051};
1052#define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32))
1053
1054static const u32 caicos_sysls_disable[] =
1055{
1056 0x000055e8, 0x00000000, 0xffffffff,
1057 0x0000d0bc, 0x00000000, 0xffffffff,
1058 0x000015c0, 0x00041401, 0xffffffff,
1059 0x0000264c, 0x00040400, 0xffffffff,
1060 0x00002648, 0x00040400, 0xffffffff,
1061 0x00002650, 0x00040400, 0xffffffff,
1062 0x000020b8, 0x00040400, 0xffffffff,
1063 0x000020bc, 0x00040400, 0xffffffff,
1064 0x0000f4a0, 0x000000c0, 0xffffffff,
1065 0x0000f4a4, 0x00680000, 0xffffffff,
1066 0x000004c8, 0x00000001, 0xffffffff,
1067 0x000064ec, 0x00007ffd, 0xffffffff,
1068 0x00000c7c, 0x0000ff00, 0xffffffff,
1069 0x00006dfc, 0x0000007f, 0xffffffff
1070};
1071#define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32))
1072
1073static const u32 caicos_sysls_enable[] =
1074{
1075 0x000055e8, 0x00000001, 0xffffffff,
1076 0x0000d0bc, 0x00000100, 0xffffffff,
1077 0x000015c0, 0x000c1401, 0xffffffff,
1078 0x0000264c, 0x000c0400, 0xffffffff,
1079 0x00002648, 0x000c0400, 0xffffffff,
1080 0x00002650, 0x000c0400, 0xffffffff,
1081 0x000020b8, 0x000c0400, 0xffffffff,
1082 0x000020bc, 0x000c0400, 0xffffffff,
1083 0x0000f4a0, 0x000000c0, 0xffffffff,
1084 0x0000f4a4, 0x00680fff, 0xffffffff,
1085 0x000064ec, 0x00000000, 0xffffffff,
1086 0x00000c7c, 0x00000000, 0xffffffff,
1087 0x00006dfc, 0x00000000, 0xffffffff,
1088 0x000004c8, 0x00000000, 0xffffffff
1089};
1090#define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32))
1091
1092//********* TURKS **************//
1093static const u32 turks_sysls_default[] =
1094{
1095 0x000055e8, 0x00000000, 0xffffffff,
1096 0x0000d0bc, 0x00000000, 0xffffffff,
1097 0x000015c0, 0x000c1401, 0xffffffff,
1098 0x0000264c, 0x000c0400, 0xffffffff,
1099 0x00002648, 0x000c0400, 0xffffffff,
1100 0x00002650, 0x000c0400, 0xffffffff,
1101 0x000020b8, 0x000c0400, 0xffffffff,
1102 0x000020bc, 0x000c0400, 0xffffffff,
1103 0x000020c0, 0x000c0c80, 0xffffffff,
1104 0x0000f4a0, 0x000000c0, 0xffffffff,
1105 0x0000f4a4, 0x00680fff, 0xffffffff,
1106 0x000004c8, 0x00000001, 0xffffffff,
1107 0x000064ec, 0x00000000, 0xffffffff,
1108 0x00000c7c, 0x00000000, 0xffffffff,
1109 0x00006dfc, 0x00000000, 0xffffffff
1110};
1111#define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32))
1112
1113static const u32 turks_sysls_disable[] =
1114{
1115 0x000055e8, 0x00000000, 0xffffffff,
1116 0x0000d0bc, 0x00000000, 0xffffffff,
1117 0x000015c0, 0x00041401, 0xffffffff,
1118 0x0000264c, 0x00040400, 0xffffffff,
1119 0x00002648, 0x00040400, 0xffffffff,
1120 0x00002650, 0x00040400, 0xffffffff,
1121 0x000020b8, 0x00040400, 0xffffffff,
1122 0x000020bc, 0x00040400, 0xffffffff,
1123 0x000020c0, 0x00040c80, 0xffffffff,
1124 0x0000f4a0, 0x000000c0, 0xffffffff,
1125 0x0000f4a4, 0x00680000, 0xffffffff,
1126 0x000004c8, 0x00000001, 0xffffffff,
1127 0x000064ec, 0x00007ffd, 0xffffffff,
1128 0x00000c7c, 0x0000ff00, 0xffffffff,
1129 0x00006dfc, 0x0000007f, 0xffffffff
1130};
1131#define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32))
1132
1133static const u32 turks_sysls_enable[] =
1134{
1135 0x000055e8, 0x00000001, 0xffffffff,
1136 0x0000d0bc, 0x00000100, 0xffffffff,
1137 0x000015c0, 0x000c1401, 0xffffffff,
1138 0x0000264c, 0x000c0400, 0xffffffff,
1139 0x00002648, 0x000c0400, 0xffffffff,
1140 0x00002650, 0x000c0400, 0xffffffff,
1141 0x000020b8, 0x000c0400, 0xffffffff,
1142 0x000020bc, 0x000c0400, 0xffffffff,
1143 0x000020c0, 0x000c0c80, 0xffffffff,
1144 0x0000f4a0, 0x000000c0, 0xffffffff,
1145 0x0000f4a4, 0x00680fff, 0xffffffff,
1146 0x000004c8, 0x00000000, 0xffffffff,
1147 0x000064ec, 0x00000000, 0xffffffff,
1148 0x00000c7c, 0x00000000, 0xffffffff,
1149 0x00006dfc, 0x00000000, 0xffffffff
1150};
1151#define TURKS_SYSLS_ENABLE_LENGTH sizeof(turks_sysls_enable) / (3 * sizeof(u32))
1152
1153#endif
1154
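/* Engine clocks the DPM code may select, in ascending 50 MHz steps
 * (the values look to be in 10 kHz units, so 5000 == 50 MHz). */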
1155u32 btc_valid_sclk[40] =
1156{
1157 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000,
1158 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000,
1159 105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000, 145000, 150000,
1160 155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
1161};
1162
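/* sclk/mclk combinations known to misbehave; RADEON_SCLK_UP means the
 * workaround is to bump sclk to the next valid value, see
 * btc_skip_blacklist_clocks() below. */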
1163static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1164{
1165 { 10000, 30000, RADEON_SCLK_UP },
1166 { 15000, 30000, RADEON_SCLK_UP },
1167 { 20000, 30000, RADEON_SCLK_UP },
1168 { 25000, 30000, RADEON_SCLK_UP }
1169};
1170
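/* Raise *voltage to the level the table requires for 'clock', clamped to
 * max_voltage; past the last table entry, fall back to
 * max(*voltage, max_voltage). */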
1171void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1172 u32 clock, u16 max_voltage, u16 *voltage)
1173{
1174 u32 i;
1175
1176 if ((table == NULL) || (table->count == 0))
1177 return;
1178
1179 for (i = 0; i < table->count; i++) {
1180 if (clock <= table->entries[i].clk) {
1181 if (*voltage < table->entries[i].v)
1182 *voltage = (u16)((table->entries[i].v < max_voltage) ?
1183 table->entries[i].v : max_voltage);
1184 return;
1185 }
1186 }
1187
1188 *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
1189}
1190
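/* Return the smallest listed clock that is >= requested_clock, clamped
 * to max_clock; with no table, just clamp requested_clock itself. */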
1191static u32 btc_find_valid_clock(struct radeon_clock_array *clocks,
1192 u32 max_clock, u32 requested_clock)
1193{
1194 unsigned int i;
1195
1196 if ((clocks == NULL) || (clocks->count == 0))
1197 return (requested_clock < max_clock) ? requested_clock : max_clock;
1198
1199 for (i = 0; i < clocks->count; i++) {
1200 if (clocks->values[i] >= requested_clock)
1201 return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
1202 }
1203
1204 return (clocks->values[clocks->count - 1] < max_clock) ?
1205 clocks->values[clocks->count - 1] : max_clock;
1206}
1207
1208static u32 btc_get_valid_mclk(struct radeon_device *rdev,
1209 u32 max_mclk, u32 requested_mclk)
1210{
1211 return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_mclk_values,
1212 max_mclk, requested_mclk);
1213}
1214
1215static u32 btc_get_valid_sclk(struct radeon_device *rdev,
1216 u32 max_sclk, u32 requested_sclk)
1217{
1218 return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_sclk_values,
1219 max_sclk, requested_sclk);
1220}
1221
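/* If (*sclk, *mclk) hits a blacklisted pair, bump sclk to the next valid
 * value and re-check until a clean combination is found. */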
1222void btc_skip_blacklist_clocks(struct radeon_device *rdev,
1223 const u32 max_sclk, const u32 max_mclk,
1224 u32 *sclk, u32 *mclk)
1225{
1226 int i, num_blacklist_clocks;
1227
1228 if ((sclk == NULL) || (mclk == NULL))
1229 return;
1230
1231 num_blacklist_clocks = ARRAY_SIZE(btc_blacklist_clocks);
1232
1233 for (i = 0; i < num_blacklist_clocks; i++) {
1234 if ((btc_blacklist_clocks[i].sclk == *sclk) &&
1235 (btc_blacklist_clocks[i].mclk == *mclk))
1236 break;
1237 }
1238
1239 if (i < num_blacklist_clocks) {
1240 if (btc_blacklist_clocks[i].action == RADEON_SCLK_UP) {
1241 *sclk = btc_get_valid_sclk(rdev, max_sclk, *sclk + 1);
1242
1243 if (*sclk < max_sclk)
1244 btc_skip_blacklist_clocks(rdev, max_sclk, max_mclk, sclk, mclk);
1245 }
1246 }
1247}
1248
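/* Keep mclk and sclk within the platform limits on how far they may
 * diverge: raise sclk if the mclk/sclk ratio is too high, raise mclk if
 * sclk exceeds mclk by more than the allowed delta. */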
1249void btc_adjust_clock_combinations(struct radeon_device *rdev,
1250 const struct radeon_clock_and_voltage_limits *max_limits,
1251 struct rv7xx_pl *pl)
1252{
1253
1254 if ((pl->mclk == 0) || (pl->sclk == 0))
1255 return;
1256
1257 if (pl->mclk == pl->sclk)
1258 return;
1259
1260 if (pl->mclk > pl->sclk) {
1261 if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > rdev->pm.dpm.dyn_state.mclk_sclk_ratio)
1262 pl->sclk = btc_get_valid_sclk(rdev,
1263 max_limits->sclk,
1264 (pl->mclk +
1265 (rdev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
1266 rdev->pm.dpm.dyn_state.mclk_sclk_ratio);
1267 } else {
1268 if ((pl->sclk - pl->mclk) > rdev->pm.dpm.dyn_state.sclk_mclk_delta)
1269 pl->mclk = btc_get_valid_mclk(rdev,
1270 max_limits->mclk,
1271 pl->sclk -
1272 rdev->pm.dpm.dyn_state.sclk_mclk_delta);
1273 }
1274}
1275
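/* Smallest table voltage that satisfies the request, or the table
 * maximum if nothing does. */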
1276static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
1277{
1278 unsigned int i;
1279
1280 for (i = 0; i < table->count; i++) {
1281 if (voltage <= table->entries[i].value)
1282 return table->entries[i].value;
1283 }
1284
1285 return table->entries[table->count - 1].value;
1286}
1287
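/* Keep vddc and vddci within the allowed delta by raising the lower
 * rail, clamped to its respective maximum. */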
1288void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
1289 u16 max_vddc, u16 max_vddci,
1290 u16 *vddc, u16 *vddci)
1291{
1292 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1293 u16 new_voltage;
1294
1295 if ((0 == *vddc) || (0 == *vddci))
1296 return;
1297
1298 if (*vddc > *vddci) {
1299 if ((*vddc - *vddci) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
1300 new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
1301 (*vddc - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
1302 *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
1303 }
1304 } else {
1305 if ((*vddci - *vddc) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
1306 new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
1307 (*vddci - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
1308 *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
1309 }
1310 }
1311}
1312
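/* Enable/disable dynamic PCIe gen2 switching in the BIF; the link speed
 * straps are only touched if the other end has advertised gen2 support
 * and we did not already boot in gen2 mode. */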
1313static void btc_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
1314 bool enable)
1315{
1316 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1317 u32 tmp, bif;
1318
1319 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1320 if (enable) {
1321 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1322 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
1323 if (!pi->boot_in_gen2) {
1324 bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
1325 bif |= CG_CLIENT_REQ(0xd);
1326 WREG32(CG_BIF_REQ_AND_RSP, bif);
1327
1328 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
1329 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
1330 tmp |= LC_GEN2_EN_STRAP;
1331
1332 tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
1333 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
1334 udelay(10);
1335 tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
1336 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
1337 }
1338 }
1339 } else {
1340 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
1341 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
1342 if (!pi->boot_in_gen2) {
1343 bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
1344 bif |= CG_CLIENT_REQ(0xd);
1345 WREG32(CG_BIF_REQ_AND_RSP, bif);
1346
1347 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
1348 tmp &= ~LC_GEN2_EN_STRAP;
1349 }
1350 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
1351 }
1352 }
1353}
1354
1355static void btc_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1356 bool enable)
1357{
1358 btc_enable_bif_dynamic_pcie_gen2(rdev, enable);
1359
1360 if (enable)
1361 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
1362 else
1363 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
1364}
1365
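/* Ask the SMC to leave ULV; fails with -EINVAL if the SMC rejects it. */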
1366static int btc_disable_ulv(struct radeon_device *rdev)
1367{
1368 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1369
1370 if (eg_pi->ulv.supported) {
1371 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) != PPSMC_Result_OK)
1372 return -EINVAL;
1373 }
1374 return 0;
1375}
1376
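/* Build the SMC ULV state from the single ULV performance level,
 * replicated across all three levels, and program the default ULV
 * control/parameter registers. */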
1377static int btc_populate_ulv_state(struct radeon_device *rdev,
1378 RV770_SMC_STATETABLE *table)
1379{
1380 int ret = -EINVAL;
1381 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1382 struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
1383
1384 if (ulv_pl->vddc) {
1385 ret = cypress_convert_power_level_to_smc(rdev,
1386 ulv_pl,
1387 &table->ULVState.levels[0],
1388 PPSMC_DISPLAY_WATERMARK_LOW);
1389 if (ret == 0) {
1390 table->ULVState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
1391 table->ULVState.levels[0].ACIndex = 1;
1392
1393 table->ULVState.levels[1] = table->ULVState.levels[0];
1394 table->ULVState.levels[2] = table->ULVState.levels[0];
1395
1396 table->ULVState.flags |= PPSMC_SWSTATE_FLAG_DC;
1397
1398 WREG32(CG_ULV_CONTROL, BTC_CGULVCONTROL_DFLT);
1399 WREG32(CG_ULV_PARAMETER, BTC_CGULVPARAMETER_DFLT);
1400 }
1401 }
1402
1403 return ret;
1404}
1405
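/* Cypress ACPI state with ACIndex forced to 0 on all three levels. */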
1406static int btc_populate_smc_acpi_state(struct radeon_device *rdev,
1407 RV770_SMC_STATETABLE *table)
1408{
1409 int ret = cypress_populate_smc_acpi_state(rdev, table);
1410
1411 if (ret == 0) {
1412 table->ACPIState.levels[0].ACIndex = 0;
1413 table->ACPIState.levels[1].ACIndex = 0;
1414 table->ACPIState.levels[2].ACIndex = 0;
1415 }
1416
1417 return ret;
1418}
1419
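/* Walk a (register, value, mask) sequence: for each triple, read-modify-
 * write the register, updating only the masked bits. */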
1420void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
1421 const u32 *sequence, u32 count)
1422{
1423 u32 i, length = count * 3;
1424 u32 tmp;
1425
1426 for (i = 0; i < length; i += 3) {
1427 tmp = RREG32(sequence[i]);
1428 tmp &= ~sequence[i+2];
1429 tmp |= sequence[i+1] & sequence[i+2];
1430 WREG32(sequence[i], tmp);
1431 }
1432}
1433
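/* The helpers below just pick the Barts/Turks/Caicos variant of each
 * gating table and hand it to btc_program_mgcg_hw_sequence(). */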
1434static void btc_cg_clock_gating_default(struct radeon_device *rdev)
1435{
1436 u32 count;
1437 const u32 *p = NULL;
1438
1439 if (rdev->family == CHIP_BARTS) {
1440 p = (const u32 *)&barts_cgcg_cgls_default;
1441 count = BARTS_CGCG_CGLS_DEFAULT_LENGTH;
1442 } else if (rdev->family == CHIP_TURKS) {
1443 p = (const u32 *)&turks_cgcg_cgls_default;
1444 count = TURKS_CGCG_CGLS_DEFAULT_LENGTH;
1445 } else if (rdev->family == CHIP_CAICOS) {
1446 p = (const u32 *)&caicos_cgcg_cgls_default;
1447 count = CAICOS_CGCG_CGLS_DEFAULT_LENGTH;
1448 } else
1449 return;
1450
1451 btc_program_mgcg_hw_sequence(rdev, p, count);
1452}
1453
1454static void btc_cg_clock_gating_enable(struct radeon_device *rdev,
1455 bool enable)
1456{
1457 u32 count;
1458 const u32 *p = NULL;
1459
1460 if (enable) {
1461 if (rdev->family == CHIP_BARTS) {
1462 p = (const u32 *)&barts_cgcg_cgls_enable;
1463 count = BARTS_CGCG_CGLS_ENABLE_LENGTH;
1464 } else if (rdev->family == CHIP_TURKS) {
1465 p = (const u32 *)&turks_cgcg_cgls_enable;
1466 count = TURKS_CGCG_CGLS_ENABLE_LENGTH;
1467 } else if (rdev->family == CHIP_CAICOS) {
1468 p = (const u32 *)&caicos_cgcg_cgls_enable;
1469 count = CAICOS_CGCG_CGLS_ENABLE_LENGTH;
1470 } else
1471 return;
1472 } else {
1473 if (rdev->family == CHIP_BARTS) {
1474 p = (const u32 *)&barts_cgcg_cgls_disable;
1475 count = BARTS_CGCG_CGLS_DISABLE_LENGTH;
1476 } else if (rdev->family == CHIP_TURKS) {
1477 p = (const u32 *)&turks_cgcg_cgls_disable;
1478 count = TURKS_CGCG_CGLS_DISABLE_LENGTH;
1479 } else if (rdev->family == CHIP_CAICOS) {
1480 p = (const u32 *)&caicos_cgcg_cgls_disable;
1481 count = CAICOS_CGCG_CGLS_DISABLE_LENGTH;
1482 } else
1483 return;
1484 }
1485
1486 btc_program_mgcg_hw_sequence(rdev, p, count);
1487}
1488
1489static void btc_mg_clock_gating_default(struct radeon_device *rdev)
1490{
1491 u32 count;
1492 const u32 *p = NULL;
1493
1494 if (rdev->family == CHIP_BARTS) {
1495 p = (const u32 *)&barts_mgcg_default;
1496 count = BARTS_MGCG_DEFAULT_LENGTH;
1497 } else if (rdev->family == CHIP_TURKS) {
1498 p = (const u32 *)&turks_mgcg_default;
1499 count = TURKS_MGCG_DEFAULT_LENGTH;
1500 } else if (rdev->family == CHIP_CAICOS) {
1501 p = (const u32 *)&caicos_mgcg_default;
1502 count = CAICOS_MGCG_DEFAULT_LENGTH;
1503 } else
1504 return;
1505
1506 btc_program_mgcg_hw_sequence(rdev, p, count);
1507}
1508
1509static void btc_mg_clock_gating_enable(struct radeon_device *rdev,
1510 bool enable)
1511{
1512 u32 count;
1513 const u32 *p = NULL;
1514
1515 if (enable) {
1516 if (rdev->family == CHIP_BARTS) {
1517 p = (const u32 *)&barts_mgcg_enable;
1518 count = BARTS_MGCG_ENABLE_LENGTH;
1519 } else if (rdev->family == CHIP_TURKS) {
1520 p = (const u32 *)&turks_mgcg_enable;
1521 count = TURKS_MGCG_ENABLE_LENGTH;
1522 } else if (rdev->family == CHIP_CAICOS) {
1523 p = (const u32 *)&caicos_mgcg_enable;
1524 count = CAICOS_MGCG_ENABLE_LENGTH;
1525 } else
1526 return;
1527 } else {
1528 if (rdev->family == CHIP_BARTS) {
1529 p = (const u32 *)&barts_mgcg_disable[0];
1530 count = BARTS_MGCG_DISABLE_LENGTH;
1531 } else if (rdev->family == CHIP_TURKS) {
1532 p = (const u32 *)&turks_mgcg_disable[0];
1533 count = TURKS_MGCG_DISABLE_LENGTH;
1534 } else if (rdev->family == CHIP_CAICOS) {
1535 p = (const u32 *)&caicos_mgcg_disable[0];
1536 count = CAICOS_MGCG_DISABLE_LENGTH;
1537 } else
1538 return;
1539 }
1540
1541 btc_program_mgcg_hw_sequence(rdev, p, count);
1542}
1543
1544static void btc_ls_clock_gating_default(struct radeon_device *rdev)
1545{
1546 u32 count;
1547 const u32 *p = NULL;
1548
1549 if (rdev->family == CHIP_BARTS) {
1550 p = (const u32 *)&barts_sysls_default;
1551 count = BARTS_SYSLS_DEFAULT_LENGTH;
1552 } else if (rdev->family == CHIP_TURKS) {
1553 p = (const u32 *)&turks_sysls_default;
1554 count = TURKS_SYSLS_DEFAULT_LENGTH;
1555 } else if (rdev->family == CHIP_CAICOS) {
1556 p = (const u32 *)&caicos_sysls_default;
1557 count = CAICOS_SYSLS_DEFAULT_LENGTH;
1558 } else
1559 return;
1560
1561 btc_program_mgcg_hw_sequence(rdev, p, count);
1562}
1563
1564static void btc_ls_clock_gating_enable(struct radeon_device *rdev,
1565 bool enable)
1566{
1567 u32 count;
1568 const u32 *p = NULL;
1569
1570 if (enable) {
1571 if (rdev->family == CHIP_BARTS) {
1572 p = (const u32 *)&barts_sysls_enable;
1573 count = BARTS_SYSLS_ENABLE_LENGTH;
1574 } else if (rdev->family == CHIP_TURKS) {
1575 p = (const u32 *)&turks_sysls_enable;
1576 count = TURKS_SYSLS_ENABLE_LENGTH;
1577 } else if (rdev->family == CHIP_CAICOS) {
1578 p = (const u32 *)&caicos_sysls_enable;
1579 count = CAICOS_SYSLS_ENABLE_LENGTH;
1580 } else
1581 return;
1582 } else {
1583 if (rdev->family == CHIP_BARTS) {
1584 p = (const u32 *)&barts_sysls_disable;
1585 count = BARTS_SYSLS_DISABLE_LENGTH;
1586 } else if (rdev->family == CHIP_TURKS) {
1587 p = (const u32 *)&turks_sysls_disable;
1588 count = TURKS_SYSLS_DISABLE_LENGTH;
1589 } else if (rdev->family == CHIP_CAICOS) {
1590 p = (const u32 *)&caicos_sysls_disable;
1591 count = CAICOS_SYSLS_DISABLE_LENGTH;
1592 } else
1593 return;
1594 }
1595
1596 btc_program_mgcg_hw_sequence(rdev, p, count);
1597}
1598
1599bool btc_dpm_enabled(struct radeon_device *rdev)
1600{
1601	/* DPM is considered active whenever the SMC is running */
1602	return rv770_is_smc_running(rdev);
1603}
1606
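/*
 * Build the SMC state table from scratch: voltage tables, thermal
 * protection type, platform capability flags, the initial (boot) state,
 * the ACPI state and, when supported, the ULV state.  The finished table
 * is copied into SMC RAM at pi->state_table_start.
 */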
1607static int btc_init_smc_table(struct radeon_device *rdev,
1608 struct radeon_ps *radeon_boot_state)
1609{
1610 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1611 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1612 RV770_SMC_STATETABLE *table = &pi->smc_statetable;
1613 int ret;
1614
1615 memset(table, 0, sizeof(RV770_SMC_STATETABLE));
1616
1617 cypress_populate_smc_voltage_tables(rdev, table);
1618
1619 switch (rdev->pm.int_thermal_type) {
1620 case THERMAL_TYPE_EVERGREEN:
1621 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
1622 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1623 break;
1624 case THERMAL_TYPE_NONE:
1625 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1626 break;
1627 default:
1628 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1629 break;
1630 }
1631
1632 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
1633 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1634
1635 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1636 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
1637
1638 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1639 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1640
1641 if (pi->mem_gddr5)
1642 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1643
1644 ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
1645 if (ret)
1646 return ret;
1647
1648 if (eg_pi->sclk_deep_sleep)
1649 WREG32_P(SCLK_PSKIP_CNTL, PSKIP_ON_ALLOW_STOP_HI(32),
1650 ~PSKIP_ON_ALLOW_STOP_HI_MASK);
1651
1652 ret = btc_populate_smc_acpi_state(rdev, table);
1653 if (ret)
1654 return ret;
1655
1656 if (eg_pi->ulv.supported) {
1657 ret = btc_populate_ulv_state(rdev, table);
1658 if (ret)
1659 eg_pi->ulv.supported = false;
1660 }
1661
1662 table->driverState = table->initialState;
1663
1664 return rv770_copy_bytes_to_smc(rdev,
1665 pi->state_table_start,
1666 (u8 *)table,
1667 sizeof(RV770_SMC_STATETABLE),
1668 pi->sram_end);
1669}
1670
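/*
 * Select the activity-threshold set (rlp/rmp/lhp/lmp) for the new state.
 * Index 1 of the ats[] array is used for UVD states; when the SMU does
 * not handle UVD itself (!smu_uvd_hs), a fixed conservative set is used
 * instead.
 */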
1671static void btc_set_at_for_uvd(struct radeon_device *rdev,
1672 struct radeon_ps *radeon_new_state)
1673{
1674 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1675 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1676 int idx = 0;
1677
1678 if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2))
1679 idx = 1;
1680
1681 if ((idx == 1) && !eg_pi->smu_uvd_hs) {
1682 pi->rlp = 10;
1683 pi->rmp = 100;
1684 pi->lhp = 100;
1685 pi->lmp = 10;
1686 } else {
1687 pi->rlp = eg_pi->ats[idx].rlp;
1688 pi->rmp = eg_pi->ats[idx].rmp;
1689 pi->lhp = eg_pi->ats[idx].lhp;
1690 pi->lmp = eg_pi->ats[idx].lmp;
1691 }
1693}
1694
1695void btc_notify_uvd_to_smc(struct radeon_device *rdev,
1696 struct radeon_ps *radeon_new_state)
1697{
1698 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1699
1700 if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
1701 rv770_write_smc_soft_register(rdev,
1702 RV770_SMC_SOFT_REGISTER_uvd_enabled, 1);
1703 eg_pi->uvd_enabled = true;
1704 } else {
1705 rv770_write_smc_soft_register(rdev,
1706 RV770_SMC_SOFT_REGISTER_uvd_enabled, 0);
1707 eg_pi->uvd_enabled = false;
1708 }
1709}
1710
1711int btc_reset_to_default(struct radeon_device *rdev)
1712{
1713 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) != PPSMC_Result_OK)
1714 return -EINVAL;
1715
1716 return 0;
1717}
1718
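/*
 * Halt the SMC: wait (up to the usual usec timeout) for the
 * LB_SYNC_RESET_SEL field to leave state 1, settle briefly, then stop
 * the RV7xx SMC proper.
 */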
1719static void btc_stop_smc(struct radeon_device *rdev)
1720{
1721 int i;
1722
1723 for (i = 0; i < rdev->usec_timeout; i++) {
1724 if (((RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK) >> LB_SYNC_RESET_SEL_SHIFT) != 1)
1725 break;
1726 udelay(1);
1727 }
1728 udelay(100);
1729
1730 r7xx_stop_smc(rdev);
1731}
1732
1733void btc_read_arb_registers(struct radeon_device *rdev)
1734{
1735 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1736 struct evergreen_arb_registers *arb_registers =
1737 &eg_pi->bootup_arb_registers;
1738
1739 arb_registers->mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1740 arb_registers->mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1741 arb_registers->mc_arb_rfsh_rate = RREG32(MC_ARB_RFSH_RATE);
1742 arb_registers->mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
1743}
1744
1746static void btc_set_arb0_registers(struct radeon_device *rdev,
1747 struct evergreen_arb_registers *arb_registers)
1748{
1749 u32 val;
1750
1751 WREG32(MC_ARB_DRAM_TIMING, arb_registers->mc_arb_dram_timing);
1752 WREG32(MC_ARB_DRAM_TIMING2, arb_registers->mc_arb_dram_timing2);
1753
1754 val = (arb_registers->mc_arb_rfsh_rate & POWERMODE0_MASK) >>
1755 POWERMODE0_SHIFT;
1756 WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);
1757
1758 val = (arb_registers->mc_arb_burst_time & STATE0_MASK) >>
1759 STATE0_SHIFT;
1760 WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);
1761}
1762
1763static void btc_set_boot_state_timing(struct radeon_device *rdev)
1764{
1765 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1766
1767 if (eg_pi->ulv.supported)
1768 btc_set_arb0_registers(rdev, &eg_pi->bootup_arb_registers);
1769}
1770
1771static bool btc_is_state_ulv_compatible(struct radeon_device *rdev,
1772 struct radeon_ps *radeon_state)
1773{
1774 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
1775 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1776 struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
1777
1778 if (state->low.mclk != ulv_pl->mclk)
1779 return false;
1780
1781 if (state->low.vddci != ulv_pl->vddci)
1782 return false;
1783
1784 /* XXX check minclocks, etc. */
1785
1786 return true;
1787}
1788
1790static int btc_set_ulv_dram_timing(struct radeon_device *rdev)
1791{
1792 u32 val;
1793 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1794 struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
1795
1796 radeon_atom_set_engine_dram_timings(rdev,
1797 ulv_pl->sclk,
1798 ulv_pl->mclk);
1799
1800 val = rv770_calculate_memory_refresh_rate(rdev, ulv_pl->sclk);
1801 WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);
1802
1803 val = cypress_calculate_burst_time(rdev, ulv_pl->sclk, ulv_pl->mclk);
1804 WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);
1805
1806 return 0;
1807}
1808
1809static int btc_enable_ulv(struct radeon_device *rdev)
1810{
1811 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) != PPSMC_Result_OK)
1812 return -EINVAL;
1813
1814 return 0;
1815}
1816
1817static int btc_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev,
1818 struct radeon_ps *radeon_new_state)
1819{
1820 int ret = 0;
1821 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1822
1823 if (eg_pi->ulv.supported) {
1824 if (btc_is_state_ulv_compatible(rdev, radeon_new_state)) {
1825			/* set arb0 to reflect the DRAM timing needed for ULV */
1826 ret = btc_set_ulv_dram_timing(rdev);
1827 if (ret == 0)
1828 ret = btc_enable_ulv(rdev);
1829 }
1830 }
1831
1832 return ret;
1833}
1834
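/*
 * Map an MC register (given as a dword offset) to its low-power "_LP"
 * shadow register.  Returns false, leaving *out_reg untouched, for
 * registers that have no shadow.
 */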
1835static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
1836{
1837 bool result = true;
1838
1839 switch (in_reg) {
1840 case MC_SEQ_RAS_TIMING >> 2:
1841 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
1842 break;
1843 case MC_SEQ_CAS_TIMING >> 2:
1844 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
1845 break;
1846 case MC_SEQ_MISC_TIMING >> 2:
1847 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
1848 break;
1849 case MC_SEQ_MISC_TIMING2 >> 2:
1850 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
1851 break;
1852 case MC_SEQ_RD_CTL_D0 >> 2:
1853 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
1854 break;
1855 case MC_SEQ_RD_CTL_D1 >> 2:
1856 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
1857 break;
1858 case MC_SEQ_WR_CTL_D0 >> 2:
1859 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
1860 break;
1861 case MC_SEQ_WR_CTL_D1 >> 2:
1862 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
1863 break;
1864 case MC_PMG_CMD_EMRS >> 2:
1865 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1866 break;
1867 case MC_PMG_CMD_MRS >> 2:
1868 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1869 break;
1870 case MC_PMG_CMD_MRS1 >> 2:
1871 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1872 break;
1873 default:
1874 result = false;
1875 break;
1876 }
1877
1878 return result;
1879}
1880
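/*
 * Set bit i of valid_flag for every MC register whose value differs
 * between any two adjacent table entries, presumably so that registers
 * which never change need not be re-uploaded on a state switch.
 */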
1881static void btc_set_valid_flag(struct evergreen_mc_reg_table *table)
1882{
1883 u8 i, j;
1884
1885 for (i = 0; i < table->last; i++) {
1886 for (j = 1; j < table->num_entries; j++) {
1887 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
1888 table->mc_reg_table_entry[j].mc_data[i]) {
1889 table->valid_flag |= (1 << i);
1890 break;
1891 }
1892 }
1893 }
1894}
1895
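/*
 * Append derived entries to the MC register table: EMRS/MRS values are
 * synthesized from the MC_SEQ_MISC1 data and MRS1 from MC_SEQ_RESERVE_M,
 * each appended slot bounds-checked against
 * SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE.  Non-GDDR5 boards additionally
 * get bit 8 set in the MRS value.
 */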
1896static int btc_set_mc_special_registers(struct radeon_device *rdev,
1897 struct evergreen_mc_reg_table *table)
1898{
1899 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1900 u8 i, j, k;
1901 u32 tmp;
1902
1903 for (i = 0, j = table->last; i < table->last; i++) {
1904 switch (table->mc_reg_address[i].s1) {
1905 case MC_SEQ_MISC1 >> 2:
1906 tmp = RREG32(MC_PMG_CMD_EMRS);
1907 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
1908 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1909 for (k = 0; k < table->num_entries; k++) {
1910 table->mc_reg_table_entry[k].mc_data[j] =
1911				(tmp & 0xffff0000) |
1912 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
1913 }
1914 j++;
1915
1916 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1917 return -EINVAL;
1918
1919 tmp = RREG32(MC_PMG_CMD_MRS);
1920 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
1921 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1922 for (k = 0; k < table->num_entries; k++) {
1923 table->mc_reg_table_entry[k].mc_data[j] =
1924 (tmp & 0xffff0000) |
1925 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
1926 if (!pi->mem_gddr5)
1927 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
1928 }
1929 j++;
1930
1931 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1932 return -EINVAL;
1933 break;
1934 case MC_SEQ_RESERVE_M >> 2:
1935 tmp = RREG32(MC_PMG_CMD_MRS1);
1936 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
1937 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1938 for (k = 0; k < table->num_entries; k++) {
1939 table->mc_reg_table_entry[k].mc_data[j] =
1940 (tmp & 0xffff0000) |
1941 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
1942 }
1943 j++;
1944
1945 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1946 return -EINVAL;
1947 break;
1948 default:
1949 break;
1950 }
1951 }
1952
1953 table->last = j;
1954
1955 return 0;
1956}
1957
1958static void btc_set_s0_mc_reg_index(struct evergreen_mc_reg_table *table)
1959{
1960 u32 i;
1961 u16 address;
1962
1963 for (i = 0; i < table->last; i++) {
1964 table->mc_reg_address[i].s0 =
1965 btc_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
1966 address : table->mc_reg_address[i].s1;
1967 }
1968}
1969
1970static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
1971 struct evergreen_mc_reg_table *eg_table)
1972{
1973 u8 i, j;
1974
1975 if (table->last > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1976 return -EINVAL;
1977
1978 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
1979 return -EINVAL;
1980
1981 for (i = 0; i < table->last; i++)
1982 eg_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
1983 eg_table->last = table->last;
1984
1985 for (i = 0; i < table->num_entries; i++) {
1986 eg_table->mc_reg_table_entry[i].mclk_max =
1987 table->mc_reg_table_entry[i].mclk_max;
1988		for (j = 0; j < table->last; j++)
1989 eg_table->mc_reg_table_entry[i].mc_data[j] =
1990 table->mc_reg_table_entry[i].mc_data[j];
1991 }
1992 eg_table->num_entries = table->num_entries;
1993
1994 return 0;
1995}
1996
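/*
 * Set up the dynamic AC timing machinery: mirror the current MC timing
 * registers into their _LP shadows (no longer programmed by the VBIOS),
 * fetch the VBIOS MC register table for this memory module, convert it
 * into the driver's format, and derive the special registers and valid
 * flags.
 */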
1997static int btc_initialize_mc_reg_table(struct radeon_device *rdev)
1998{
1999 int ret;
2000 struct atom_mc_reg_table *table;
2001 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2002 struct evergreen_mc_reg_table *eg_table = &eg_pi->mc_reg_table;
2003 u8 module_index = rv770_get_memory_module_index(rdev);
2004
2005 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
2006 if (!table)
2007 return -ENOMEM;
2008
2009 /* Program additional LP registers that are no longer programmed by VBIOS */
2010 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
2011 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
2012 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
2013 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
2014 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
2015 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
2016 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
2017 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
2018 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
2019 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
2020 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
2021
2022 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
2023
2024 if (ret)
2025 goto init_mc_done;
2026
2027 ret = btc_copy_vbios_mc_reg_table(table, eg_table);
2028
2029 if (ret)
2030 goto init_mc_done;
2031
2032 btc_set_s0_mc_reg_index(eg_table);
2033 ret = btc_set_mc_special_registers(rdev, eg_table);
2034
2035 if (ret)
2036 goto init_mc_done;
2037
2038 btc_set_valid_flag(eg_table);
2039
2040init_mc_done:
2041 kfree(table);
2042
2043 return ret;
2044}
2045
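/*
 * Enable memory stutter mode on GDDR5 boards when a stutter threshold
 * has been configured.  The magic MC_PMG_AUTO_CFG values are taken as
 * given here (presumably from the hardware documentation).
 */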
2046static void btc_init_stutter_mode(struct radeon_device *rdev)
2047{
2048 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2049 u32 tmp;
2050
2051 if (pi->mclk_stutter_mode_threshold) {
2052 if (pi->mem_gddr5) {
2053 tmp = RREG32(MC_PMG_AUTO_CFG);
2054 if ((0x200 & tmp) == 0) {
2055 tmp = (tmp & 0xfffffc0b) | 0x204;
2056 WREG32(MC_PMG_AUTO_CFG, tmp);
2057 }
2058 }
2059 }
2060}
2061
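/*
 * Massage a requested power state so that it is actually achievable:
 * clamp every performance level to the AC or DC limits, force a single
 * mclk (and vddci) across all levels when more than one CRTC is active
 * (presumably to avoid reclocking artifacts during scanout), skip
 * blacklisted clock pairs, enforce the voltage dependency and delta
 * rules, and finally derive the dc_compatible and PCIe gen2 flags.
 */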
2062static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2063 struct radeon_ps *rps)
2064{
2065 struct rv7xx_ps *ps = rv770_get_ps(rps);
2066 struct radeon_clock_and_voltage_limits *max_limits;
2067 bool disable_mclk_switching;
2068 u32 mclk, sclk;
2069 u16 vddc, vddci;
2070
2071	disable_mclk_switching =
2072		(rdev->pm.dpm.new_active_crtc_count > 1);
2075
2076 if (rdev->pm.dpm.ac_power)
2077 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2078 else
2079 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
2080
2081	if (!rdev->pm.dpm.ac_power) {
2082 if (ps->high.mclk > max_limits->mclk)
2083 ps->high.mclk = max_limits->mclk;
2084 if (ps->high.sclk > max_limits->sclk)
2085 ps->high.sclk = max_limits->sclk;
2086 if (ps->high.vddc > max_limits->vddc)
2087 ps->high.vddc = max_limits->vddc;
2088 if (ps->high.vddci > max_limits->vddci)
2089 ps->high.vddci = max_limits->vddci;
2090
2091 if (ps->medium.mclk > max_limits->mclk)
2092 ps->medium.mclk = max_limits->mclk;
2093 if (ps->medium.sclk > max_limits->sclk)
2094 ps->medium.sclk = max_limits->sclk;
2095 if (ps->medium.vddc > max_limits->vddc)
2096 ps->medium.vddc = max_limits->vddc;
2097 if (ps->medium.vddci > max_limits->vddci)
2098 ps->medium.vddci = max_limits->vddci;
2099
2100 if (ps->low.mclk > max_limits->mclk)
2101 ps->low.mclk = max_limits->mclk;
2102 if (ps->low.sclk > max_limits->sclk)
2103 ps->low.sclk = max_limits->sclk;
2104 if (ps->low.vddc > max_limits->vddc)
2105 ps->low.vddc = max_limits->vddc;
2106 if (ps->low.vddci > max_limits->vddci)
2107 ps->low.vddci = max_limits->vddci;
2108 }
2109
2110 /* XXX validate the min clocks required for display */
2111
2112 if (disable_mclk_switching) {
2113 sclk = ps->low.sclk;
2114 mclk = ps->high.mclk;
2115 vddc = ps->low.vddc;
2116 vddci = ps->high.vddci;
2117 } else {
2118 sclk = ps->low.sclk;
2119 mclk = ps->low.mclk;
2120 vddc = ps->low.vddc;
2121 vddci = ps->low.vddci;
2122 }
2123
2124 /* adjusted low state */
2125 ps->low.sclk = sclk;
2126 ps->low.mclk = mclk;
2127 ps->low.vddc = vddc;
2128 ps->low.vddci = vddci;
2129
2130 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
2131 &ps->low.sclk, &ps->low.mclk);
2132
2133 /* adjusted medium, high states */
2134 if (ps->medium.sclk < ps->low.sclk)
2135 ps->medium.sclk = ps->low.sclk;
2136 if (ps->medium.vddc < ps->low.vddc)
2137 ps->medium.vddc = ps->low.vddc;
2138 if (ps->high.sclk < ps->medium.sclk)
2139 ps->high.sclk = ps->medium.sclk;
2140 if (ps->high.vddc < ps->medium.vddc)
2141 ps->high.vddc = ps->medium.vddc;
2142
2143 if (disable_mclk_switching) {
2144 mclk = ps->low.mclk;
2145 if (mclk < ps->medium.mclk)
2146 mclk = ps->medium.mclk;
2147 if (mclk < ps->high.mclk)
2148 mclk = ps->high.mclk;
2149 ps->low.mclk = mclk;
2150 ps->low.vddci = vddci;
2151 ps->medium.mclk = mclk;
2152 ps->medium.vddci = vddci;
2153 ps->high.mclk = mclk;
2154 ps->high.vddci = vddci;
2155 } else {
2156 if (ps->medium.mclk < ps->low.mclk)
2157 ps->medium.mclk = ps->low.mclk;
2158 if (ps->medium.vddci < ps->low.vddci)
2159 ps->medium.vddci = ps->low.vddci;
2160 if (ps->high.mclk < ps->medium.mclk)
2161 ps->high.mclk = ps->medium.mclk;
2162 if (ps->high.vddci < ps->medium.vddci)
2163 ps->high.vddci = ps->medium.vddci;
2164 }
2165
2166 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
2167 &ps->medium.sclk, &ps->medium.mclk);
2168 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
2169 &ps->high.sclk, &ps->high.mclk);
2170
2171 btc_adjust_clock_combinations(rdev, max_limits, &ps->low);
2172 btc_adjust_clock_combinations(rdev, max_limits, &ps->medium);
2173 btc_adjust_clock_combinations(rdev, max_limits, &ps->high);
2174
2175 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2176 ps->low.sclk, max_limits->vddc, &ps->low.vddc);
2177 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2178 ps->low.mclk, max_limits->vddci, &ps->low.vddci);
2179 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2180 ps->low.mclk, max_limits->vddc, &ps->low.vddc);
2181 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
2182 rdev->clock.current_dispclk, max_limits->vddc, &ps->low.vddc);
2183
2184 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2185 ps->medium.sclk, max_limits->vddc, &ps->medium.vddc);
2186 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2187 ps->medium.mclk, max_limits->vddci, &ps->medium.vddci);
2188 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2189 ps->medium.mclk, max_limits->vddc, &ps->medium.vddc);
2190 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
2191 rdev->clock.current_dispclk, max_limits->vddc, &ps->medium.vddc);
2192
2193 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2194 ps->high.sclk, max_limits->vddc, &ps->high.vddc);
2195 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2196 ps->high.mclk, max_limits->vddci, &ps->high.vddci);
2197 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2198 ps->high.mclk, max_limits->vddc, &ps->high.vddc);
2199 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
2200 rdev->clock.current_dispclk, max_limits->vddc, &ps->high.vddc);
2201
2202 btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
2203 &ps->low.vddc, &ps->low.vddci);
2204 btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
2205 &ps->medium.vddc, &ps->medium.vddci);
2206 btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci,
2207 &ps->high.vddc, &ps->high.vddci);
2208
2209 if ((ps->high.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) &&
2210 (ps->medium.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) &&
2211 (ps->low.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc))
2212 ps->dc_compatible = true;
2213 else
2214 ps->dc_compatible = false;
2215
2216 if (ps->low.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
2217 ps->low.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
2218 if (ps->medium.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
2219 ps->medium.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
2220 if (ps->high.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
2221 ps->high.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
2222}
2223
2224static void btc_update_current_ps(struct radeon_device *rdev,
2225 struct radeon_ps *rps)
2226{
2227 struct rv7xx_ps *new_ps = rv770_get_ps(rps);
2228 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2229
2230 eg_pi->current_rps = *rps;
2231 eg_pi->current_ps = *new_ps;
2232 eg_pi->current_rps.ps_priv = &eg_pi->current_ps;
2233}
2234
2235static void btc_update_requested_ps(struct radeon_device *rdev,
2236 struct radeon_ps *rps)
2237{
2238 struct rv7xx_ps *new_ps = rv770_get_ps(rps);
2239 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2240
2241 eg_pi->requested_rps = *rps;
2242 eg_pi->requested_ps = *new_ps;
2243 eg_pi->requested_rps.ps_priv = &eg_pi->requested_ps;
2244}
2245
2246void btc_dpm_reset_asic(struct radeon_device *rdev)
2247{
2248 rv770_restrict_performance_levels_before_switch(rdev);
2249 btc_disable_ulv(rdev);
2250 btc_set_boot_state_timing(rdev);
2251 rv770_set_boot_state(rdev);
2252}
2253
2254int btc_dpm_pre_set_power_state(struct radeon_device *rdev)
2255{
2256 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2257 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
2258 struct radeon_ps *new_ps = &requested_ps;
2259
2260 btc_update_requested_ps(rdev, new_ps);
2261
2262 btc_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
2263
2264 return 0;
2265}
2266
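/*
 * The actual state switch.  The ordering below is significant: ULV is
 * disabled and the boot ARB timing restored before performance levels
 * are restricted, the SMC is halted while the new state and MC registers
 * are uploaded, and ULV is only (conditionally) re-enabled once the
 * switch has completed.
 */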
2267int btc_dpm_set_power_state(struct radeon_device *rdev)
2268{
2269 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2270 struct radeon_ps *new_ps = &eg_pi->requested_rps;
2271 struct radeon_ps *old_ps = &eg_pi->current_rps;
2272 int ret;
2273
2274 ret = btc_disable_ulv(rdev);
2275 btc_set_boot_state_timing(rdev);
2276 ret = rv770_restrict_performance_levels_before_switch(rdev);
2277 if (ret) {
2278 DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
2279 return ret;
2280 }
2281 if (eg_pi->pcie_performance_request)
2282 cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);
2283
2284 rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
2285 ret = rv770_halt_smc(rdev);
2286 if (ret) {
2287 DRM_ERROR("rv770_halt_smc failed\n");
2288 return ret;
2289 }
2290 btc_set_at_for_uvd(rdev, new_ps);
2291 if (eg_pi->smu_uvd_hs)
2292 btc_notify_uvd_to_smc(rdev, new_ps);
2293 ret = cypress_upload_sw_state(rdev, new_ps);
2294 if (ret) {
2295 DRM_ERROR("cypress_upload_sw_state failed\n");
2296 return ret;
2297 }
2298 if (eg_pi->dynamic_ac_timing) {
2299 ret = cypress_upload_mc_reg_table(rdev, new_ps);
2300 if (ret) {
2301 DRM_ERROR("cypress_upload_mc_reg_table failed\n");
2302 return ret;
2303 }
2304 }
2305
2306 cypress_program_memory_timing_parameters(rdev, new_ps);
2307
2308 ret = rv770_resume_smc(rdev);
2309 if (ret) {
2310 DRM_ERROR("rv770_resume_smc failed\n");
2311 return ret;
2312 }
2313 ret = rv770_set_sw_state(rdev);
2314 if (ret) {
2315 DRM_ERROR("rv770_set_sw_state failed\n");
2316 return ret;
2317 }
2318 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2319
2320 if (eg_pi->pcie_performance_request)
2321 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
2322
2323 ret = btc_set_power_state_conditionally_enable_ulv(rdev, new_ps);
2324 if (ret) {
2325 DRM_ERROR("btc_set_power_state_conditionally_enable_ulv failed\n");
2326 return ret;
2327 }
2328
2329 ret = rv770_unrestrict_performance_levels_after_switch(rdev);
2330 if (ret) {
2331 DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
2332 return ret;
2333 }
2334
2335 return 0;
2336}
2337
2338void btc_dpm_post_set_power_state(struct radeon_device *rdev)
2339{
2340 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2341 struct radeon_ps *new_ps = &eg_pi->requested_rps;
2342
2343 btc_update_current_ps(rdev, new_ps);
2344}
2345
2346int btc_dpm_enable(struct radeon_device *rdev)
2347{
2348 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2349 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2350 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
2351 int ret;
2352
2353 if (pi->gfx_clock_gating)
2354 btc_cg_clock_gating_default(rdev);
2355
2356 if (btc_dpm_enabled(rdev))
2357 return -EINVAL;
2358
2359 if (pi->mg_clock_gating)
2360 btc_mg_clock_gating_default(rdev);
2361
2362 if (eg_pi->ls_clock_gating)
2363 btc_ls_clock_gating_default(rdev);
2364
2365 if (pi->voltage_control) {
2366 rv770_enable_voltage_control(rdev, true);
2367 ret = cypress_construct_voltage_tables(rdev);
2368 if (ret) {
2369 DRM_ERROR("cypress_construct_voltage_tables failed\n");
2370 return ret;
2371 }
2372 }
2373
2374 if (pi->mvdd_control) {
2375 ret = cypress_get_mvdd_configuration(rdev);
2376 if (ret) {
2377 DRM_ERROR("cypress_get_mvdd_configuration failed\n");
2378 return ret;
2379 }
2380 }
2381
2382 if (eg_pi->dynamic_ac_timing) {
2383 ret = btc_initialize_mc_reg_table(rdev);
2384 if (ret)
2385 eg_pi->dynamic_ac_timing = false;
2386 }
2387
2388 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
2389 rv770_enable_backbias(rdev, true);
2390
2391 if (pi->dynamic_ss)
2392 cypress_enable_spread_spectrum(rdev, true);
2393
2394 if (pi->thermal_protection)
2395 rv770_enable_thermal_protection(rdev, true);
2396
2397 rv770_setup_bsp(rdev);
2398 rv770_program_git(rdev);
2399 rv770_program_tp(rdev);
2400 rv770_program_tpp(rdev);
2401 rv770_program_sstp(rdev);
2402 rv770_program_engine_speed_parameters(rdev);
2403 cypress_enable_display_gap(rdev);
2404 rv770_program_vc(rdev);
2405
2406 if (pi->dynamic_pcie_gen2)
2407 btc_enable_dynamic_pcie_gen2(rdev, true);
2408
2409 ret = rv770_upload_firmware(rdev);
2410 if (ret) {
2411 DRM_ERROR("rv770_upload_firmware failed\n");
2412 return ret;
2413 }
2414 ret = cypress_get_table_locations(rdev);
2415 if (ret) {
2416 DRM_ERROR("cypress_get_table_locations failed\n");
2417 return ret;
2418 }
2419 ret = btc_init_smc_table(rdev, boot_ps);
2420 if (ret)
2421 return ret;
2422
2423 if (eg_pi->dynamic_ac_timing) {
2424 ret = cypress_populate_mc_reg_table(rdev, boot_ps);
2425 if (ret) {
2426 DRM_ERROR("cypress_populate_mc_reg_table failed\n");
2427 return ret;
2428 }
2429 }
2430
2431 cypress_program_response_times(rdev);
2432 r7xx_start_smc(rdev);
2433 ret = cypress_notify_smc_display_change(rdev, false);
2434 if (ret) {
2435 DRM_ERROR("cypress_notify_smc_display_change failed\n");
2436 return ret;
2437 }
2438 cypress_enable_sclk_control(rdev, true);
2439
2440 if (eg_pi->memory_transition)
2441 cypress_enable_mclk_control(rdev, true);
2442
2443 cypress_start_dpm(rdev);
2444
2445 if (pi->gfx_clock_gating)
2446 btc_cg_clock_gating_enable(rdev, true);
2447
2448 if (pi->mg_clock_gating)
2449 btc_mg_clock_gating_enable(rdev, true);
2450
2451 if (eg_pi->ls_clock_gating)
2452 btc_ls_clock_gating_enable(rdev, true);
2453
2454 if (rdev->irq.installed &&
2455 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
2456 PPSMC_Result result;
2457
2458 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
2459 if (ret)
2460 return ret;
2461 rdev->irq.dpm_thermal = true;
2462 radeon_irq_set(rdev);
2463 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
2464
2465 if (result != PPSMC_Result_OK)
2466 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
2467 }
2468
2469 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
2470
2471 btc_init_stutter_mode(rdev);
2472
2473 btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
2474
2475 return 0;
2476}
2477
2478void btc_dpm_disable(struct radeon_device *rdev)
2479{
2480 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2481 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2482
2483 if (!btc_dpm_enabled(rdev))
2484 return;
2485
2486 rv770_clear_vc(rdev);
2487
2488 if (pi->thermal_protection)
2489 rv770_enable_thermal_protection(rdev, false);
2490
2491 if (pi->dynamic_pcie_gen2)
2492 btc_enable_dynamic_pcie_gen2(rdev, false);
2493
2494 if (rdev->irq.installed &&
2495 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
2496 rdev->irq.dpm_thermal = false;
2497 radeon_irq_set(rdev);
2498 }
2499
2500 if (pi->gfx_clock_gating)
2501 btc_cg_clock_gating_enable(rdev, false);
2502
2503 if (pi->mg_clock_gating)
2504 btc_mg_clock_gating_enable(rdev, false);
2505
2506 if (eg_pi->ls_clock_gating)
2507 btc_ls_clock_gating_enable(rdev, false);
2508
2509 rv770_stop_dpm(rdev);
2510 btc_reset_to_default(rdev);
2511 btc_stop_smc(rdev);
2512 cypress_enable_spread_spectrum(rdev, false);
2513
2514 btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
2515}
2516
2517void btc_dpm_setup_asic(struct radeon_device *rdev)
2518{
2519 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2520
2521 rv770_get_memory_type(rdev);
2522 rv740_read_clock_registers(rdev);
2523 btc_read_arb_registers(rdev);
2524 rv770_read_voltage_smio_registers(rdev);
2525
2526 if (eg_pi->pcie_performance_request)
2527 cypress_advertise_gen2_capability(rdev);
2528
2529 rv770_get_pcie_gen2_status(rdev);
2530 rv770_enable_acpi_pm(rdev);
2531}
2532
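/*
 * One-time driver init: allocate the power info structures, parse the
 * power tables from the VBIOS, seed a small built-in dispclk/vddc
 * dependency table, and fill in the BTC-specific defaults and capability
 * flags used by the rest of this file.
 */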
2533int btc_dpm_init(struct radeon_device *rdev)
2534{
2535 struct rv7xx_power_info *pi;
2536 struct evergreen_power_info *eg_pi;
2537 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2538 u16 data_offset, size;
2539 u8 frev, crev;
2540 struct atom_clock_dividers dividers;
2541 int ret;
2542
2543 eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
2544 if (eg_pi == NULL)
2545 return -ENOMEM;
2546 rdev->pm.dpm.priv = eg_pi;
2547 pi = &eg_pi->rv7xx;
2548
2549 rv770_get_max_vddc(rdev);
2550
2551 eg_pi->ulv.supported = false;
2552 pi->acpi_vddc = 0;
2553 eg_pi->acpi_vddci = 0;
2554 pi->min_vddc_in_table = 0;
2555 pi->max_vddc_in_table = 0;
2556
2557 ret = rv7xx_parse_power_table(rdev);
2558 if (ret)
2559 return ret;
2560 ret = r600_parse_extended_power_table(rdev);
2561 if (ret)
2562 return ret;
2563
2564 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
2565 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
2566 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
2567 r600_free_extended_power_table(rdev);
2568 return -ENOMEM;
2569 }
2570 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
2571 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
2572 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
2573 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
2574 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 800;
2575 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
2576 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 800;
2577 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
2578 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 800;
2579
2580 if (rdev->pm.dpm.voltage_response_time == 0)
2581 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2582 if (rdev->pm.dpm.backbias_response_time == 0)
2583 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2584
2585 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2586 0, false, &dividers);
2587	if (ret)
2588		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2589	else
2590		pi->ref_div = dividers.ref_div + 1;
2591
2592 pi->mclk_strobe_mode_threshold = 40000;
2593 pi->mclk_edc_enable_threshold = 40000;
2594 eg_pi->mclk_edc_wr_enable_threshold = 40000;
2595
2596 pi->rlp = RV770_RLP_DFLT;
2597 pi->rmp = RV770_RMP_DFLT;
2598 pi->lhp = RV770_LHP_DFLT;
2599 pi->lmp = RV770_LMP_DFLT;
2600
2601 eg_pi->ats[0].rlp = RV770_RLP_DFLT;
2602 eg_pi->ats[0].rmp = RV770_RMP_DFLT;
2603 eg_pi->ats[0].lhp = RV770_LHP_DFLT;
2604 eg_pi->ats[0].lmp = RV770_LMP_DFLT;
2605
2606 eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
2607 eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
2608 eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
2609 eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;
2610
2611 eg_pi->smu_uvd_hs = true;
2612
2613 pi->voltage_control =
2614 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2615
2616 pi->mvdd_control =
2617 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2618
2619 eg_pi->vddci_control =
2620 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2621
2622 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
2623 &frev, &crev, &data_offset)) {
2624 pi->sclk_ss = true;
2625 pi->mclk_ss = true;
2626 pi->dynamic_ss = true;
2627 } else {
2628 pi->sclk_ss = false;
2629 pi->mclk_ss = false;
2630 pi->dynamic_ss = true;
2631 }
2632
2633 pi->asi = RV770_ASI_DFLT;
2634 pi->pasi = CYPRESS_HASI_DFLT;
2635 pi->vrc = CYPRESS_VRC_DFLT;
2636
2637 pi->power_gating = false;
2638
2639 pi->gfx_clock_gating = true;
2640
2641 pi->mg_clock_gating = true;
2642 pi->mgcgtssm = true;
2643 eg_pi->ls_clock_gating = false;
2644 eg_pi->sclk_deep_sleep = false;
2645
2646 pi->dynamic_pcie_gen2 = true;
2647
2648 if (pi->gfx_clock_gating &&
2649 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2650 pi->thermal_protection = true;
2651 else
2652 pi->thermal_protection = false;
2653
2654 pi->display_gap = true;
2655
2656 if (rdev->flags & RADEON_IS_MOBILITY)
2657 pi->dcodt = true;
2658 else
2659 pi->dcodt = false;
2660
2661 pi->ulps = true;
2662
2663 eg_pi->dynamic_ac_timing = true;
2664 eg_pi->abm = true;
2665 eg_pi->mcls = true;
2666 eg_pi->light_sleep = true;
2667 eg_pi->memory_transition = true;
2668#if defined(CONFIG_ACPI)
2669 eg_pi->pcie_performance_request =
2670 radeon_acpi_is_pcie_performance_request_supported(rdev);
2671#else
2672 eg_pi->pcie_performance_request = false;
2673#endif
2674
2675 if (rdev->family == CHIP_BARTS)
2676 eg_pi->dll_default_on = true;
2677 else
2678 eg_pi->dll_default_on = false;
2679
2681 if (ASIC_IS_LOMBOK(rdev))
2682 pi->mclk_stutter_mode_threshold = 30000;
2683 else
2684 pi->mclk_stutter_mode_threshold = 0;
2685
2686 pi->sram_end = SMC_RAM_END;
2687
2688 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
2689 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
2690 rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
2691 rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
2692 rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
2693 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
2694 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
2695
2696 if (rdev->family == CHIP_TURKS)
2697 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
2698 else
2699 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
2700
2701 return 0;
2702}
2703
2704void btc_dpm_fini(struct radeon_device *rdev)
2705{
2706 int i;
2707
2708 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2709 kfree(rdev->pm.dpm.ps[i].ps_priv);
2710 }
2711 kfree(rdev->pm.dpm.ps);
2712 kfree(rdev->pm.dpm.priv);
2713 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
2714 r600_free_extended_power_table(rdev);
2715}
2716
2717u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
2718{
2719 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2720 struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps);
2721
2722 if (low)
2723 return requested_state->low.sclk;
2724 else
2725 return requested_state->high.sclk;
2726}
2727
2728u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low)
2729{
2730 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2731 struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps);
2732
2733 if (low)
2734 return requested_state->low.mclk;
2735 else
2736 return requested_state->high.mclk;
2737}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
new file mode 100644
index 000000000000..1a15e0e41950
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __BTC_DPM_H__
24#define __BTC_DPM_H__
25
26#define BTC_RLP_UVD_DFLT 20
27#define BTC_RMP_UVD_DFLT 50
28#define BTC_LHP_UVD_DFLT 50
29#define BTC_LMP_UVD_DFLT 20
30#define BARTS_MGCGCGTSSMCTRL_DFLT 0x81944000
31#define TURKS_MGCGCGTSSMCTRL_DFLT 0x6e944000
32#define CAICOS_MGCGCGTSSMCTRL_DFLT 0x46944040
33#define BTC_CGULVPARAMETER_DFLT 0x00040035
34#define BTC_CGULVCONTROL_DFLT 0x00001450
35
36extern u32 btc_valid_sclk[40];
37
38void btc_read_arb_registers(struct radeon_device *rdev);
39void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
40 const u32 *sequence, u32 count);
41void btc_skip_blacklist_clocks(struct radeon_device *rdev,
42 const u32 max_sclk, const u32 max_mclk,
43 u32 *sclk, u32 *mclk);
44void btc_adjust_clock_combinations(struct radeon_device *rdev,
45 const struct radeon_clock_and_voltage_limits *max_limits,
46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci);
52bool btc_dpm_enabled(struct radeon_device *rdev);
53int btc_reset_to_default(struct radeon_device *rdev);
54void btc_notify_uvd_to_smc(struct radeon_device *rdev,
55 struct radeon_ps *radeon_new_state);
56
57#endif
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
new file mode 100644
index 000000000000..29e32de7e025
--- /dev/null
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef _BTCD_H_
25#define _BTCD_H_
26
27/* pm registers */
28
29#define GENERAL_PWRMGT 0x63c
30# define GLOBAL_PWRMGT_EN (1 << 0)
31# define STATIC_PM_EN (1 << 1)
32# define THERMAL_PROTECTION_DIS (1 << 2)
33# define THERMAL_PROTECTION_TYPE (1 << 3)
34# define ENABLE_GEN2PCIE (1 << 4)
35# define ENABLE_GEN2XSP (1 << 5)
36# define SW_SMIO_INDEX(x) ((x) << 6)
37# define SW_SMIO_INDEX_MASK (3 << 6)
38# define SW_SMIO_INDEX_SHIFT 6
39# define LOW_VOLT_D2_ACPI (1 << 8)
40# define LOW_VOLT_D3_ACPI (1 << 9)
41# define VOLT_PWRMGT_EN (1 << 10)
42# define BACKBIAS_PAD_EN (1 << 18)
43# define BACKBIAS_VALUE (1 << 19)
44# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
45# define AC_DC_SW (1 << 24)
46
47#define CG_BIF_REQ_AND_RSP 0x7f4
48#define CG_CLIENT_REQ(x) ((x) << 0)
49#define CG_CLIENT_REQ_MASK (0xff << 0)
50#define CG_CLIENT_REQ_SHIFT 0
51#define CG_CLIENT_RESP(x) ((x) << 8)
52#define CG_CLIENT_RESP_MASK (0xff << 8)
53#define CG_CLIENT_RESP_SHIFT 8
54#define CLIENT_CG_REQ(x) ((x) << 16)
55#define CLIENT_CG_REQ_MASK (0xff << 16)
56#define CLIENT_CG_REQ_SHIFT 16
57#define CLIENT_CG_RESP(x) ((x) << 24)
58#define CLIENT_CG_RESP_MASK (0xff << 24)
59#define CLIENT_CG_RESP_SHIFT 24
60
61#define SCLK_PSKIP_CNTL 0x8c0
62#define PSKIP_ON_ALLOW_STOP_HI(x) ((x) << 16)
63#define PSKIP_ON_ALLOW_STOP_HI_MASK (0xff << 16)
64#define PSKIP_ON_ALLOW_STOP_HI_SHIFT 16
65
66#define CG_ULV_CONTROL 0x8c8
67#define CG_ULV_PARAMETER 0x8cc
68
69#define MC_ARB_DRAM_TIMING 0x2774
70#define MC_ARB_DRAM_TIMING2 0x2778
71
72#define MC_ARB_RFSH_RATE 0x27b0
73#define POWERMODE0(x) ((x) << 0)
74#define POWERMODE0_MASK (0xff << 0)
75#define POWERMODE0_SHIFT 0
76#define POWERMODE1(x) ((x) << 8)
77#define POWERMODE1_MASK (0xff << 8)
78#define POWERMODE1_SHIFT 8
79#define POWERMODE2(x) ((x) << 16)
80#define POWERMODE2_MASK (0xff << 16)
81#define POWERMODE2_SHIFT 16
82#define POWERMODE3(x) ((x) << 24)
83#define POWERMODE3_MASK (0xff << 24)
84#define POWERMODE3_SHIFT 24
85
86#define MC_ARB_BURST_TIME 0x2808
87#define STATE0(x) ((x) << 0)
88#define STATE0_MASK (0x1f << 0)
89#define STATE0_SHIFT 0
90#define STATE1(x) ((x) << 5)
91#define STATE1_MASK (0x1f << 5)
92#define STATE1_SHIFT 5
93#define STATE2(x) ((x) << 10)
94#define STATE2_MASK (0x1f << 10)
95#define STATE2_SHIFT 10
96#define STATE3(x) ((x) << 15)
97#define STATE3_MASK (0x1f << 15)
98#define STATE3_SHIFT 15
99
100#define MC_SEQ_RAS_TIMING 0x28a0
101#define MC_SEQ_CAS_TIMING 0x28a4
102#define MC_SEQ_MISC_TIMING 0x28a8
103#define MC_SEQ_MISC_TIMING2 0x28ac
104
105#define MC_SEQ_RD_CTL_D0 0x28b4
106#define MC_SEQ_RD_CTL_D1 0x28b8
107#define MC_SEQ_WR_CTL_D0 0x28bc
108#define MC_SEQ_WR_CTL_D1 0x28c0
109
110#define MC_PMG_AUTO_CFG 0x28d4
111
112#define MC_SEQ_STATUS_M 0x29f4
113# define PMG_PWRSTATE (1 << 16)
114
115#define MC_SEQ_MISC0 0x2a00
116#define MC_SEQ_MISC0_GDDR5_SHIFT 28
117#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
118#define MC_SEQ_MISC0_GDDR5_VALUE 5
119#define MC_SEQ_MISC1 0x2a04
120#define MC_SEQ_RESERVE_M 0x2a08
121#define MC_PMG_CMD_EMRS 0x2a0c
122
123#define MC_SEQ_MISC3 0x2a2c
124
125#define MC_SEQ_MISC5 0x2a54
126#define MC_SEQ_MISC6 0x2a58
127
128#define MC_SEQ_MISC7 0x2a64
129
130#define MC_SEQ_CG 0x2a68
131#define CG_SEQ_REQ(x) ((x) << 0)
132#define CG_SEQ_REQ_MASK (0xff << 0)
133#define CG_SEQ_REQ_SHIFT 0
134#define CG_SEQ_RESP(x) ((x) << 8)
135#define CG_SEQ_RESP_MASK (0xff << 8)
136#define CG_SEQ_RESP_SHIFT 8
137#define SEQ_CG_REQ(x) ((x) << 16)
138#define SEQ_CG_REQ_MASK (0xff << 16)
139#define SEQ_CG_REQ_SHIFT 16
140#define SEQ_CG_RESP(x) ((x) << 24)
141#define SEQ_CG_RESP_MASK (0xff << 24)
142#define SEQ_CG_RESP_SHIFT 24
143#define MC_SEQ_RAS_TIMING_LP 0x2a6c
144#define MC_SEQ_CAS_TIMING_LP 0x2a70
145#define MC_SEQ_MISC_TIMING_LP 0x2a74
146#define MC_SEQ_MISC_TIMING2_LP 0x2a78
147#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
148#define MC_SEQ_WR_CTL_D1_LP 0x2a80
149#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
150#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
151
152#define MC_PMG_CMD_MRS 0x2aac
153
154#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
155#define MC_SEQ_RD_CTL_D1_LP 0x2b20
156
157#define MC_PMG_CMD_MRS1 0x2b44
158#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
159
160#define LB_SYNC_RESET_SEL 0x6b28
161#define LB_SYNC_RESET_SEL_MASK (3 << 0)
162#define LB_SYNC_RESET_SEL_SHIFT 0
163
164/* PCIE link stuff */
165#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
166# define LC_GEN2_EN_STRAP (1 << 0)
167# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
168# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
169# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
170# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
171# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 8
172# define LC_CURRENT_DATA_RATE (1 << 11)
173# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
174# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
175# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
176# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
177# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
178# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
179# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
180
181#endif
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
new file mode 100644
index 000000000000..ed1d91025928
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -0,0 +1,6987 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include "drmP.h"
29#include "radeon.h"
30#include "radeon_asic.h"
31#include "cikd.h"
32#include "atom.h"
33#include "cik_blit_shaders.h"
34
35/* GFX */
36#define CIK_PFP_UCODE_SIZE 2144
37#define CIK_ME_UCODE_SIZE 2144
38#define CIK_CE_UCODE_SIZE 2144
39/* compute */
40#define CIK_MEC_UCODE_SIZE 4192
41/* interrupts */
42#define BONAIRE_RLC_UCODE_SIZE 2048
43#define KB_RLC_UCODE_SIZE 2560
44#define KV_RLC_UCODE_SIZE 2560
45/* gddr controller */
46#define CIK_MC_UCODE_SIZE 7866
47/* sdma */
48#define CIK_SDMA_UCODE_SIZE 1050
49#define CIK_SDMA_UCODE_VERSION 64
50
51MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
52MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
53MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
54MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
55MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
56MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
57MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
58MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
59MODULE_FIRMWARE("radeon/KAVERI_me.bin");
60MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
61MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
62MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
63MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
64MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
65MODULE_FIRMWARE("radeon/KABINI_me.bin");
66MODULE_FIRMWARE("radeon/KABINI_ce.bin");
67MODULE_FIRMWARE("radeon/KABINI_mec.bin");
68MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
69MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
70
71extern int r600_ih_ring_alloc(struct radeon_device *rdev);
72extern void r600_ih_ring_fini(struct radeon_device *rdev);
73extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
74extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
75extern bool evergreen_is_display_hung(struct radeon_device *rdev);
76extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
77extern void si_rlc_fini(struct radeon_device *rdev);
78extern int si_rlc_init(struct radeon_device *rdev);
79static void cik_rlc_stop(struct radeon_device *rdev);
80
81/*
82 * Indirect registers accessor
83 */
84u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
85{
86 u32 r;
87
88 WREG32(PCIE_INDEX, reg);
89 (void)RREG32(PCIE_INDEX);
90 r = RREG32(PCIE_DATA);
91 return r;
92}
93
94void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
95{
96 WREG32(PCIE_INDEX, reg);
97 (void)RREG32(PCIE_INDEX);
98 WREG32(PCIE_DATA, v);
99 (void)RREG32(PCIE_DATA);
100}
101
102static const u32 bonaire_golden_spm_registers[] =
103{
104 0x30800, 0xe0ffffff, 0xe0000000
105};
106
107static const u32 bonaire_golden_common_registers[] =
108{
109 0xc770, 0xffffffff, 0x00000800,
110 0xc774, 0xffffffff, 0x00000800,
111 0xc798, 0xffffffff, 0x00007fbf,
112 0xc79c, 0xffffffff, 0x00007faf
113};
114
115static const u32 bonaire_golden_registers[] =
116{
117 0x3354, 0x00000333, 0x00000333,
118 0x3350, 0x000c0fc0, 0x00040200,
119 0x9a10, 0x00010000, 0x00058208,
120 0x3c000, 0xffff1fff, 0x00140000,
121 0x3c200, 0xfdfc0fff, 0x00000100,
122 0x3c234, 0x40000000, 0x40000200,
123 0x9830, 0xffffffff, 0x00000000,
124 0x9834, 0xf00fffff, 0x00000400,
125 0x9838, 0x0002021c, 0x00020200,
126 0xc78, 0x00000080, 0x00000000,
127 0x5bb0, 0x000000f0, 0x00000070,
128 0x5bc0, 0xf0311fff, 0x80300000,
129 0x98f8, 0x73773777, 0x12010001,
130 0x350c, 0x00810000, 0x408af000,
131 0x7030, 0x31000111, 0x00000011,
132 0x2f48, 0x73773777, 0x12010001,
133 0x220c, 0x00007fb6, 0x0021a1b1,
134 0x2210, 0x00007fb6, 0x002021b1,
135 0x2180, 0x00007fb6, 0x00002191,
136 0x2218, 0x00007fb6, 0x002121b1,
137 0x221c, 0x00007fb6, 0x002021b1,
138 0x21dc, 0x00007fb6, 0x00002191,
139 0x21e0, 0x00007fb6, 0x00002191,
140 0x3628, 0x0000003f, 0x0000000a,
141 0x362c, 0x0000003f, 0x0000000a,
142 0x2ae4, 0x00073ffe, 0x000022a2,
143 0x240c, 0x000007ff, 0x00000000,
144 0x8a14, 0xf000003f, 0x00000007,
145 0x8bf0, 0x00002001, 0x00000001,
146 0x8b24, 0xffffffff, 0x00ffffff,
147 0x30a04, 0x0000ff0f, 0x00000000,
148 0x28a4c, 0x07ffffff, 0x06000000,
149 0x4d8, 0x00000fff, 0x00000100,
150 0x3e78, 0x00000001, 0x00000002,
151 0x9100, 0x03000000, 0x0362c688,
152 0x8c00, 0x000000ff, 0x00000001,
153 0xe40, 0x00001fff, 0x00001fff,
154 0x9060, 0x0000007f, 0x00000020,
155 0x9508, 0x00010000, 0x00010000,
156 0xac14, 0x000003ff, 0x000000f3,
157 0xac0c, 0xffffffff, 0x00001032
158};
159
160static const u32 bonaire_mgcg_cgcg_init[] =
161{
162 0xc420, 0xffffffff, 0xfffffffc,
163 0x30800, 0xffffffff, 0xe0000000,
164 0x3c2a0, 0xffffffff, 0x00000100,
165 0x3c208, 0xffffffff, 0x00000100,
166 0x3c2c0, 0xffffffff, 0xc0000100,
167 0x3c2c8, 0xffffffff, 0xc0000100,
168 0x3c2c4, 0xffffffff, 0xc0000100,
169 0x55e4, 0xffffffff, 0x00600100,
170 0x3c280, 0xffffffff, 0x00000100,
171 0x3c214, 0xffffffff, 0x06000100,
172 0x3c220, 0xffffffff, 0x00000100,
173 0x3c218, 0xffffffff, 0x06000100,
174 0x3c204, 0xffffffff, 0x00000100,
175 0x3c2e0, 0xffffffff, 0x00000100,
176 0x3c224, 0xffffffff, 0x00000100,
177 0x3c200, 0xffffffff, 0x00000100,
178 0x3c230, 0xffffffff, 0x00000100,
179 0x3c234, 0xffffffff, 0x00000100,
180 0x3c250, 0xffffffff, 0x00000100,
181 0x3c254, 0xffffffff, 0x00000100,
182 0x3c258, 0xffffffff, 0x00000100,
183 0x3c25c, 0xffffffff, 0x00000100,
184 0x3c260, 0xffffffff, 0x00000100,
185 0x3c27c, 0xffffffff, 0x00000100,
186 0x3c278, 0xffffffff, 0x00000100,
187 0x3c210, 0xffffffff, 0x06000100,
188 0x3c290, 0xffffffff, 0x00000100,
189 0x3c274, 0xffffffff, 0x00000100,
190 0x3c2b4, 0xffffffff, 0x00000100,
191 0x3c2b0, 0xffffffff, 0x00000100,
192 0x3c270, 0xffffffff, 0x00000100,
193 0x30800, 0xffffffff, 0xe0000000,
194 0x3c020, 0xffffffff, 0x00010000,
195 0x3c024, 0xffffffff, 0x00030002,
196 0x3c028, 0xffffffff, 0x00040007,
197 0x3c02c, 0xffffffff, 0x00060005,
198 0x3c030, 0xffffffff, 0x00090008,
199 0x3c034, 0xffffffff, 0x00010000,
200 0x3c038, 0xffffffff, 0x00030002,
201 0x3c03c, 0xffffffff, 0x00040007,
202 0x3c040, 0xffffffff, 0x00060005,
203 0x3c044, 0xffffffff, 0x00090008,
204 0x3c048, 0xffffffff, 0x00010000,
205 0x3c04c, 0xffffffff, 0x00030002,
206 0x3c050, 0xffffffff, 0x00040007,
207 0x3c054, 0xffffffff, 0x00060005,
208 0x3c058, 0xffffffff, 0x00090008,
209 0x3c05c, 0xffffffff, 0x00010000,
210 0x3c060, 0xffffffff, 0x00030002,
211 0x3c064, 0xffffffff, 0x00040007,
212 0x3c068, 0xffffffff, 0x00060005,
213 0x3c06c, 0xffffffff, 0x00090008,
214 0x3c070, 0xffffffff, 0x00010000,
215 0x3c074, 0xffffffff, 0x00030002,
216 0x3c078, 0xffffffff, 0x00040007,
217 0x3c07c, 0xffffffff, 0x00060005,
218 0x3c080, 0xffffffff, 0x00090008,
219 0x3c084, 0xffffffff, 0x00010000,
220 0x3c088, 0xffffffff, 0x00030002,
221 0x3c08c, 0xffffffff, 0x00040007,
222 0x3c090, 0xffffffff, 0x00060005,
223 0x3c094, 0xffffffff, 0x00090008,
224 0x3c098, 0xffffffff, 0x00010000,
225 0x3c09c, 0xffffffff, 0x00030002,
226 0x3c0a0, 0xffffffff, 0x00040007,
227 0x3c0a4, 0xffffffff, 0x00060005,
228 0x3c0a8, 0xffffffff, 0x00090008,
229 0x3c000, 0xffffffff, 0x96e00200,
230 0x8708, 0xffffffff, 0x00900100,
231 0xc424, 0xffffffff, 0x0020003f,
232 0x38, 0xffffffff, 0x0140001c,
233 0x3c, 0x000f0000, 0x000f0000,
234 0x220, 0xffffffff, 0xC060000C,
235 0x224, 0xc0000fff, 0x00000100,
236 0xf90, 0xffffffff, 0x00000100,
237 0xf98, 0x00000101, 0x00000000,
238 0x20a8, 0xffffffff, 0x00000104,
239 0x55e4, 0xff000fff, 0x00000100,
240 0x30cc, 0xc0000fff, 0x00000104,
241 0xc1e4, 0x00000001, 0x00000001,
242 0xd00c, 0xff000ff0, 0x00000100,
243 0xd80c, 0xff000ff0, 0x00000100
244};
245
246static const u32 spectre_golden_spm_registers[] =
247{
248 0x30800, 0xe0ffffff, 0xe0000000
249};
250
251static const u32 spectre_golden_common_registers[] =
252{
253 0xc770, 0xffffffff, 0x00000800,
254 0xc774, 0xffffffff, 0x00000800,
255 0xc798, 0xffffffff, 0x00007fbf,
256 0xc79c, 0xffffffff, 0x00007faf
257};
258
259static const u32 spectre_golden_registers[] =
260{
261 0x3c000, 0xffff1fff, 0x96940200,
262 0x3c00c, 0xffff0001, 0xff000000,
263 0x3c200, 0xfffc0fff, 0x00000100,
264 0x6ed8, 0x00010101, 0x00010000,
265 0x9834, 0xf00fffff, 0x00000400,
266 0x9838, 0xfffffffc, 0x00020200,
267 0x5bb0, 0x000000f0, 0x00000070,
268 0x5bc0, 0xf0311fff, 0x80300000,
269 0x98f8, 0x73773777, 0x12010001,
270 0x9b7c, 0x00ff0000, 0x00fc0000,
271 0x2f48, 0x73773777, 0x12010001,
272 0x8a14, 0xf000003f, 0x00000007,
273 0x8b24, 0xffffffff, 0x00ffffff,
274 0x28350, 0x3f3f3fff, 0x00000082,
275 0x28355, 0x0000003f, 0x00000000,
276 0x3e78, 0x00000001, 0x00000002,
277 0x913c, 0xffff03df, 0x00000004,
278 0xc768, 0x00000008, 0x00000008,
279 0x8c00, 0x000008ff, 0x00000800,
280 0x9508, 0x00010000, 0x00010000,
281 0xac0c, 0xffffffff, 0x54763210,
282 0x214f8, 0x01ff01ff, 0x00000002,
283 0x21498, 0x007ff800, 0x00200000,
284 0x2015c, 0xffffffff, 0x00000f40,
285 0x30934, 0xffffffff, 0x00000001
286};
287
288static const u32 spectre_mgcg_cgcg_init[] =
289{
290 0xc420, 0xffffffff, 0xfffffffc,
291 0x30800, 0xffffffff, 0xe0000000,
292 0x3c2a0, 0xffffffff, 0x00000100,
293 0x3c208, 0xffffffff, 0x00000100,
294 0x3c2c0, 0xffffffff, 0x00000100,
295 0x3c2c8, 0xffffffff, 0x00000100,
296 0x3c2c4, 0xffffffff, 0x00000100,
297 0x55e4, 0xffffffff, 0x00600100,
298 0x3c280, 0xffffffff, 0x00000100,
299 0x3c214, 0xffffffff, 0x06000100,
300 0x3c220, 0xffffffff, 0x00000100,
301 0x3c218, 0xffffffff, 0x06000100,
302 0x3c204, 0xffffffff, 0x00000100,
303 0x3c2e0, 0xffffffff, 0x00000100,
304 0x3c224, 0xffffffff, 0x00000100,
305 0x3c200, 0xffffffff, 0x00000100,
306 0x3c230, 0xffffffff, 0x00000100,
307 0x3c234, 0xffffffff, 0x00000100,
308 0x3c250, 0xffffffff, 0x00000100,
309 0x3c254, 0xffffffff, 0x00000100,
310 0x3c258, 0xffffffff, 0x00000100,
311 0x3c25c, 0xffffffff, 0x00000100,
312 0x3c260, 0xffffffff, 0x00000100,
313 0x3c27c, 0xffffffff, 0x00000100,
314 0x3c278, 0xffffffff, 0x00000100,
315 0x3c210, 0xffffffff, 0x06000100,
316 0x3c290, 0xffffffff, 0x00000100,
317 0x3c274, 0xffffffff, 0x00000100,
318 0x3c2b4, 0xffffffff, 0x00000100,
319 0x3c2b0, 0xffffffff, 0x00000100,
320 0x3c270, 0xffffffff, 0x00000100,
321 0x30800, 0xffffffff, 0xe0000000,
322 0x3c020, 0xffffffff, 0x00010000,
323 0x3c024, 0xffffffff, 0x00030002,
324 0x3c028, 0xffffffff, 0x00040007,
325 0x3c02c, 0xffffffff, 0x00060005,
326 0x3c030, 0xffffffff, 0x00090008,
327 0x3c034, 0xffffffff, 0x00010000,
328 0x3c038, 0xffffffff, 0x00030002,
329 0x3c03c, 0xffffffff, 0x00040007,
330 0x3c040, 0xffffffff, 0x00060005,
331 0x3c044, 0xffffffff, 0x00090008,
332 0x3c048, 0xffffffff, 0x00010000,
333 0x3c04c, 0xffffffff, 0x00030002,
334 0x3c050, 0xffffffff, 0x00040007,
335 0x3c054, 0xffffffff, 0x00060005,
336 0x3c058, 0xffffffff, 0x00090008,
337 0x3c05c, 0xffffffff, 0x00010000,
338 0x3c060, 0xffffffff, 0x00030002,
339 0x3c064, 0xffffffff, 0x00040007,
340 0x3c068, 0xffffffff, 0x00060005,
341 0x3c06c, 0xffffffff, 0x00090008,
342 0x3c070, 0xffffffff, 0x00010000,
343 0x3c074, 0xffffffff, 0x00030002,
344 0x3c078, 0xffffffff, 0x00040007,
345 0x3c07c, 0xffffffff, 0x00060005,
346 0x3c080, 0xffffffff, 0x00090008,
347 0x3c084, 0xffffffff, 0x00010000,
348 0x3c088, 0xffffffff, 0x00030002,
349 0x3c08c, 0xffffffff, 0x00040007,
350 0x3c090, 0xffffffff, 0x00060005,
351 0x3c094, 0xffffffff, 0x00090008,
352 0x3c098, 0xffffffff, 0x00010000,
353 0x3c09c, 0xffffffff, 0x00030002,
354 0x3c0a0, 0xffffffff, 0x00040007,
355 0x3c0a4, 0xffffffff, 0x00060005,
356 0x3c0a8, 0xffffffff, 0x00090008,
357 0x3c0ac, 0xffffffff, 0x00010000,
358 0x3c0b0, 0xffffffff, 0x00030002,
359 0x3c0b4, 0xffffffff, 0x00040007,
360 0x3c0b8, 0xffffffff, 0x00060005,
361 0x3c0bc, 0xffffffff, 0x00090008,
362 0x3c000, 0xffffffff, 0x96e00200,
363 0x8708, 0xffffffff, 0x00900100,
364 0xc424, 0xffffffff, 0x0020003f,
365 0x38, 0xffffffff, 0x0140001c,
366 0x3c, 0x000f0000, 0x000f0000,
367 0x220, 0xffffffff, 0xC060000C,
368 0x224, 0xc0000fff, 0x00000100,
369 0xf90, 0xffffffff, 0x00000100,
370 0xf98, 0x00000101, 0x00000000,
371 0x20a8, 0xffffffff, 0x00000104,
372 0x55e4, 0xff000fff, 0x00000100,
373 0x30cc, 0xc0000fff, 0x00000104,
374 0xc1e4, 0x00000001, 0x00000001,
375 0xd00c, 0xff000ff0, 0x00000100,
376 0xd80c, 0xff000ff0, 0x00000100
377};
378
379static const u32 kalindi_golden_spm_registers[] =
380{
381 0x30800, 0xe0ffffff, 0xe0000000
382};
383
384static const u32 kalindi_golden_common_registers[] =
385{
386 0xc770, 0xffffffff, 0x00000800,
387 0xc774, 0xffffffff, 0x00000800,
388 0xc798, 0xffffffff, 0x00007fbf,
389 0xc79c, 0xffffffff, 0x00007faf
390};
391
392static const u32 kalindi_golden_registers[] =
393{
394 0x3c000, 0xffffdfff, 0x6e944040,
395 0x55e4, 0xff607fff, 0xfc000100,
396 0x3c220, 0xff000fff, 0x00000100,
397 0x3c224, 0xff000fff, 0x00000100,
398 0x3c200, 0xfffc0fff, 0x00000100,
399 0x6ed8, 0x00010101, 0x00010000,
400 0x9830, 0xffffffff, 0x00000000,
401 0x9834, 0xf00fffff, 0x00000400,
402 0x5bb0, 0x000000f0, 0x00000070,
403 0x5bc0, 0xf0311fff, 0x80300000,
404 0x98f8, 0x73773777, 0x12010001,
405 0x98fc, 0xffffffff, 0x00000010,
406 0x9b7c, 0x00ff0000, 0x00fc0000,
407 0x8030, 0x00001f0f, 0x0000100a,
408 0x2f48, 0x73773777, 0x12010001,
409 0x2408, 0x000fffff, 0x000c007f,
410 0x8a14, 0xf000003f, 0x00000007,
411 0x8b24, 0x3fff3fff, 0x00ffcfff,
412 0x30a04, 0x0000ff0f, 0x00000000,
413 0x28a4c, 0x07ffffff, 0x06000000,
414 0x4d8, 0x00000fff, 0x00000100,
415 0x3e78, 0x00000001, 0x00000002,
416 0xc768, 0x00000008, 0x00000008,
417 0x8c00, 0x000000ff, 0x00000003,
418 0x214f8, 0x01ff01ff, 0x00000002,
419 0x21498, 0x007ff800, 0x00200000,
420 0x2015c, 0xffffffff, 0x00000f40,
421 0x88c4, 0x001f3ae3, 0x00000082,
422 0x88d4, 0x0000001f, 0x00000010,
423 0x30934, 0xffffffff, 0x00000000
424};
425
426static const u32 kalindi_mgcg_cgcg_init[] =
427{
428 0xc420, 0xffffffff, 0xfffffffc,
429 0x30800, 0xffffffff, 0xe0000000,
430 0x3c2a0, 0xffffffff, 0x00000100,
431 0x3c208, 0xffffffff, 0x00000100,
432 0x3c2c0, 0xffffffff, 0x00000100,
433 0x3c2c8, 0xffffffff, 0x00000100,
434 0x3c2c4, 0xffffffff, 0x00000100,
435 0x55e4, 0xffffffff, 0x00600100,
436 0x3c280, 0xffffffff, 0x00000100,
437 0x3c214, 0xffffffff, 0x06000100,
438 0x3c220, 0xffffffff, 0x00000100,
439 0x3c218, 0xffffffff, 0x06000100,
440 0x3c204, 0xffffffff, 0x00000100,
441 0x3c2e0, 0xffffffff, 0x00000100,
442 0x3c224, 0xffffffff, 0x00000100,
443 0x3c200, 0xffffffff, 0x00000100,
444 0x3c230, 0xffffffff, 0x00000100,
445 0x3c234, 0xffffffff, 0x00000100,
446 0x3c250, 0xffffffff, 0x00000100,
447 0x3c254, 0xffffffff, 0x00000100,
448 0x3c258, 0xffffffff, 0x00000100,
449 0x3c25c, 0xffffffff, 0x00000100,
450 0x3c260, 0xffffffff, 0x00000100,
451 0x3c27c, 0xffffffff, 0x00000100,
452 0x3c278, 0xffffffff, 0x00000100,
453 0x3c210, 0xffffffff, 0x06000100,
454 0x3c290, 0xffffffff, 0x00000100,
455 0x3c274, 0xffffffff, 0x00000100,
456 0x3c2b4, 0xffffffff, 0x00000100,
457 0x3c2b0, 0xffffffff, 0x00000100,
458 0x3c270, 0xffffffff, 0x00000100,
459 0x30800, 0xffffffff, 0xe0000000,
460 0x3c020, 0xffffffff, 0x00010000,
461 0x3c024, 0xffffffff, 0x00030002,
462 0x3c028, 0xffffffff, 0x00040007,
463 0x3c02c, 0xffffffff, 0x00060005,
464 0x3c030, 0xffffffff, 0x00090008,
465 0x3c034, 0xffffffff, 0x00010000,
466 0x3c038, 0xffffffff, 0x00030002,
467 0x3c03c, 0xffffffff, 0x00040007,
468 0x3c040, 0xffffffff, 0x00060005,
469 0x3c044, 0xffffffff, 0x00090008,
470 0x3c000, 0xffffffff, 0x96e00200,
471 0x8708, 0xffffffff, 0x00900100,
472 0xc424, 0xffffffff, 0x0020003f,
473 0x38, 0xffffffff, 0x0140001c,
474 0x3c, 0x000f0000, 0x000f0000,
475 0x220, 0xffffffff, 0xC060000C,
476 0x224, 0xc0000fff, 0x00000100,
477 0x20a8, 0xffffffff, 0x00000104,
478 0x55e4, 0xff000fff, 0x00000100,
479 0x30cc, 0xc0000fff, 0x00000104,
480 0xc1e4, 0x00000001, 0x00000001,
481 0xd00c, 0xff000ff0, 0x00000100,
482 0xd80c, 0xff000ff0, 0x00000100
483};
484
485static void cik_init_golden_registers(struct radeon_device *rdev)
486{
487 switch (rdev->family) {
488 case CHIP_BONAIRE:
489 radeon_program_register_sequence(rdev,
490 bonaire_mgcg_cgcg_init,
491 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
492 radeon_program_register_sequence(rdev,
493 bonaire_golden_registers,
494 (const u32)ARRAY_SIZE(bonaire_golden_registers));
495 radeon_program_register_sequence(rdev,
496 bonaire_golden_common_registers,
497 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
498 radeon_program_register_sequence(rdev,
499 bonaire_golden_spm_registers,
500 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
501 break;
502 case CHIP_KABINI:
503 radeon_program_register_sequence(rdev,
504 kalindi_mgcg_cgcg_init,
505 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
506 radeon_program_register_sequence(rdev,
507 kalindi_golden_registers,
508 (const u32)ARRAY_SIZE(kalindi_golden_registers));
509 radeon_program_register_sequence(rdev,
510 kalindi_golden_common_registers,
511 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
512 radeon_program_register_sequence(rdev,
513 kalindi_golden_spm_registers,
514 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
515 break;
516 case CHIP_KAVERI:
517 radeon_program_register_sequence(rdev,
518 spectre_mgcg_cgcg_init,
519 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
520 radeon_program_register_sequence(rdev,
521 spectre_golden_registers,
522 (const u32)ARRAY_SIZE(spectre_golden_registers));
523 radeon_program_register_sequence(rdev,
524 spectre_golden_common_registers,
525 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
526 radeon_program_register_sequence(rdev,
527 spectre_golden_spm_registers,
528 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
529 break;
530 default:
531 break;
532 }
533}
534
535/**
536 * cik_get_xclk - get the xclk
537 *
538 * @rdev: radeon_device pointer
539 *
540 * Returns the reference clock used by the gfx engine
541 * (CIK).
542 */
543u32 cik_get_xclk(struct radeon_device *rdev)
544{
545 u32 reference_clock = rdev->clock.spll.reference_freq;
546
547 if (rdev->flags & RADEON_IS_IGP) {
548 if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
549 return reference_clock / 2;
550 } else {
551 if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
552 return reference_clock / 4;
553 }
554 return reference_clock;
555}
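/*
 * Example (pure arithmetic, illustrative values): with
 * rdev->clock.spll.reference_freq == 10000 and XTALIN_DIVIDE set on a
 * dGPU, cik_get_xclk() returns 2500; units follow reference_freq.
 */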
556
557/**
558 * cik_mm_rdoorbell - read a doorbell dword
559 *
560 * @rdev: radeon_device pointer
561 * @offset: byte offset into the aperture
562 *
563 * Returns the value in the doorbell aperture at the
564 * requested offset (CIK).
565 */
566u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
567{
568 if (offset < rdev->doorbell.size) {
569 return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
570 } else {
571 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
572 return 0;
573 }
574}
575
576/**
577 * cik_mm_wdoorbell - write a doorbell dword
578 *
579 * @rdev: radeon_device pointer
580 * @offset: byte offset into the aperture
581 * @v: value to write
582 *
583 * Writes @v to the doorbell aperture at the
584 * requested offset (CIK).
585 */
586void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
587{
588 if (offset < rdev->doorbell.size) {
589 writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
590 } else {
591 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
592 }
593}
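/*
 * Usage sketch (illustrative; the offset is in bytes, so doorbell slot i
 * lives at byte offset i * 4):
 *
 *	cik_mm_wdoorbell(rdev, doorbell_index * 4, ring->wptr);
 *
 * where doorbell_index is a hypothetical per-ring slot number.
 */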
594
595#define BONAIRE_IO_MC_REGS_SIZE 36
596
597static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
598{
599 {0x00000070, 0x04400000},
600 {0x00000071, 0x80c01803},
601 {0x00000072, 0x00004004},
602 {0x00000073, 0x00000100},
603 {0x00000074, 0x00ff0000},
604 {0x00000075, 0x34000000},
605 {0x00000076, 0x08000014},
606 {0x00000077, 0x00cc08ec},
607 {0x00000078, 0x00000400},
608 {0x00000079, 0x00000000},
609 {0x0000007a, 0x04090000},
610 {0x0000007c, 0x00000000},
611 {0x0000007e, 0x4408a8e8},
612 {0x0000007f, 0x00000304},
613 {0x00000080, 0x00000000},
614 {0x00000082, 0x00000001},
615 {0x00000083, 0x00000002},
616 {0x00000084, 0xf3e4f400},
617 {0x00000085, 0x052024e3},
618 {0x00000087, 0x00000000},
619 {0x00000088, 0x01000000},
620 {0x0000008a, 0x1c0a0000},
621 {0x0000008b, 0xff010000},
622 {0x0000008d, 0xffffefff},
623 {0x0000008e, 0xfff3efff},
624 {0x0000008f, 0xfff3efbf},
625 {0x00000092, 0xf7ffffff},
626 {0x00000093, 0xffffff7f},
627 {0x00000095, 0x00101101},
628 {0x00000096, 0x00000fff},
629 {0x00000097, 0x00116fff},
630 {0x00000098, 0x60010000},
631 {0x00000099, 0x10010000},
632 {0x0000009a, 0x00006000},
633 {0x0000009b, 0x00001000},
634 {0x0000009f, 0x00b48000}
635};
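/*
 * Each row above is an {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pair;
 * ci_mc_load_microcode() below replays them in order before streaming the
 * MC ucode itself.
 */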
636
637/**
638 * cik_srbm_select - select specific register instances
639 *
640 * @rdev: radeon_device pointer
641 * @me: selected ME (micro engine)
642 * @pipe: pipe
643 * @queue: queue
644 * @vmid: VMID
645 *
 * Switches the currently active register instances. Some
647 * registers are instanced per VMID, others are instanced per
648 * me/pipe/queue combination.
649 */
650static void cik_srbm_select(struct radeon_device *rdev,
651 u32 me, u32 pipe, u32 queue, u32 vmid)
652{
653 u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
654 MEID(me & 0x3) |
655 VMID(vmid & 0xf) |
656 QUEUEID(queue & 0x7));
657 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
658}
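/*
 * Usage sketch (illustrative): select one instance, program its instanced
 * registers, then restore the default selection so later register accesses
 * are not silently redirected:
 *
 *	cik_srbm_select(rdev, me, pipe, queue, vmid);
 *	WREG32(SOME_INSTANCED_REG, val);	/* hypothetical instanced register */
 *	cik_srbm_select(rdev, 0, 0, 0, 0);
 */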
659
660/* ucode loading */
661/**
662 * ci_mc_load_microcode - load MC ucode into the hw
663 *
664 * @rdev: radeon_device pointer
665 *
666 * Load the GDDR MC ucode into the hw (CIK).
667 * Returns 0 on success, error on failure.
668 */
669static int ci_mc_load_microcode(struct radeon_device *rdev)
670{
671 const __be32 *fw_data;
	u32 running;
673 u32 *io_mc_regs;
674 int i, ucode_size, regs_size;
675
676 if (!rdev->mc_fw)
677 return -EINVAL;
678
679 switch (rdev->family) {
680 case CHIP_BONAIRE:
681 default:
682 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
683 ucode_size = CIK_MC_UCODE_SIZE;
684 regs_size = BONAIRE_IO_MC_REGS_SIZE;
685 break;
686 }
687
688 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
689
	/* the MC ucode can only be reloaded while the MC is idle */
	if (running == 0) {
695
696 /* reset the engine and set to writable */
697 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
698 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
699
700 /* load mc io regs */
701 for (i = 0; i < regs_size; i++) {
702 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
703 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
704 }
705 /* load the MC ucode */
706 fw_data = (const __be32 *)rdev->mc_fw->data;
707 for (i = 0; i < ucode_size; i++)
708 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
709
710 /* put the engine back into the active state */
711 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
712 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
713 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
714
715 /* wait for training to complete */
716 for (i = 0; i < rdev->usec_timeout; i++) {
717 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
718 break;
719 udelay(1);
720 }
721 for (i = 0; i < rdev->usec_timeout; i++) {
722 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
723 break;
724 udelay(1);
725 }
	}
730
731 return 0;
732}
733
734/**
735 * cik_init_microcode - load ucode images from disk
736 *
737 * @rdev: radeon_device pointer
738 *
739 * Use the firmware interface to load the ucode images into
740 * the driver (not loaded into hw).
741 * Returns 0 on success, error on failure.
742 */
743static int cik_init_microcode(struct radeon_device *rdev)
744{
745 struct platform_device *pdev;
746 const char *chip_name;
747 size_t pfp_req_size, me_req_size, ce_req_size,
748 mec_req_size, rlc_req_size, mc_req_size,
749 sdma_req_size;
750 char fw_name[30];
751 int err;
752
753 DRM_DEBUG("\n");
754
755 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
756 err = IS_ERR(pdev);
757 if (err) {
758 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
759 return -EINVAL;
760 }
761
762 switch (rdev->family) {
763 case CHIP_BONAIRE:
764 chip_name = "BONAIRE";
765 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
766 me_req_size = CIK_ME_UCODE_SIZE * 4;
767 ce_req_size = CIK_CE_UCODE_SIZE * 4;
768 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
769 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
770 mc_req_size = CIK_MC_UCODE_SIZE * 4;
771 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
772 break;
773 case CHIP_KAVERI:
774 chip_name = "KAVERI";
775 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
776 me_req_size = CIK_ME_UCODE_SIZE * 4;
777 ce_req_size = CIK_CE_UCODE_SIZE * 4;
778 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
779 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
780 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
781 break;
782 case CHIP_KABINI:
783 chip_name = "KABINI";
784 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
785 me_req_size = CIK_ME_UCODE_SIZE * 4;
786 ce_req_size = CIK_CE_UCODE_SIZE * 4;
787 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
788 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
789 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
790 break;
791 default: BUG();
792 }
793
794 DRM_INFO("Loading %s Microcode\n", chip_name);
795
796 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
797 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
798 if (err)
799 goto out;
800 if (rdev->pfp_fw->size != pfp_req_size) {
801 printk(KERN_ERR
802 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
803 rdev->pfp_fw->size, fw_name);
804 err = -EINVAL;
805 goto out;
806 }
807
808 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
809 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
810 if (err)
811 goto out;
812 if (rdev->me_fw->size != me_req_size) {
813 printk(KERN_ERR
814 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
815 rdev->me_fw->size, fw_name);
816 err = -EINVAL;
817 }
818
819 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
820 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
821 if (err)
822 goto out;
823 if (rdev->ce_fw->size != ce_req_size) {
824 printk(KERN_ERR
825 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
826 rdev->ce_fw->size, fw_name);
827 err = -EINVAL;
828 }
829
830 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
831 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
832 if (err)
833 goto out;
834 if (rdev->mec_fw->size != mec_req_size) {
835 printk(KERN_ERR
836 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
837 rdev->mec_fw->size, fw_name);
838 err = -EINVAL;
839 }
840
841 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
842 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
843 if (err)
844 goto out;
845 if (rdev->rlc_fw->size != rlc_req_size) {
846 printk(KERN_ERR
847 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
848 rdev->rlc_fw->size, fw_name);
849 err = -EINVAL;
850 }
851
852 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
853 err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev);
854 if (err)
855 goto out;
856 if (rdev->sdma_fw->size != sdma_req_size) {
857 printk(KERN_ERR
858 "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
859 rdev->sdma_fw->size, fw_name);
860 err = -EINVAL;
861 }
862
863 /* No MC ucode on APUs */
864 if (!(rdev->flags & RADEON_IS_IGP)) {
865 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
866 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
867 if (err)
868 goto out;
869 if (rdev->mc_fw->size != mc_req_size) {
870 printk(KERN_ERR
871 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
872 rdev->mc_fw->size, fw_name);
873 err = -EINVAL;
874 }
875 }
876
877out:
878 platform_device_unregister(pdev);
879
880 if (err) {
881 if (err != -EINVAL)
882 printk(KERN_ERR
883 "cik_cp: Failed to load firmware \"%s\"\n",
884 fw_name);
885 release_firmware(rdev->pfp_fw);
886 rdev->pfp_fw = NULL;
887 release_firmware(rdev->me_fw);
888 rdev->me_fw = NULL;
		release_firmware(rdev->ce_fw);
		rdev->ce_fw = NULL;
		release_firmware(rdev->mec_fw);
		rdev->mec_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->sdma_fw);
		rdev->sdma_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
895 }
896 return err;
897}
898
899/*
900 * Core functions
901 */
902/**
903 * cik_tiling_mode_table_init - init the hw tiling table
904 *
905 * @rdev: radeon_device pointer
906 *
907 * Starting with SI, the tiling setup is done globally in a
908 * set of 32 tiling modes. Rather than selecting each set of
909 * parameters per surface as on older asics, we just select
910 * which index in the tiling table we want to use, and the
911 * surface uses those parameters (CIK).
912 */
913static void cik_tiling_mode_table_init(struct radeon_device *rdev)
914{
915 const u32 num_tile_mode_states = 32;
916 const u32 num_secondary_tile_mode_states = 16;
917 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
918 u32 num_pipe_configs;
919 u32 num_rbs = rdev->config.cik.max_backends_per_se *
920 rdev->config.cik.max_shader_engines;
921
922 switch (rdev->config.cik.mem_row_size_in_kb) {
923 case 1:
924 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
925 break;
926 case 2:
927 default:
928 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
929 break;
930 case 4:
931 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
932 break;
933 }
934
935 num_pipe_configs = rdev->config.cik.max_tile_pipes;
936 if (num_pipe_configs > 8)
		num_pipe_configs = 8; /* the if/else chain below only handles P2/P4/P8 configs */
938
939 if (num_pipe_configs == 8) {
940 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
941 switch (reg_offset) {
942 case 0:
943 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
944 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
945 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
946 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
947 break;
948 case 1:
949 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
950 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
951 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
952 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
953 break;
954 case 2:
955 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
956 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
957 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
958 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
959 break;
960 case 3:
961 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
962 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
963 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
964 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
965 break;
966 case 4:
967 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
968 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
969 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
970 TILE_SPLIT(split_equal_to_row_size));
971 break;
972 case 5:
973 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
974 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
975 break;
976 case 6:
977 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
978 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
979 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
980 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
981 break;
982 case 7:
983 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
984 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
985 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
986 TILE_SPLIT(split_equal_to_row_size));
987 break;
988 case 8:
989 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
990 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
991 break;
992 case 9:
993 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
994 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
995 break;
996 case 10:
997 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
998 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
999 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1000 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1001 break;
1002 case 11:
1003 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1004 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1005 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1006 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1007 break;
1008 case 12:
1009 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1010 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1011 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1012 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1013 break;
1014 case 13:
1015 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1016 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1017 break;
1018 case 14:
1019 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1020 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1021 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1022 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1023 break;
1024 case 16:
1025 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1026 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1027 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1028 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1029 break;
1030 case 17:
1031 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1032 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1033 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1034 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1035 break;
1036 case 27:
1037 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1038 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1039 break;
1040 case 28:
1041 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1042 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1043 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1044 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1045 break;
1046 case 29:
1047 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1048 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1049 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1050 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1051 break;
1052 case 30:
1053 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1054 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1055 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
1056 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1057 break;
1058 default:
1059 gb_tile_moden = 0;
1060 break;
1061 }
1062 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
1063 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1064 }
1065 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1066 switch (reg_offset) {
1067 case 0:
1068 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1069 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1070 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1071 NUM_BANKS(ADDR_SURF_16_BANK));
1072 break;
1073 case 1:
1074 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1075 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1076 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1077 NUM_BANKS(ADDR_SURF_16_BANK));
1078 break;
1079 case 2:
1080 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1081 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1082 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1083 NUM_BANKS(ADDR_SURF_16_BANK));
1084 break;
1085 case 3:
1086 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1087 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1088 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1089 NUM_BANKS(ADDR_SURF_16_BANK));
1090 break;
1091 case 4:
1092 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1093 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1094 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1095 NUM_BANKS(ADDR_SURF_8_BANK));
1096 break;
1097 case 5:
1098 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1099 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1100 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1101 NUM_BANKS(ADDR_SURF_4_BANK));
1102 break;
1103 case 6:
1104 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1105 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1106 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1107 NUM_BANKS(ADDR_SURF_2_BANK));
1108 break;
1109 case 8:
1110 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1111 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1112 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1113 NUM_BANKS(ADDR_SURF_16_BANK));
1114 break;
1115 case 9:
1116 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1117 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1118 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1119 NUM_BANKS(ADDR_SURF_16_BANK));
1120 break;
1121 case 10:
1122 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1123 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1124 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1125 NUM_BANKS(ADDR_SURF_16_BANK));
1126 break;
1127 case 11:
1128 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1129 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1130 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1131 NUM_BANKS(ADDR_SURF_16_BANK));
1132 break;
1133 case 12:
1134 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1135 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1136 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1137 NUM_BANKS(ADDR_SURF_8_BANK));
1138 break;
1139 case 13:
1140 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1141 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1142 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1143 NUM_BANKS(ADDR_SURF_4_BANK));
1144 break;
1145 case 14:
1146 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1147 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1148 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1149 NUM_BANKS(ADDR_SURF_2_BANK));
1150 break;
1151 default:
1152 gb_tile_moden = 0;
1153 break;
1154 }
1155 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1156 }
1157 } else if (num_pipe_configs == 4) {
1158 if (num_rbs == 4) {
1159 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1160 switch (reg_offset) {
1161 case 0:
1162 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1163 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1164 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1165 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
1166 break;
1167 case 1:
1168 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1169 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1170 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1171 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
1172 break;
1173 case 2:
1174 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1175 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1176 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1177 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
1178 break;
1179 case 3:
1180 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1181 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1182 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1183 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
1184 break;
1185 case 4:
1186 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1187 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1188 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1189 TILE_SPLIT(split_equal_to_row_size));
1190 break;
1191 case 5:
1192 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1193 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1194 break;
1195 case 6:
1196 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1197 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1198 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1199 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
1200 break;
1201 case 7:
1202 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1203 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1204 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1205 TILE_SPLIT(split_equal_to_row_size));
1206 break;
1207 case 8:
1208 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1209 PIPE_CONFIG(ADDR_SURF_P4_16x16));
1210 break;
1211 case 9:
1212 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1213 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1214 break;
1215 case 10:
1216 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1217 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1218 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1219 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1220 break;
1221 case 11:
1222 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1223 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1224 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1225 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1226 break;
1227 case 12:
1228 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1229 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1230 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1231 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1232 break;
1233 case 13:
1234 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1235 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1236 break;
1237 case 14:
1238 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1239 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1240 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1241 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1242 break;
1243 case 16:
1244 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1245 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1246 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1247 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1248 break;
1249 case 17:
1250 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1251 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1252 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1253 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1254 break;
1255 case 27:
1256 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1257 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1258 break;
1259 case 28:
1260 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1261 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1262 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1263 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1264 break;
1265 case 29:
1266 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1267 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1268 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1269 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1270 break;
1271 case 30:
1272 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1273 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1274 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1275 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1276 break;
1277 default:
1278 gb_tile_moden = 0;
1279 break;
1280 }
1281 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
1282 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1283 }
1284 } else if (num_rbs < 4) {
1285 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1286 switch (reg_offset) {
1287 case 0:
1288 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1289 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1290 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1291 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
1292 break;
1293 case 1:
1294 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1295 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1296 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1297 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
1298 break;
1299 case 2:
1300 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1301 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1302 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1303 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
1304 break;
1305 case 3:
1306 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1307 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1308 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1309 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
1310 break;
1311 case 4:
1312 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1313 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1314 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1315 TILE_SPLIT(split_equal_to_row_size));
1316 break;
1317 case 5:
1318 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1319 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1320 break;
1321 case 6:
1322 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1323 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1324 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1325 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
1326 break;
1327 case 7:
1328 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1329 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1330 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1331 TILE_SPLIT(split_equal_to_row_size));
1332 break;
1333 case 8:
1334 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1335 PIPE_CONFIG(ADDR_SURF_P4_8x16));
1336 break;
1337 case 9:
1338 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1339 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1340 break;
1341 case 10:
1342 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1343 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1344 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1345 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1346 break;
1347 case 11:
1348 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1349 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1350 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1351 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1352 break;
1353 case 12:
1354 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1355 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1356 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1357 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1358 break;
1359 case 13:
1360 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1361 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1362 break;
1363 case 14:
1364 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1365 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1366 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1367 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1368 break;
1369 case 16:
1370 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1371 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1372 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1373 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1374 break;
1375 case 17:
1376 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1377 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1378 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1379 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1380 break;
1381 case 27:
1382 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1383 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1384 break;
1385 case 28:
1386 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1387 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1388 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1389 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1390 break;
1391 case 29:
1392 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1393 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1394 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1395 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1396 break;
1397 case 30:
1398 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1399 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1400 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1401 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1402 break;
1403 default:
1404 gb_tile_moden = 0;
1405 break;
1406 }
1407 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
1408 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1409 }
1410 }
1411 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1412 switch (reg_offset) {
1413 case 0:
1414 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1415 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1416 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1417 NUM_BANKS(ADDR_SURF_16_BANK));
1418 break;
1419 case 1:
1420 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1421 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1422 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1423 NUM_BANKS(ADDR_SURF_16_BANK));
1424 break;
1425 case 2:
1426 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1427 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1428 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1429 NUM_BANKS(ADDR_SURF_16_BANK));
1430 break;
1431 case 3:
1432 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1433 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1434 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1435 NUM_BANKS(ADDR_SURF_16_BANK));
1436 break;
1437 case 4:
1438 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1439 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1440 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1441 NUM_BANKS(ADDR_SURF_16_BANK));
1442 break;
1443 case 5:
1444 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1445 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1446 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1447 NUM_BANKS(ADDR_SURF_8_BANK));
1448 break;
1449 case 6:
1450 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1451 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1452 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1453 NUM_BANKS(ADDR_SURF_4_BANK));
1454 break;
1455 case 8:
1456 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1457 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1458 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1459 NUM_BANKS(ADDR_SURF_16_BANK));
1460 break;
1461 case 9:
1462 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1463 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1464 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1465 NUM_BANKS(ADDR_SURF_16_BANK));
1466 break;
1467 case 10:
1468 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1469 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1470 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1471 NUM_BANKS(ADDR_SURF_16_BANK));
1472 break;
1473 case 11:
1474 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1475 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1476 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1477 NUM_BANKS(ADDR_SURF_16_BANK));
1478 break;
1479 case 12:
1480 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1481 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1482 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1483 NUM_BANKS(ADDR_SURF_16_BANK));
1484 break;
1485 case 13:
1486 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1487 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1488 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1489 NUM_BANKS(ADDR_SURF_8_BANK));
1490 break;
1491 case 14:
1492 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1493 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1494 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1495 NUM_BANKS(ADDR_SURF_4_BANK));
1496 break;
1497 default:
1498 gb_tile_moden = 0;
1499 break;
1500 }
1501 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1502 }
1503 } else if (num_pipe_configs == 2) {
1504 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1505 switch (reg_offset) {
1506 case 0:
1507 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1508 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1509 PIPE_CONFIG(ADDR_SURF_P2) |
1510 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
1511 break;
1512 case 1:
1513 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1514 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1515 PIPE_CONFIG(ADDR_SURF_P2) |
1516 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
1517 break;
1518 case 2:
1519 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1520 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1521 PIPE_CONFIG(ADDR_SURF_P2) |
1522 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
1523 break;
1524 case 3:
1525 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1526 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1527 PIPE_CONFIG(ADDR_SURF_P2) |
1528 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
1529 break;
1530 case 4:
1531 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1532 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1533 PIPE_CONFIG(ADDR_SURF_P2) |
1534 TILE_SPLIT(split_equal_to_row_size));
1535 break;
1536 case 5:
1537 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1538 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1539 break;
1540 case 6:
1541 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1542 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1543 PIPE_CONFIG(ADDR_SURF_P2) |
1544 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
1545 break;
1546 case 7:
1547 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1548 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1549 PIPE_CONFIG(ADDR_SURF_P2) |
1550 TILE_SPLIT(split_equal_to_row_size));
1551 break;
1552 case 8:
1553 gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
1554 break;
1555 case 9:
1556 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1557 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1558 break;
1559 case 10:
1560 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1561 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1562 PIPE_CONFIG(ADDR_SURF_P2) |
1563 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1564 break;
1565 case 11:
1566 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1567 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1568 PIPE_CONFIG(ADDR_SURF_P2) |
1569 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1570 break;
1571 case 12:
1572 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1573 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1574 PIPE_CONFIG(ADDR_SURF_P2) |
1575 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1576 break;
1577 case 13:
1578 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1579 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1580 break;
1581 case 14:
1582 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1583 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1584 PIPE_CONFIG(ADDR_SURF_P2) |
1585 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1586 break;
1587 case 16:
1588 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1589 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1590 PIPE_CONFIG(ADDR_SURF_P2) |
1591 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1592 break;
1593 case 17:
1594 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1595 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1596 PIPE_CONFIG(ADDR_SURF_P2) |
1597 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1598 break;
1599 case 27:
1600 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1601 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1602 break;
1603 case 28:
1604 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1605 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1606 PIPE_CONFIG(ADDR_SURF_P2) |
1607 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1608 break;
1609 case 29:
1610 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1611 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1612 PIPE_CONFIG(ADDR_SURF_P2) |
1613 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1614 break;
1615 case 30:
1616 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1617 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1618 PIPE_CONFIG(ADDR_SURF_P2) |
1619 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1620 break;
1621 default:
1622 gb_tile_moden = 0;
1623 break;
1624 }
1625 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
1626 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1627 }
1628 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1629 switch (reg_offset) {
1630 case 0:
1631 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1632 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1633 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1634 NUM_BANKS(ADDR_SURF_16_BANK));
1635 break;
1636 case 1:
1637 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1638 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1639 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1640 NUM_BANKS(ADDR_SURF_16_BANK));
1641 break;
1642 case 2:
1643 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1644 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1645 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1646 NUM_BANKS(ADDR_SURF_16_BANK));
1647 break;
1648 case 3:
1649 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1650 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1651 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1652 NUM_BANKS(ADDR_SURF_16_BANK));
1653 break;
1654 case 4:
1655 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1656 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1657 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1658 NUM_BANKS(ADDR_SURF_16_BANK));
1659 break;
1660 case 5:
1661 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1662 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1663 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1664 NUM_BANKS(ADDR_SURF_16_BANK));
1665 break;
1666 case 6:
1667 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1668 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1669 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1670 NUM_BANKS(ADDR_SURF_8_BANK));
1671 break;
1672 case 8:
1673 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1674 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1675 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1676 NUM_BANKS(ADDR_SURF_16_BANK));
1677 break;
1678 case 9:
1679 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1680 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1681 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1682 NUM_BANKS(ADDR_SURF_16_BANK));
1683 break;
1684 case 10:
1685 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1686 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1687 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1688 NUM_BANKS(ADDR_SURF_16_BANK));
1689 break;
1690 case 11:
1691 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1692 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1693 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1694 NUM_BANKS(ADDR_SURF_16_BANK));
1695 break;
1696 case 12:
1697 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1698 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1699 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1700 NUM_BANKS(ADDR_SURF_16_BANK));
1701 break;
1702 case 13:
1703 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1704 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1705 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1706 NUM_BANKS(ADDR_SURF_16_BANK));
1707 break;
1708 case 14:
1709 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1710 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1711 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1712 NUM_BANKS(ADDR_SURF_8_BANK));
1713 break;
1714 default:
1715 gb_tile_moden = 0;
1716 break;
1717 }
1718 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1719 }
1720 } else
1721 DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
1722}
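/*
 * Consumers do not re-encode tiling parameters; they pick an index into the
 * table programmed above, e.g. (sketch, hypothetical lookup):
 *
 *	u32 mode = rdev->config.cik.tile_mode_array[index];
 *
 * and the hw derives ARRAY_MODE/PIPE_CONFIG/TILE_SPLIT from that entry.
 */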
1723
1724/**
1725 * cik_select_se_sh - select which SE, SH to address
1726 *
1727 * @rdev: radeon_device pointer
1728 * @se_num: shader engine to address
1729 * @sh_num: sh block to address
1730 *
1731 * Select which SE, SH combinations to address. Certain
1732 * registers are instanced per SE or SH. 0xffffffff means
1733 * broadcast to all SEs or SHs (CIK).
1734 */
1735static void cik_select_se_sh(struct radeon_device *rdev,
1736 u32 se_num, u32 sh_num)
1737{
1738 u32 data = INSTANCE_BROADCAST_WRITES;
1739
1740 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1741 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1742 else if (se_num == 0xffffffff)
1743 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1744 else if (sh_num == 0xffffffff)
1745 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1746 else
1747 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1748 WREG32(GRBM_GFX_INDEX, data);
1749}
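/*
 * Illustrative calls: 0xffffffff broadcasts, any other value indexes a
 * single instance:
 *
 *	cik_select_se_sh(rdev, 0, 0);			/* SE0/SH0 only */
 *	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);	/* all SEs/SHs */
 */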
1750
1751/**
1752 * cik_create_bitmask - create a bitmask
1753 *
1754 * @bit_width: length of the mask
1755 *
 * Create a variable-length bit mask (CIK).
1757 * Returns the bitmask.
1758 */
1759static u32 cik_create_bitmask(u32 bit_width)
1760{
1761 u32 i, mask = 0;
1762
1763 for (i = 0; i < bit_width; i++) {
1764 mask <<= 1;
1765 mask |= 1;
1766 }
1767 return mask;
1768}
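/*
 * Closed-form equivalent (sketch): for bit_width < 32 the loop computes
 *
 *	mask = (1u << bit_width) - 1;
 *
 * the iterative form also handles bit_width == 32 (mask = 0xffffffff)
 * without relying on an undefined 32-bit shift.
 */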
1769
1770/**
 * cik_get_rb_disabled - compute the bitmask of disabled RBs
1772 *
1773 * @rdev: radeon_device pointer
1774 * @max_rb_num: max RBs (render backends) for the asic
1775 * @se_num: number of SEs (shader engines) for the asic
1776 * @sh_per_se: number of SH blocks per SE for the asic
1777 *
1778 * Calculates the bitmask of disabled RBs (CIK).
1779 * Returns the disabled RB bitmask.
1780 */
1781static u32 cik_get_rb_disabled(struct radeon_device *rdev,
1782 u32 max_rb_num, u32 se_num,
1783 u32 sh_per_se)
1784{
1785 u32 data, mask;
1786
1787 data = RREG32(CC_RB_BACKEND_DISABLE);
1788 if (data & 1)
1789 data &= BACKEND_DISABLE_MASK;
1790 else
1791 data = 0;
1792 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1793
1794 data >>= BACKEND_DISABLE_SHIFT;
1795
1796 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
1797
1798 return data & mask;
1799}
1800
1801/**
1802 * cik_setup_rb - setup the RBs on the asic
1803 *
1804 * @rdev: radeon_device pointer
1805 * @se_num: number of SEs (shader engines) for the asic
1806 * @sh_per_se: number of SH blocks per SE for the asic
1807 * @max_rb_num: max RBs (render backends) for the asic
1808 *
1809 * Configures per-SE/SH RB registers (CIK).
1810 */
1811static void cik_setup_rb(struct radeon_device *rdev,
1812 u32 se_num, u32 sh_per_se,
1813 u32 max_rb_num)
1814{
1815 int i, j;
1816 u32 data, mask;
1817 u32 disabled_rbs = 0;
1818 u32 enabled_rbs = 0;
1819
1820 for (i = 0; i < se_num; i++) {
1821 for (j = 0; j < sh_per_se; j++) {
1822 cik_select_se_sh(rdev, i, j);
1823 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1824 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
1825 }
1826 }
1827 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1828
1829 mask = 1;
1830 for (i = 0; i < max_rb_num; i++) {
1831 if (!(disabled_rbs & mask))
1832 enabled_rbs |= mask;
1833 mask <<= 1;
1834 }
1835
1836 for (i = 0; i < se_num; i++) {
1837 cik_select_se_sh(rdev, i, 0xffffffff);
1838 data = 0;
1839 for (j = 0; j < sh_per_se; j++) {
1840 switch (enabled_rbs & 3) {
1841 case 1:
1842 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
1843 break;
1844 case 2:
1845 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
1846 break;
1847 case 3:
1848 default:
1849 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
1850 break;
1851 }
1852 enabled_rbs >>= 2;
1853 }
1854 WREG32(PA_SC_RASTER_CONFIG, data);
1855 }
1856 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1857}
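/*
 * Worked example (sketch): with max_rb_num = 4 and disabled_rbs = 0b0010,
 * the enable loop above yields enabled_rbs = 0b1101; each SH then consumes
 * two bits of enabled_rbs to choose a RASTER_CONFIG_RB_MAP_* encoding.
 */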
1858
1859/**
1860 * cik_gpu_init - setup the 3D engine
1861 *
1862 * @rdev: radeon_device pointer
1863 *
1864 * Configures the 3D engine and tiling configuration
1865 * registers so that the 3D engine is usable.
1866 */
1867static void cik_gpu_init(struct radeon_device *rdev)
1868{
1869 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
1870 u32 mc_shared_chmap, mc_arb_ramcfg;
1871 u32 hdp_host_path_cntl;
1872 u32 tmp;
1873 int i, j;
1874
1875 switch (rdev->family) {
1876 case CHIP_BONAIRE:
1877 rdev->config.cik.max_shader_engines = 2;
1878 rdev->config.cik.max_tile_pipes = 4;
1879 rdev->config.cik.max_cu_per_sh = 7;
1880 rdev->config.cik.max_sh_per_se = 1;
1881 rdev->config.cik.max_backends_per_se = 2;
1882 rdev->config.cik.max_texture_channel_caches = 4;
1883 rdev->config.cik.max_gprs = 256;
1884 rdev->config.cik.max_gs_threads = 32;
1885 rdev->config.cik.max_hw_contexts = 8;
1886
1887 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
1888 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
1889 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
1890 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
1891 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1892 break;
1893 case CHIP_KAVERI:
1894 /* TODO */
1895 break;
1896 case CHIP_KABINI:
1897 default:
1898 rdev->config.cik.max_shader_engines = 1;
1899 rdev->config.cik.max_tile_pipes = 2;
1900 rdev->config.cik.max_cu_per_sh = 2;
1901 rdev->config.cik.max_sh_per_se = 1;
1902 rdev->config.cik.max_backends_per_se = 1;
1903 rdev->config.cik.max_texture_channel_caches = 2;
1904 rdev->config.cik.max_gprs = 256;
1905 rdev->config.cik.max_gs_threads = 16;
1906 rdev->config.cik.max_hw_contexts = 8;
1907
1908 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
1909 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
1910 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
1911 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
1912 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1913 break;
1914 }
1915
1916 /* Initialize HDP */
1917 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1918 WREG32((0x2c14 + j), 0x00000000);
1919 WREG32((0x2c18 + j), 0x00000000);
1920 WREG32((0x2c1c + j), 0x00000000);
1921 WREG32((0x2c20 + j), 0x00000000);
1922 WREG32((0x2c24 + j), 0x00000000);
1923 }
1924
1925 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1926
1927 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1928
1929 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1930 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1931
1932 rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
1933 rdev->config.cik.mem_max_burst_length_bytes = 256;
1934 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
1935 rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1936 if (rdev->config.cik.mem_row_size_in_kb > 4)
1937 rdev->config.cik.mem_row_size_in_kb = 4;
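	/* Worked example of the row-size math above: NOOFCOLS tmp = 0 gives
	 * (4 * (1 << 8)) / 1024 = 1 KB rows; tmp = 3 would give 8 KB, which
	 * the clamp just above limits to 4 KB.
	 */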
1938 /* XXX use MC settings? */
1939 rdev->config.cik.shader_engine_tile_size = 32;
1940 rdev->config.cik.num_gpus = 1;
1941 rdev->config.cik.multi_gpu_tile_size = 64;
1942
1943 /* fix up row size */
1944 gb_addr_config &= ~ROW_SIZE_MASK;
1945 switch (rdev->config.cik.mem_row_size_in_kb) {
1946 case 1:
1947 default:
1948 gb_addr_config |= ROW_SIZE(0);
1949 break;
1950 case 2:
1951 gb_addr_config |= ROW_SIZE(1);
1952 break;
1953 case 4:
1954 gb_addr_config |= ROW_SIZE(2);
1955 break;
1956 }
1957
1958 /* setup tiling info dword. gb_addr_config is not adequate since it does
1959 * not have bank info, so create a custom tiling dword.
1960 * bits 3:0 num_pipes
1961 * bits 7:4 num_banks
1962 * bits 11:8 group_size
1963 * bits 15:12 row_size
1964 */
1965 rdev->config.cik.tile_config = 0;
1966 switch (rdev->config.cik.num_tile_pipes) {
1967 case 1:
1968 rdev->config.cik.tile_config |= (0 << 0);
1969 break;
1970 case 2:
1971 rdev->config.cik.tile_config |= (1 << 0);
1972 break;
1973 case 4:
1974 rdev->config.cik.tile_config |= (2 << 0);
1975 break;
1976 case 8:
1977 default:
1978 /* XXX what about 12? */
1979 rdev->config.cik.tile_config |= (3 << 0);
1980 break;
1981 }
1982 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
1983 rdev->config.cik.tile_config |= 1 << 4;
1984 else
1985 rdev->config.cik.tile_config |= 0 << 4;
1986 rdev->config.cik.tile_config |=
1987 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1988 rdev->config.cik.tile_config |=
1989 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
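	/* Example encoding (hypothetical field values): 4 tile pipes (-> 2 in
	 * bits 3:0), a nonzero NOOFBANK field (-> 1 in bit 4), a pipe
	 * interleave field of 1 and a ROW_SIZE field of 1 (2 KB rows per the
	 * switch above) give tile_config = 0x2 | 0x10 | 0x100 | 0x1000 = 0x1112.
	 */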
1990
1991 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1992 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1993 WREG32(DMIF_ADDR_CALC, gb_addr_config);
1994 WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
1995 WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
1996 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
1997 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
1998 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
1999
2000 cik_tiling_mode_table_init(rdev);
2001
2002 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
2003 rdev->config.cik.max_sh_per_se,
2004 rdev->config.cik.max_backends_per_se);
2005
2006 /* set HW defaults for 3D engine */
2007 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
2008
2009 WREG32(SX_DEBUG_1, 0x20);
2010
2011 WREG32(TA_CNTL_AUX, 0x00010000);
2012
2013 tmp = RREG32(SPI_CONFIG_CNTL);
2014 tmp |= 0x03000000;
2015 WREG32(SPI_CONFIG_CNTL, tmp);
2016
2017 WREG32(SQ_CONFIG, 1);
2018
2019 WREG32(DB_DEBUG, 0);
2020
2021 tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
2022 tmp |= 0x00000400;
2023 WREG32(DB_DEBUG2, tmp);
2024
2025 tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
2026 tmp |= 0x00020200;
2027 WREG32(DB_DEBUG3, tmp);
2028
2029 tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
2030 tmp |= 0x00018208;
2031 WREG32(CB_HW_CONTROL, tmp);
2032
2033 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2034
2035 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
2036 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
2037 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
2038 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
2039
2040 WREG32(VGT_NUM_INSTANCES, 1);
2041
2042 WREG32(CP_PERFMON_CNTL, 0);
2043
2044 WREG32(SQ_CONFIG, 0);
2045
2046 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2047 FORCE_EOV_MAX_REZ_CNT(255)));
2048
2049 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
2050 AUTO_INVLD_EN(ES_AND_GS_AUTO));
2051
2052 WREG32(VGT_GS_VERTEX_REUSE, 16);
2053 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2054
2055 tmp = RREG32(HDP_MISC_CNTL);
2056 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2057 WREG32(HDP_MISC_CNTL, tmp);
2058
2059 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2060 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2061
2062 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2063 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
2064
2065 udelay(50);
2066}
2067
2068/*
2069 * GPU scratch register helper functions.
2070 */
2071/**
2072 * cik_scratch_init - setup driver info for CP scratch regs
2073 *
2074 * @rdev: radeon_device pointer
2075 *
2076 * Set up the number and offset of the CP scratch registers.
2077 * NOTE: use of CP scratch registers is a legacy interface and
2078 * is not used by default on newer asics (r6xx+). On newer asics,
2079 * memory buffers are used for fences rather than scratch regs.
2080 */
2081static void cik_scratch_init(struct radeon_device *rdev)
2082{
2083 int i;
2084
2085 rdev->scratch.num_reg = 7;
2086 rdev->scratch.reg_base = SCRATCH_REG0;
2087 for (i = 0; i < rdev->scratch.num_reg; i++) {
2088 rdev->scratch.free[i] = true;
2089 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2090 }
2091}
2092
2093/**
2094 * cik_ring_test - basic gfx ring test
2095 *
2096 * @rdev: radeon_device pointer
2097 * @ring: radeon_ring structure holding ring information
2098 *
2099 * Allocate a scratch register and write to it using the gfx ring (CIK).
2100 * Provides a basic gfx ring test to verify that the ring is working.
2101 * Used by cik_cp_gfx_resume().
2102 * Returns 0 on success, error on failure.
2103 */
2104int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2105{
2106 uint32_t scratch;
2107 uint32_t tmp = 0;
2108 unsigned i;
2109 int r;
2110
2111 r = radeon_scratch_get(rdev, &scratch);
2112 if (r) {
2113 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2114 return r;
2115 }
2116 WREG32(scratch, 0xCAFEDEAD);
2117 r = radeon_ring_lock(rdev, ring, 3);
2118 if (r) {
2119 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2120 radeon_scratch_free(rdev, scratch);
2121 return r;
2122 }
2123 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2124 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
2125 radeon_ring_write(ring, 0xDEADBEEF);
2126 radeon_ring_unlock_commit(rdev, ring);
2127
2128 for (i = 0; i < rdev->usec_timeout; i++) {
2129 tmp = RREG32(scratch);
2130 if (tmp == 0xDEADBEEF)
2131 break;
2132 DRM_UDELAY(1);
2133 }
2134 if (i < rdev->usec_timeout) {
2135 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2136 } else {
2137 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2138 ring->idx, scratch, tmp);
2139 r = -EINVAL;
2140 }
2141 radeon_scratch_free(rdev, scratch);
2142 return r;
2143}
2144
2145/**
2146 * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
2147 *
2148 * @rdev: radeon_device pointer
2149 * @fence: radeon fence object
2150 *
2151 * Emits a fence sequence number on the gfx ring and flushes
2152 * GPU caches.
2153 */
2154void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
2155 struct radeon_fence *fence)
2156{
2157 struct radeon_ring *ring = &rdev->ring[fence->ring];
2158 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2159
2160 /* EVENT_WRITE_EOP - flush caches, send int */
2161 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2162 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
2163 EOP_TC_ACTION_EN |
2164 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2165 EVENT_INDEX(5)));
2166 radeon_ring_write(ring, addr & 0xfffffffc);
2167 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
2168 radeon_ring_write(ring, fence->seq);
2169 radeon_ring_write(ring, 0);
2170 /* HDP flush */
2171 /* We should be using the new WAIT_REG_MEM special op packet here
2172 * but it causes the CP to hang
2173 */
2174 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2175 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2176 WRITE_DATA_DST_SEL(0)));
2177 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
2178 radeon_ring_write(ring, 0);
2179 radeon_ring_write(ring, 0);
2180}
2181
2182/**
2183 * cik_fence_compute_ring_emit - emit a fence on the compute ring
2184 *
2185 * @rdev: radeon_device pointer
2186 * @fence: radeon fence object
2187 *
2188 * Emits a fence sequence number on the compute ring and flushes
2189 * GPU caches.
2190 */
2191void cik_fence_compute_ring_emit(struct radeon_device *rdev,
2192 struct radeon_fence *fence)
2193{
2194 struct radeon_ring *ring = &rdev->ring[fence->ring];
2195 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2196
2197 /* RELEASE_MEM - flush caches, send int */
2198 radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2199 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
2200 EOP_TC_ACTION_EN |
2201 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2202 EVENT_INDEX(5)));
2203 radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
2204 radeon_ring_write(ring, addr & 0xfffffffc);
2205 radeon_ring_write(ring, upper_32_bits(addr));
2206 radeon_ring_write(ring, fence->seq);
2207 radeon_ring_write(ring, 0);
2208 /* HDP flush */
2209 /* We should be using the new WAIT_REG_MEM special op packet here
2210 * but it causes the CP to hang
2211 */
2212 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2213 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2214 WRITE_DATA_DST_SEL(0)));
2215 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
2216 radeon_ring_write(ring, 0);
2217 radeon_ring_write(ring, 0);
2218}
2219
2220void cik_semaphore_ring_emit(struct radeon_device *rdev,
2221 struct radeon_ring *ring,
2222 struct radeon_semaphore *semaphore,
2223 bool emit_wait)
2224{
2225 uint64_t addr = semaphore->gpu_addr;
2226 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2227
2228 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2229 radeon_ring_write(ring, addr & 0xffffffff);
2230 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
2231}
2232
2233/*
2234 * IB stuff
2235 */
2236/**
2237 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
2238 *
2239 * @rdev: radeon_device pointer
2240 * @ib: radeon indirect buffer object
2241 *
2242 * Emits a DE (drawing engine) or CE (constant engine) IB
2243 * on the gfx ring. IBs are usually generated by userspace
2244 * acceleration drivers and submitted to the kernel for
2245 * scheduling on the ring. This function schedules the IB
2246 * on the gfx ring for execution by the GPU.
2247 */
2248void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2249{
2250 struct radeon_ring *ring = &rdev->ring[ib->ring];
2251 u32 header, control = INDIRECT_BUFFER_VALID;
2252
2253 if (ib->is_const_ib) {
2254 /* set switch buffer packet before const IB */
2255 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2256 radeon_ring_write(ring, 0);
2257
2258 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2259 } else {
2260 u32 next_rptr;
2261 if (ring->rptr_save_reg) {
2262 next_rptr = ring->wptr + 3 + 4;
2263 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2264 radeon_ring_write(ring, ((ring->rptr_save_reg -
2265 PACKET3_SET_UCONFIG_REG_START) >> 2));
2266 radeon_ring_write(ring, next_rptr);
2267 } else if (rdev->wb.enabled) {
2268 next_rptr = ring->wptr + 5 + 4;
2269 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2270 radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
2271 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2272 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2273 radeon_ring_write(ring, next_rptr);
2274 }
2275
2276 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2277 }
2278
2279 control |= ib->length_dw |
2280 (ib->vm ? (ib->vm->id << 24) : 0);
2281
2282 radeon_ring_write(ring, header);
2283 radeon_ring_write(ring,
2284#ifdef __BIG_ENDIAN
2285 (2 << 0) |
2286#endif
2287 (ib->gpu_addr & 0xFFFFFFFC));
2288 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2289 radeon_ring_write(ring, control);
2290}
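
/* Example control dword for the final write above (hypothetical sizes): a
 * 256-dword IB running in VM id 1 gives
 * control = INDIRECT_BUFFER_VALID | 256 | (1 << 24).
 */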
2291
2292/**
2293 * cik_ib_test - basic gfx ring IB test
2294 *
2295 * @rdev: radeon_device pointer
2296 * @ring: radeon_ring structure holding ring information
2297 *
2298 * Allocate an IB and execute it on the gfx ring (CIK).
2299 * Provides a basic gfx ring test to verify that IBs are working.
2300 * Returns 0 on success, error on failure.
2301 */
2302int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2303{
2304 struct radeon_ib ib;
2305 uint32_t scratch;
2306 uint32_t tmp = 0;
2307 unsigned i;
2308 int r;
2309
2310 r = radeon_scratch_get(rdev, &scratch);
2311 if (r) {
2312 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2313 return r;
2314 }
2315 WREG32(scratch, 0xCAFEDEAD);
2316 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
2317 if (r) {
2318 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 radeon_scratch_free(rdev, scratch);
2319 return r;
2320 }
2321 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2322 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
2323 ib.ptr[2] = 0xDEADBEEF;
2324 ib.length_dw = 3;
2325 r = radeon_ib_schedule(rdev, &ib, NULL);
2326 if (r) {
2327 radeon_scratch_free(rdev, scratch);
2328 radeon_ib_free(rdev, &ib);
2329 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2330 return r;
2331 }
2332 r = radeon_fence_wait(ib.fence, false);
2333 if (r) {
2334 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 radeon_scratch_free(rdev, scratch);
 radeon_ib_free(rdev, &ib);
2335 return r;
2336 }
2337 for (i = 0; i < rdev->usec_timeout; i++) {
2338 tmp = RREG32(scratch);
2339 if (tmp == 0xDEADBEEF)
2340 break;
2341 DRM_UDELAY(1);
2342 }
2343 if (i < rdev->usec_timeout) {
2344 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
2345 } else {
2346 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2347 scratch, tmp);
2348 r = -EINVAL;
2349 }
2350 radeon_scratch_free(rdev, scratch);
2351 radeon_ib_free(rdev, &ib);
2352 return r;
2353}
2354
2355/*
2356 * CP.
2357 * On CIK, gfx and compute now have independent command processors.
2358 *
2359 * GFX
2360 * Gfx consists of a single ring and can process both gfx jobs and
2361 * compute jobs. The gfx CP consists of three microengines (ME):
2362 * PFP - Pre-Fetch Parser
2363 * ME - Micro Engine
2364 * CE - Constant Engine
2365 * The PFP and ME make up what is considered the Drawing Engine (DE).
2366 * The CE is an asynchronous engine used for updating buffer descriptors
2367 * used by the DE so that they can be loaded into cache in parallel
2368 * while the DE is processing state update packets.
2369 *
2370 * Compute
2371 * The compute CP consists of two microengines (ME):
2372 * MEC1 - Compute MicroEngine 1
2373 * MEC2 - Compute MicroEngine 2
2374 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2375 * The queues are exposed to userspace and are programmed directly
2376 * by the compute runtime.
2377 */
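
/* Illustrative sketch, not radeon API: the compute queue count implied by
 * the MEC layout described above. The helper name is hypothetical.
 */
static unsigned example_total_compute_queues(unsigned num_mec)
{
	const unsigned pipes_per_mec = 4;
	const unsigned queues_per_pipe = 8;

	/* KV: 2 * 4 * 8 = 64 queues; CI/KB: 1 * 4 * 8 = 32 queues */
	return num_mec * pipes_per_mec * queues_per_pipe;
}
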
2378/**
2379 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
2380 *
2381 * @rdev: radeon_device pointer
2382 * @enable: enable or disable the MEs
2383 *
2384 * Halts or unhalts the gfx MEs.
2385 */
2386static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
2387{
2388 if (enable)
2389 WREG32(CP_ME_CNTL, 0);
2390 else {
2391 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
2392 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2393 }
2394 udelay(50);
2395}
2396
2397/**
2398 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
2399 *
2400 * @rdev: radeon_device pointer
2401 *
2402 * Loads the gfx PFP, ME, and CE ucode.
2403 * Returns 0 for success, -EINVAL if the ucode is not available.
2404 */
2405static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
2406{
2407 const __be32 *fw_data;
2408 int i;
2409
2410 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
2411 return -EINVAL;
2412
2413 cik_cp_gfx_enable(rdev, false);
2414
2415 /* PFP */
2416 fw_data = (const __be32 *)rdev->pfp_fw->data;
2417 WREG32(CP_PFP_UCODE_ADDR, 0);
2418 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
2419 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
2420 WREG32(CP_PFP_UCODE_ADDR, 0);
2421
2422 /* CE */
2423 fw_data = (const __be32 *)rdev->ce_fw->data;
2424 WREG32(CP_CE_UCODE_ADDR, 0);
2425 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
2426 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
2427 WREG32(CP_CE_UCODE_ADDR, 0);
2428
2429 /* ME */
2430 fw_data = (const __be32 *)rdev->me_fw->data;
2431 WREG32(CP_ME_RAM_WADDR, 0);
2432 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
2433 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
2434 WREG32(CP_ME_RAM_WADDR, 0);
2435
2436 WREG32(CP_PFP_UCODE_ADDR, 0);
2437 WREG32(CP_CE_UCODE_ADDR, 0);
2438 WREG32(CP_ME_RAM_WADDR, 0);
2439 WREG32(CP_ME_RAM_RADDR, 0);
2440 return 0;
2441}
2442
2443/**
2444 * cik_cp_gfx_start - start the gfx ring
2445 *
2446 * @rdev: radeon_device pointer
2447 *
2448 * Enables the ring and loads the clear state context and other
2449 * packets required to init the ring.
2450 * Returns 0 for success, error for failure.
2451 */
2452static int cik_cp_gfx_start(struct radeon_device *rdev)
2453{
2454 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2455 int r, i;
2456
2457 /* init the CP */
2458 WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
2459 WREG32(CP_ENDIAN_SWAP, 0);
2460 WREG32(CP_DEVICE_ID, 1);
2461
2462 cik_cp_gfx_enable(rdev, true);
2463
2464 r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
2465 if (r) {
2466 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2467 return r;
2468 }
2469
2470 /* init the CE partitions. CE only used for gfx on CIK */
2471 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2472 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2473 radeon_ring_write(ring, 0xc000);
2474 radeon_ring_write(ring, 0xc000);
2475
2476 /* setup clear context state */
2477 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2478 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2479
2480 radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2481 radeon_ring_write(ring, 0x80000000);
2482 radeon_ring_write(ring, 0x80000000);
2483
2484 for (i = 0; i < cik_default_size; i++)
2485 radeon_ring_write(ring, cik_default_state[i]);
2486
2487 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2488 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2489
2490 /* set clear context state */
2491 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2492 radeon_ring_write(ring, 0);
2493
2494 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2495 radeon_ring_write(ring, 0x00000316);
2496 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2497 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2498
2499 radeon_ring_unlock_commit(rdev, ring);
2500
2501 return 0;
2502}
2503
2504/**
2505 * cik_cp_gfx_fini - stop the gfx ring
2506 *
2507 * @rdev: radeon_device pointer
2508 *
2509 * Stop the gfx ring and tear down the driver ring
2510 * info.
2511 */
2512static void cik_cp_gfx_fini(struct radeon_device *rdev)
2513{
2514 cik_cp_gfx_enable(rdev, false);
2515 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
2516}
2517
2518/**
2519 * cik_cp_gfx_resume - setup the gfx ring buffer registers
2520 *
2521 * @rdev: radeon_device pointer
2522 *
2523 * Program the location and size of the gfx ring buffer
2524 * and test it to make sure it's working.
2525 * Returns 0 for success, error for failure.
2526 */
2527static int cik_cp_gfx_resume(struct radeon_device *rdev)
2528{
2529 struct radeon_ring *ring;
2530 u32 tmp;
2531 u32 rb_bufsz;
2532 u64 rb_addr;
2533 int r;
2534
2535 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2536 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2537
2538 /* Set the write pointer delay */
2539 WREG32(CP_RB_WPTR_DELAY, 0);
2540
2541 /* set the RB to use vmid 0 */
2542 WREG32(CP_RB_VMID, 0);
2543
2544 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2545
2546 /* ring 0 - compute and gfx */
2547 /* Set ring buffer size */
2548 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2549 rb_bufsz = drm_order(ring->ring_size / 8);
2550 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
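	/* e.g. a 1 MiB ring: rb_bufsz = drm_order(0x100000 / 8) = drm_order(131072) = 17 */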
2551#ifdef __BIG_ENDIAN
2552 tmp |= BUF_SWAP_32BIT;
2553#endif
2554 WREG32(CP_RB0_CNTL, tmp);
2555
2556 /* Initialize the ring buffer's read and write pointers */
2557 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
2558 ring->wptr = 0;
2559 WREG32(CP_RB0_WPTR, ring->wptr);
2560
2561 /* set the wb address whether it's enabled or not */
2562 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
2563 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2564
2565 /* scratch register shadowing is no longer supported */
2566 WREG32(SCRATCH_UMSK, 0);
2567
2568 if (!rdev->wb.enabled)
2569 tmp |= RB_NO_UPDATE;
2570
2571 mdelay(1);
2572 WREG32(CP_RB0_CNTL, tmp);
2573
2574 rb_addr = ring->gpu_addr >> 8;
2575 WREG32(CP_RB0_BASE, rb_addr);
2576 WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
2577
2578 ring->rptr = RREG32(CP_RB0_RPTR);
2579
2580 /* start the ring */
2581 cik_cp_gfx_start(rdev);
2582 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
2583 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
2584 if (r) {
2585 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2586 return r;
2587 }
2588 return 0;
2589}
2590
2591u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
2592 struct radeon_ring *ring)
2593{
2594 u32 rptr;
2595
2598 if (rdev->wb.enabled) {
2599 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
2600 } else {
2601 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2602 rptr = RREG32(CP_HQD_PQ_RPTR);
2603 cik_srbm_select(rdev, 0, 0, 0, 0);
2604 }
2605 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2606
2607 return rptr;
2608}
2609
2610u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
2611 struct radeon_ring *ring)
2612{
2613 u32 wptr;
2614
2615 if (rdev->wb.enabled) {
2616 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
2617 } else {
2618 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
2619 wptr = RREG32(CP_HQD_PQ_WPTR);
2620 cik_srbm_select(rdev, 0, 0, 0, 0);
2621 }
2622 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
2623
2624 return wptr;
2625}
2626
2627void cik_compute_ring_set_wptr(struct radeon_device *rdev,
2628 struct radeon_ring *ring)
2629{
2630 u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask;
2631
2632 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
2633 WDOORBELL32(ring->doorbell_offset, wptr);
2634}
2635
2636/**
2637 * cik_cp_compute_enable - enable/disable the compute CP MEs
2638 *
2639 * @rdev: radeon_device pointer
2640 * @enable: enable or disable the MEs
2641 *
2642 * Halts or unhalts the compute MEs.
2643 */
2644static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
2645{
2646 if (enable)
2647 WREG32(CP_MEC_CNTL, 0);
2648 else
2649 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
2650 udelay(50);
2651}
2652
2653/**
2654 * cik_cp_compute_load_microcode - load the compute CP ME ucode
2655 *
2656 * @rdev: radeon_device pointer
2657 *
2658 * Loads the compute MEC1&2 ucode.
2659 * Returns 0 for success, -EINVAL if the ucode is not available.
2660 */
2661static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
2662{
2663 const __be32 *fw_data;
2664 int i;
2665
2666 if (!rdev->mec_fw)
2667 return -EINVAL;
2668
2669 cik_cp_compute_enable(rdev, false);
2670
2671 /* MEC1 */
2672 fw_data = (const __be32 *)rdev->mec_fw->data;
2673 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
2674 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
2675 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
2676 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
2677
2678 if (rdev->family == CHIP_KAVERI) {
2679 /* MEC2 */
2680 fw_data = (const __be32 *)rdev->mec_fw->data;
2681 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
2682 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
2683 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
2684 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
2685 }
2686
2687 return 0;
2688}
2689
2690/**
2691 * cik_cp_compute_start - start the compute queues
2692 *
2693 * @rdev: radeon_device pointer
2694 *
2695 * Enable the compute queues.
2696 * Returns 0 for success, error for failure.
2697 */
2698static int cik_cp_compute_start(struct radeon_device *rdev)
2699{
2700 cik_cp_compute_enable(rdev, true);
2701
2702 return 0;
2703}
2704
2705/**
2706 * cik_cp_compute_fini - stop the compute queues
2707 *
2708 * @rdev: radeon_device pointer
2709 *
2710 * Stop the compute queues and tear down the driver queue
2711 * info.
2712 */
2713static void cik_cp_compute_fini(struct radeon_device *rdev)
2714{
2715 int i, idx, r;
2716
2717 cik_cp_compute_enable(rdev, false);
2718
2719 for (i = 0; i < 2; i++) {
2720 if (i == 0)
2721 idx = CAYMAN_RING_TYPE_CP1_INDEX;
2722 else
2723 idx = CAYMAN_RING_TYPE_CP2_INDEX;
2724
2725 if (rdev->ring[idx].mqd_obj) {
2726 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
2727 if (unlikely(r != 0))
2728 dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
2729
2730 radeon_bo_unpin(rdev->ring[idx].mqd_obj);
2731 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
2732
2733 radeon_bo_unref(&rdev->ring[idx].mqd_obj);
2734 rdev->ring[idx].mqd_obj = NULL;
2735 }
2736 }
2737}
2738
2739static void cik_mec_fini(struct radeon_device *rdev)
2740{
2741 int r;
2742
2743 if (rdev->mec.hpd_eop_obj) {
2744 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
2745 if (unlikely(r != 0))
2746 dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
2747 radeon_bo_unpin(rdev->mec.hpd_eop_obj);
2748 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
2749
2750 radeon_bo_unref(&rdev->mec.hpd_eop_obj);
2751 rdev->mec.hpd_eop_obj = NULL;
2752 }
2753}
2754
2755#define MEC_HPD_SIZE 2048
2756
2757static int cik_mec_init(struct radeon_device *rdev)
2758{
2759 int r;
2760 u32 *hpd;
2761
2762 /*
2763 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
2764 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
2765 */
2766 if (rdev->family == CHIP_KAVERI)
2767 rdev->mec.num_mec = 2;
2768 else
2769 rdev->mec.num_mec = 1;
2770 rdev->mec.num_pipe = 4;
2771 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
2772
2773 if (rdev->mec.hpd_eop_obj == NULL) {
2774 r = radeon_bo_create(rdev,
2775 rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
2776 PAGE_SIZE, true,
2777 RADEON_GEM_DOMAIN_GTT, NULL,
2778 &rdev->mec.hpd_eop_obj);
2779 if (r) {
2780 dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
2781 return r;
2782 }
2783 }
2784
2785 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
2786 if (unlikely(r != 0)) {
2787 cik_mec_fini(rdev);
2788 return r;
2789 }
2790 r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
2791 &rdev->mec.hpd_eop_gpu_addr);
2792 if (r) {
2793 dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
2794 cik_mec_fini(rdev);
2795 return r;
2796 }
2797 r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
2798 if (r) {
2799 dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
2800 cik_mec_fini(rdev);
2801 return r;
2802 }
2803
2804 /* clear memory. Not sure if this is required or not */
2805 memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
2806
2807 radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
2808 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
2809
2810 return 0;
2811}
2812
2813struct hqd_registers
2814{
2815 u32 cp_mqd_base_addr;
2816 u32 cp_mqd_base_addr_hi;
2817 u32 cp_hqd_active;
2818 u32 cp_hqd_vmid;
2819 u32 cp_hqd_persistent_state;
2820 u32 cp_hqd_pipe_priority;
2821 u32 cp_hqd_queue_priority;
2822 u32 cp_hqd_quantum;
2823 u32 cp_hqd_pq_base;
2824 u32 cp_hqd_pq_base_hi;
2825 u32 cp_hqd_pq_rptr;
2826 u32 cp_hqd_pq_rptr_report_addr;
2827 u32 cp_hqd_pq_rptr_report_addr_hi;
2828 u32 cp_hqd_pq_wptr_poll_addr;
2829 u32 cp_hqd_pq_wptr_poll_addr_hi;
2830 u32 cp_hqd_pq_doorbell_control;
2831 u32 cp_hqd_pq_wptr;
2832 u32 cp_hqd_pq_control;
2833 u32 cp_hqd_ib_base_addr;
2834 u32 cp_hqd_ib_base_addr_hi;
2835 u32 cp_hqd_ib_rptr;
2836 u32 cp_hqd_ib_control;
2837 u32 cp_hqd_iq_timer;
2838 u32 cp_hqd_iq_rptr;
2839 u32 cp_hqd_dequeue_request;
2840 u32 cp_hqd_dma_offload;
2841 u32 cp_hqd_sema_cmd;
2842 u32 cp_hqd_msg_type;
2843 u32 cp_hqd_atomic0_preop_lo;
2844 u32 cp_hqd_atomic0_preop_hi;
2845 u32 cp_hqd_atomic1_preop_lo;
2846 u32 cp_hqd_atomic1_preop_hi;
2847 u32 cp_hqd_hq_scheduler0;
2848 u32 cp_hqd_hq_scheduler1;
2849 u32 cp_mqd_control;
2850};
2851
2852struct bonaire_mqd
2853{
2854 u32 header;
2855 u32 dispatch_initiator;
2856 u32 dimensions[3];
2857 u32 start_idx[3];
2858 u32 num_threads[3];
2859 u32 pipeline_stat_enable;
2860 u32 perf_counter_enable;
2861 u32 pgm[2];
2862 u32 tba[2];
2863 u32 tma[2];
2864 u32 pgm_rsrc[2];
2865 u32 vmid;
2866 u32 resource_limits;
2867 u32 static_thread_mgmt01[2];
2868 u32 tmp_ring_size;
2869 u32 static_thread_mgmt23[2];
2870 u32 restart[3];
2871 u32 thread_trace_enable;
2872 u32 reserved1;
2873 u32 user_data[16];
2874 u32 vgtcs_invoke_count[2];
2875 struct hqd_registers queue_state;
2876 u32 dequeue_cntr;
2877 u32 interrupt_queue[64];
2878};
2879
2880/**
2881 * cik_cp_compute_resume - setup the compute queue registers
2882 *
2883 * @rdev: radeon_device pointer
2884 *
2885 * Program the compute queues and test them to make sure they
2886 * are working.
2887 * Returns 0 for success, error for failure.
2888 */
2889static int cik_cp_compute_resume(struct radeon_device *rdev)
2890{
2891 int r, i, idx;
2892 u32 tmp;
2893 bool use_doorbell = true;
2894 u64 hqd_gpu_addr;
2895 u64 mqd_gpu_addr;
2896 u64 eop_gpu_addr;
2897 u64 wb_gpu_addr;
2898 u32 *buf;
2899 struct bonaire_mqd *mqd;
2900
2901 r = cik_cp_compute_start(rdev);
2902 if (r)
2903 return r;
2904
2905 /* fix up chicken bits */
2906 tmp = RREG32(CP_CPF_DEBUG);
2907 tmp |= (1 << 23);
2908 WREG32(CP_CPF_DEBUG, tmp);
2909
2910 /* init the pipes */
2911 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
2912 int me = (i < 4) ? 1 : 2;
2913 int pipe = (i < 4) ? i : (i - 4);
2914
2915 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
2916
2917 cik_srbm_select(rdev, me, pipe, 0, 0);
2918
2919 /* write the EOP addr */
2920 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2921 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2922
2923 /* set the VMID assigned */
2924 WREG32(CP_HPD_EOP_VMID, 0);
2925
2926 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2927 tmp = RREG32(CP_HPD_EOP_CONTROL);
2928 tmp &= ~EOP_SIZE_MASK;
2929 tmp |= drm_order(MEC_HPD_SIZE / 8);
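		/* e.g. MEC_HPD_SIZE = 2048 bytes: drm_order(2048 / 8) = 8,
		 * and 2^(8 + 1) = 512 dwords = 2048 bytes, matching the
		 * buffer size
		 */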
2930 WREG32(CP_HPD_EOP_CONTROL, tmp);
2931 }
2932 cik_srbm_select(rdev, 0, 0, 0, 0);
2933
2934 /* init the queues. Just two for now. */
2935 for (i = 0; i < 2; i++) {
2936 if (i == 0)
2937 idx = CAYMAN_RING_TYPE_CP1_INDEX;
2938 else
2939 idx = CAYMAN_RING_TYPE_CP2_INDEX;
2940
2941 if (rdev->ring[idx].mqd_obj == NULL) {
2942 r = radeon_bo_create(rdev,
2943 sizeof(struct bonaire_mqd),
2944 PAGE_SIZE, true,
2945 RADEON_GEM_DOMAIN_GTT, NULL,
2946 &rdev->ring[idx].mqd_obj);
2947 if (r) {
2948 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
2949 return r;
2950 }
2951 }
2952
2953 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
2954 if (unlikely(r != 0)) {
2955 cik_cp_compute_fini(rdev);
2956 return r;
2957 }
2958 r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
2959 &mqd_gpu_addr);
2960 if (r) {
2961 dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
2962 cik_cp_compute_fini(rdev);
2963 return r;
2964 }
2965 r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
2966 if (r) {
2967 dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
2968 cik_cp_compute_fini(rdev);
2969 return r;
2970 }
2971
2972 /* doorbell offset */
2973 rdev->ring[idx].doorbell_offset =
2974 (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
2975
2976 /* init the mqd struct */
2977 memset(buf, 0, sizeof(struct bonaire_mqd));
2978
2979 mqd = (struct bonaire_mqd *)buf;
2980 mqd->header = 0xC0310800;
2981 mqd->static_thread_mgmt01[0] = 0xffffffff;
2982 mqd->static_thread_mgmt01[1] = 0xffffffff;
2983 mqd->static_thread_mgmt23[0] = 0xffffffff;
2984 mqd->static_thread_mgmt23[1] = 0xffffffff;
2985
2986 cik_srbm_select(rdev, rdev->ring[idx].me,
2987 rdev->ring[idx].pipe,
2988 rdev->ring[idx].queue, 0);
2989
2990 /* disable wptr polling */
2991 tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
2992 tmp &= ~WPTR_POLL_EN;
2993 WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
2994
2995 /* enable doorbell? */
2996 mqd->queue_state.cp_hqd_pq_doorbell_control =
2997 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
2998 if (use_doorbell)
2999 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
3000 else
3001 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
3002 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
3003 mqd->queue_state.cp_hqd_pq_doorbell_control);
3004
3005 /* disable the queue if it's active */
3006 mqd->queue_state.cp_hqd_dequeue_request = 0;
3007 mqd->queue_state.cp_hqd_pq_rptr = 0;
3008 mqd->queue_state.cp_hqd_pq_wptr = 0;
3009 if (RREG32(CP_HQD_ACTIVE) & 1) {
3010 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
3011 for (i = 0; i < rdev->usec_timeout; i++) {
3012 if (!(RREG32(CP_HQD_ACTIVE) & 1))
3013 break;
3014 udelay(1);
3015 }
3016 WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
3017 WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
3018 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3019 }
3020
3021 /* set the pointer to the MQD */
3022 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
3023 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3024 WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
3025 WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
3026 /* set MQD vmid to 0 */
3027 mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
3028 mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
3029 WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
3030
3031 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3032 hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
3033 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
3034 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3035 WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
3036 WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
3037
3038 /* set up the HQD, this is similar to CP_RB0_CNTL */
3039 mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
3040 mqd->queue_state.cp_hqd_pq_control &=
3041 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
3042
3043 mqd->queue_state.cp_hqd_pq_control |=
3044 drm_order(rdev->ring[idx].ring_size / 8);
3045 mqd->queue_state.cp_hqd_pq_control |=
3046 (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
3047#ifdef __BIG_ENDIAN
3048 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
3049#endif
3050 mqd->queue_state.cp_hqd_pq_control &=
3051 ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
3052 mqd->queue_state.cp_hqd_pq_control |=
3053 PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
3054 WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
3055
3056 /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
3057 if (i == 0)
3058 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
3059 else
3060 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
3061 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3062 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3063 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
3064 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
3065 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3066
3067 /* set the wb address whether it's enabled or not */
3068 if (i == 0)
3069 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
3070 else
3071 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
3072 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
3073 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
3074 upper_32_bits(wb_gpu_addr) & 0xffff;
3075 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
3076 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
3077 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3078 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
3079
3080 /* enable the doorbell if requested */
3081 if (use_doorbell) {
3082 mqd->queue_state.cp_hqd_pq_doorbell_control =
3083 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
3084 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
3085 mqd->queue_state.cp_hqd_pq_doorbell_control |=
3086 DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
3087 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
3088 mqd->queue_state.cp_hqd_pq_doorbell_control &=
3089 ~(DOORBELL_SOURCE | DOORBELL_HIT);
3090
3091 } else {
3092 mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
3093 }
3094 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
3095 mqd->queue_state.cp_hqd_pq_doorbell_control);
3096
3097 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3098 rdev->ring[idx].wptr = 0;
3099 mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
3100 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3101 rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
3102 mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
3103
3104 /* set the vmid for the queue */
3105 mqd->queue_state.cp_hqd_vmid = 0;
3106 WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
3107
3108 /* activate the queue */
3109 mqd->queue_state.cp_hqd_active = 1;
3110 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3111
3112 cik_srbm_select(rdev, 0, 0, 0, 0);
3113
3114 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
3115 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
3116
3117 rdev->ring[idx].ready = true;
3118 r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
3119 if (r)
3120 rdev->ring[idx].ready = false;
3121 }
3122
3123 return 0;
3124}
3125
3126static void cik_cp_enable(struct radeon_device *rdev, bool enable)
3127{
3128 cik_cp_gfx_enable(rdev, enable);
3129 cik_cp_compute_enable(rdev, enable);
3130}
3131
3132static int cik_cp_load_microcode(struct radeon_device *rdev)
3133{
3134 int r;
3135
3136 r = cik_cp_gfx_load_microcode(rdev);
3137 if (r)
3138 return r;
3139 r = cik_cp_compute_load_microcode(rdev);
3140 if (r)
3141 return r;
3142
3143 return 0;
3144}
3145
3146static void cik_cp_fini(struct radeon_device *rdev)
3147{
3148 cik_cp_gfx_fini(rdev);
3149 cik_cp_compute_fini(rdev);
3150}
3151
3152static int cik_cp_resume(struct radeon_device *rdev)
3153{
3154 int r;
3155
3156 /* Reset all cp blocks */
3157 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
3158 RREG32(GRBM_SOFT_RESET);
3159 mdelay(15);
3160 WREG32(GRBM_SOFT_RESET, 0);
3161 RREG32(GRBM_SOFT_RESET);
3162
3163 r = cik_cp_load_microcode(rdev);
3164 if (r)
3165 return r;
3166
3167 r = cik_cp_gfx_resume(rdev);
3168 if (r)
3169 return r;
3170 r = cik_cp_compute_resume(rdev);
3171 if (r)
3172 return r;
3173
3174 return 0;
3175}
3176
3177/*
3178 * sDMA - System DMA
3179 * Starting with CIK, the GPU has new asynchronous
3180 * DMA engines. These engines are used for compute
3181 * and gfx. There are two DMA engines (SDMA0, SDMA1)
3182 * and each one supports 1 ring buffer used for gfx
3183 * and 2 queues used for compute.
3184 *
3185 * The programming model is very similar to the CP
3186 * (ring buffer, IBs, etc.), but sDMA has its own
3187 * packet format that is different from the PM4 format
3188 * used by the CP. sDMA supports copying data, writing
3189 * embedded data, solid fills, and a number of other
3190 * things. It also has support for tiling/detiling of
3191 * buffers.
3192 */
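
/* Minimal sketch, not driver code: a 5-dword sDMA linear-write packet,
 * assuming the header layout used by the SDMA_PACKET() macro (opcode in
 * bits 7:0, sub-opcode in bits 15:8, packet-specific bits in 31:16).
 * buf and gpu_addr are hypothetical; compare cik_sdma_ring_test() below.
 */
static void example_sdma_write_dword(u32 *buf, u64 gpu_addr, u32 value)
{
	buf[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	buf[1] = lower_32_bits(gpu_addr) & 0xfffffffc; /* dword-aligned low bits */
	buf[2] = upper_32_bits(gpu_addr);
	buf[3] = 1; /* number of data dwords to follow */
	buf[4] = value;
}
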
3193/**
3194 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
3195 *
3196 * @rdev: radeon_device pointer
3197 * @ib: IB object to schedule
3198 *
3199 * Schedule an IB in the DMA ring (CIK).
3200 */
3201void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
3202 struct radeon_ib *ib)
3203{
3204 struct radeon_ring *ring = &rdev->ring[ib->ring];
3205 u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
3206
3207 if (rdev->wb.enabled) {
3208 u32 next_rptr = ring->wptr + 5;
3209 while ((next_rptr & 7) != 4)
3210 next_rptr++;
3211 next_rptr += 4;
3212 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3213 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3214 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3215 radeon_ring_write(ring, 1); /* number of DWs to follow */
3216 radeon_ring_write(ring, next_rptr);
3217 }
3218
3219 /* IB packet must end on an 8 DW boundary */
3220 while ((ring->wptr & 7) != 4)
3221 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
3222 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
3223 radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
3224 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
3225 radeon_ring_write(ring, ib->length_dw);
3227}
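
/* Alignment example for the NOP padding above: with wptr == 0x12 the loop
 * pads to 0x14 ((0x14 & 7) == 4), so the 4-dword INDIRECT_BUFFER packet
 * ends at 0x18, an 8-dword boundary.
 */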
3228
3229/**
3230 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
3231 *
3232 * @rdev: radeon_device pointer
3233 * @fence: radeon fence object
3234 *
3235 * Add a DMA fence packet to the ring to write
3236 * the fence seq number and a DMA trap packet to generate
3237 * an interrupt if needed (CIK).
3238 */
3239void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
3240 struct radeon_fence *fence)
3241{
3242 struct radeon_ring *ring = &rdev->ring[fence->ring];
3243 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3244 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
3245 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
3246 u32 ref_and_mask;
3247
3248 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
3249 ref_and_mask = SDMA0;
3250 else
3251 ref_and_mask = SDMA1;
3252
3253 /* write the fence */
3254 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
3255 radeon_ring_write(ring, addr & 0xffffffff);
3256 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3257 radeon_ring_write(ring, fence->seq);
3258 /* generate an interrupt */
3259 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
3260 /* flush HDP */
3261 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
3262 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
3263 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
3264 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
3265 radeon_ring_write(ring, ref_and_mask); /* MASK */
3266 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
3267}
3268
3269/**
3270 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
3271 *
3272 * @rdev: radeon_device pointer
3273 * @ring: radeon_ring structure holding ring information
3274 * @semaphore: radeon semaphore object
3275 * @emit_wait: wait or signal semaphore
3276 *
3277 * Add a DMA semaphore packet to the ring to wait on or signal
3278 * other rings (CIK).
3279 */
3280void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
3281 struct radeon_ring *ring,
3282 struct radeon_semaphore *semaphore,
3283 bool emit_wait)
3284{
3285 u64 addr = semaphore->gpu_addr;
3286 u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
3287
3288 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
3289 radeon_ring_write(ring, addr & 0xfffffff8);
3290 radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3291}
3292
3293/**
3294 * cik_sdma_gfx_stop - stop the gfx async dma engines
3295 *
3296 * @rdev: radeon_device pointer
3297 *
3298 * Stop the gfx async dma ring buffers (CIK).
3299 */
3300static void cik_sdma_gfx_stop(struct radeon_device *rdev)
3301{
3302 u32 rb_cntl, reg_offset;
3303 int i;
3304
3305 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3306
3307 for (i = 0; i < 2; i++) {
3308 if (i == 0)
3309 reg_offset = SDMA0_REGISTER_OFFSET;
3310 else
3311 reg_offset = SDMA1_REGISTER_OFFSET;
3312 rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
3313 rb_cntl &= ~SDMA_RB_ENABLE;
3314 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3315 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
3316 }
3317}
3318
3319/**
3320 * cik_sdma_rlc_stop - stop the compute async dma engines
3321 *
3322 * @rdev: radeon_device pointer
3323 *
3324 * Stop the compute async dma queues (CIK).
3325 */
3326static void cik_sdma_rlc_stop(struct radeon_device *rdev)
3327{
3328 /* XXX todo */
3329}
3330
3331/**
3332 * cik_sdma_enable - enable/disable the async dma engines
3333 *
3334 * @rdev: radeon_device pointer
3335 * @enable: enable/disable the DMA MEs.
3336 *
3337 * Halt or unhalt the async dma engines (CIK).
3338 */
3339static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
3340{
3341 u32 me_cntl, reg_offset;
3342 int i;
3343
3344 for (i = 0; i < 2; i++) {
3345 if (i == 0)
3346 reg_offset = SDMA0_REGISTER_OFFSET;
3347 else
3348 reg_offset = SDMA1_REGISTER_OFFSET;
3349 me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
3350 if (enable)
3351 me_cntl &= ~SDMA_HALT;
3352 else
3353 me_cntl |= SDMA_HALT;
3354 WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
3355 }
3356}
3357
3358/**
3359 * cik_sdma_gfx_resume - setup and start the async dma engines
3360 *
3361 * @rdev: radeon_device pointer
3362 *
3363 * Set up the gfx DMA ring buffers and enable them (CIK).
3364 * Returns 0 for success, error for failure.
3365 */
3366static int cik_sdma_gfx_resume(struct radeon_device *rdev)
3367{
3368 struct radeon_ring *ring;
3369 u32 rb_cntl, ib_cntl;
3370 u32 rb_bufsz;
3371 u32 reg_offset, wb_offset;
3372 int i, r;
3373
3374 for (i = 0; i < 2; i++) {
3375 if (i == 0) {
3376 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3377 reg_offset = SDMA0_REGISTER_OFFSET;
3378 wb_offset = R600_WB_DMA_RPTR_OFFSET;
3379 } else {
3380 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
3381 reg_offset = SDMA1_REGISTER_OFFSET;
3382 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
3383 }
3384
3385 WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
3386 WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
3387
3388 /* Set ring buffer size in dwords */
3389 rb_bufsz = drm_order(ring->ring_size / 4);
3390 rb_cntl = rb_bufsz << 1;
3391#ifdef __BIG_ENDIAN
3392 rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
3393#endif
3394 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
3395
3396 /* Initialize the ring buffer's read and write pointers */
3397 WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
3398 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
3399
3400 /* set the wb address whether it's enabled or not */
3401 WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
3402 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
3403 WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
3404 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
3405
3406 if (rdev->wb.enabled)
3407 rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
3408
3409 WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
3410 WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
3411
3412 ring->wptr = 0;
3413 WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
3414
3415 ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
3416
3417 /* enable DMA RB */
3418 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
3419
3420 ib_cntl = SDMA_IB_ENABLE;
3421#ifdef __BIG_ENDIAN
3422 ib_cntl |= SDMA_IB_SWAP_ENABLE;
3423#endif
3424 /* enable DMA IBs */
3425 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
3426
3427 ring->ready = true;
3428
3429 r = radeon_ring_test(rdev, ring->idx, ring);
3430 if (r) {
3431 ring->ready = false;
3432 return r;
3433 }
3434 }
3435
3436 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3437
3438 return 0;
3439}
3440
3441/**
3442 * cik_sdma_rlc_resume - setup and start the async dma engines
3443 *
3444 * @rdev: radeon_device pointer
3445 *
3446 * Set up the compute DMA queues and enable them (CIK).
3447 * Returns 0 for success, error for failure.
3448 */
3449static int cik_sdma_rlc_resume(struct radeon_device *rdev)
3450{
3451 /* XXX todo */
3452 return 0;
3453}
3454
3455/**
3456 * cik_sdma_load_microcode - load the sDMA ME ucode
3457 *
3458 * @rdev: radeon_device pointer
3459 *
3460 * Loads the sDMA0/1 ucode.
3461 * Returns 0 for success, -EINVAL if the ucode is not available.
3462 */
3463static int cik_sdma_load_microcode(struct radeon_device *rdev)
3464{
3465 const __be32 *fw_data;
3466 int i;
3467
3468 if (!rdev->sdma_fw)
3469 return -EINVAL;
3470
3471 /* stop the gfx rings and rlc compute queues */
3472 cik_sdma_gfx_stop(rdev);
3473 cik_sdma_rlc_stop(rdev);
3474
3475 /* halt the MEs */
3476 cik_sdma_enable(rdev, false);
3477
3478 /* sdma0 */
3479 fw_data = (const __be32 *)rdev->sdma_fw->data;
3480 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3481 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3482 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3483 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3484
3485 /* sdma1 */
3486 fw_data = (const __be32 *)rdev->sdma_fw->data;
3487 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3488 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
3489 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
3490 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
3491
3492 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
3493 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
3494 return 0;
3495}
3496
3497/**
3498 * cik_sdma_resume - setup and start the async dma engines
3499 *
3500 * @rdev: radeon_device pointer
3501 *
3502 * Set up the DMA engines and enable them (CIK).
3503 * Returns 0 for success, error for failure.
3504 */
3505static int cik_sdma_resume(struct radeon_device *rdev)
3506{
3507 int r;
3508
3509 /* Reset dma */
3510 WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
3511 RREG32(SRBM_SOFT_RESET);
3512 udelay(50);
3513 WREG32(SRBM_SOFT_RESET, 0);
3514 RREG32(SRBM_SOFT_RESET);
3515
3516 r = cik_sdma_load_microcode(rdev);
3517 if (r)
3518 return r;
3519
3520 /* unhalt the MEs */
3521 cik_sdma_enable(rdev, true);
3522
3523 /* start the gfx rings and rlc compute queues */
3524 r = cik_sdma_gfx_resume(rdev);
3525 if (r)
3526 return r;
3527 r = cik_sdma_rlc_resume(rdev);
3528 if (r)
3529 return r;
3530
3531 return 0;
3532}
3533
3534/**
3535 * cik_sdma_fini - tear down the async dma engines
3536 *
3537 * @rdev: radeon_device pointer
3538 *
3539 * Stop the async dma engines and free the rings (CIK).
3540 */
3541static void cik_sdma_fini(struct radeon_device *rdev)
3542{
3543 /* stop the gfx rings and rlc compute queues */
3544 cik_sdma_gfx_stop(rdev);
3545 cik_sdma_rlc_stop(rdev);
3546 /* halt the MEs */
3547 cik_sdma_enable(rdev, false);
3548 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
3549 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
3550 /* XXX - compute dma queue tear down */
3551}
3552
3553/**
3554 * cik_copy_dma - copy pages using the DMA engine
3555 *
3556 * @rdev: radeon_device pointer
3557 * @src_offset: src GPU address
3558 * @dst_offset: dst GPU address
3559 * @num_gpu_pages: number of GPU pages to xfer
3560 * @fence: radeon fence object
3561 *
3562 * Copy GPU pages using the DMA engine (CIK).
3563 * Used by the radeon ttm implementation to move pages if
3564 * registered as the asic copy callback.
3565 */
3566int cik_copy_dma(struct radeon_device *rdev,
3567 uint64_t src_offset, uint64_t dst_offset,
3568 unsigned num_gpu_pages,
3569 struct radeon_fence **fence)
3570{
3571 struct radeon_semaphore *sem = NULL;
3572 int ring_index = rdev->asic->copy.dma_ring_index;
3573 struct radeon_ring *ring = &rdev->ring[ring_index];
3574 u32 size_in_bytes, cur_size_in_bytes;
3575 int i, num_loops;
3576 int r = 0;
3577
3578 r = radeon_semaphore_create(rdev, &sem);
3579 if (r) {
3580 DRM_ERROR("radeon: moving bo (%d).\n", r);
3581 return r;
3582 }
3583
3584 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3585 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3586 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
3587 if (r) {
3588 DRM_ERROR("radeon: moving bo (%d).\n", r);
3589 radeon_semaphore_free(rdev, &sem, NULL);
3590 return r;
3591 }
3592
3593 if (radeon_fence_need_sync(*fence, ring->idx)) {
3594 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3595 ring->idx);
3596 radeon_fence_note_sync(*fence, ring->idx);
3597 } else {
3598 radeon_semaphore_free(rdev, &sem, NULL);
3599 }
3600
3601 for (i = 0; i < num_loops; i++) {
3602 cur_size_in_bytes = size_in_bytes;
3603 if (cur_size_in_bytes > 0x1fffff)
3604 cur_size_in_bytes = 0x1fffff;
3605 size_in_bytes -= cur_size_in_bytes;
3606 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
3607 radeon_ring_write(ring, cur_size_in_bytes);
3608 radeon_ring_write(ring, 0); /* src/dst endian swap */
3609 radeon_ring_write(ring, src_offset & 0xffffffff);
3610 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
3611 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3612 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
3613 src_offset += cur_size_in_bytes;
3614 dst_offset += cur_size_in_bytes;
3615 }
3616
3617 r = radeon_fence_emit(rdev, fence, ring->idx);
3618 if (r) {
3619 radeon_ring_unlock_undo(rdev, ring);
3620 return r;
3621 }
3622
3623 radeon_ring_unlock_commit(rdev, ring);
3624 radeon_semaphore_free(rdev, &sem, *fence);
3625
3626 return r;
3627}
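
/* Worked example of the chunking above: copying 8 MiB (0x800000 bytes)
 * against the 0x1fffff-byte per-packet limit gives
 * num_loops = DIV_ROUND_UP(0x800000, 0x1fffff) = 5 COPY packets.
 */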
3628
3629/**
3630 * cik_sdma_ring_test - simple async dma engine test
3631 *
3632 * @rdev: radeon_device pointer
3633 * @ring: radeon_ring structure holding ring information
3634 *
3635 * Test the DMA engine by using it to write a
3636 * value to memory (CIK).
3637 * Returns 0 for success, error for failure.
3638 */
3639int cik_sdma_ring_test(struct radeon_device *rdev,
3640 struct radeon_ring *ring)
3641{
3642 unsigned i;
3643 int r;
3644 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3645 u32 tmp;
3646
3647 if (!ptr) {
3648 DRM_ERROR("invalid vram scratch pointer\n");
3649 return -EINVAL;
3650 }
3651
3652 tmp = 0xCAFEDEAD;
3653 writel(tmp, ptr);
3654
3655 r = radeon_ring_lock(rdev, ring, 4);
3656 if (r) {
3657 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
3658 return r;
3659 }
3660 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3661 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
3662 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
3663 radeon_ring_write(ring, 1); /* number of DWs to follow */
3664 radeon_ring_write(ring, 0xDEADBEEF);
3665 radeon_ring_unlock_commit(rdev, ring);
3666
3667 for (i = 0; i < rdev->usec_timeout; i++) {
3668 tmp = readl(ptr);
3669 if (tmp == 0xDEADBEEF)
3670 break;
3671 DRM_UDELAY(1);
3672 }
3673
3674 if (i < rdev->usec_timeout) {
3675 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
3676 } else {
3677 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
3678 ring->idx, tmp);
3679 r = -EINVAL;
3680 }
3681 return r;
3682}
3683
3684/**
3685 * cik_sdma_ib_test - test an IB on the DMA engine
3686 *
3687 * @rdev: radeon_device pointer
3688 * @ring: radeon_ring structure holding ring information
3689 *
3690 * Test a simple IB in the DMA ring (CIK).
3691 * Returns 0 on success, error on failure.
3692 */
3693int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3694{
3695 struct radeon_ib ib;
3696 unsigned i;
3697 int r;
3698 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3699 u32 tmp = 0;
3700
3701 if (!ptr) {
3702 DRM_ERROR("invalid vram scratch pointer\n");
3703 return -EINVAL;
3704 }
3705
3706 tmp = 0xCAFEDEAD;
3707 writel(tmp, ptr);
3708
3709 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3710 if (r) {
3711 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3712 return r;
3713 }
3714
3715 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
3716 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3717 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
3718 ib.ptr[3] = 1;
3719 ib.ptr[4] = 0xDEADBEEF;
3720 ib.length_dw = 5;
3721
3722 r = radeon_ib_schedule(rdev, &ib, NULL);
3723 if (r) {
3724 radeon_ib_free(rdev, &ib);
3725 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3726 return r;
3727 }
3728 r = radeon_fence_wait(ib.fence, false);
3729 if (r) {
3730 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3731 return r;
3732 }
3733 for (i = 0; i < rdev->usec_timeout; i++) {
3734 tmp = readl(ptr);
3735 if (tmp == 0xDEADBEEF)
3736 break;
3737 DRM_UDELAY(1);
3738 }
3739 if (i < rdev->usec_timeout) {
3740 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3741 } else {
3742 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3743 r = -EINVAL;
3744 }
3745 radeon_ib_free(rdev, &ib);
3746 return r;
3747}
3748
3749
3750static void cik_print_gpu_status_regs(struct radeon_device *rdev)
3751{
3752 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
3753 RREG32(GRBM_STATUS));
3754 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
3755 RREG32(GRBM_STATUS2));
3756 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
3757 RREG32(GRBM_STATUS_SE0));
3758 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
3759 RREG32(GRBM_STATUS_SE1));
3760 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
3761 RREG32(GRBM_STATUS_SE2));
3762 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
3763 RREG32(GRBM_STATUS_SE3));
3764 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
3765 RREG32(SRBM_STATUS));
3766 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
3767 RREG32(SRBM_STATUS2));
3768 dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
3769 RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
3770 dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
3771 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
3772 dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
3773 dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
3774 RREG32(CP_STALLED_STAT1));
3775 dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
3776 RREG32(CP_STALLED_STAT2));
3777 dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
3778 RREG32(CP_STALLED_STAT3));
3779 dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
3780 RREG32(CP_CPF_BUSY_STAT));
3781 dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
3782 RREG32(CP_CPF_STALLED_STAT1));
3783 dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
3784 dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
3785 dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
3786 RREG32(CP_CPC_STALLED_STAT1));
3787 dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
3788}
3789
3790/**
3791 * cik_gpu_check_soft_reset - check which blocks are busy
3792 *
3793 * @rdev: radeon_device pointer
3794 *
3795 * Check which blocks are busy and return the relevant reset
3796 * mask to be used by cik_gpu_soft_reset().
3797 * Returns a mask of the blocks to be reset.
3798 */
3799static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
3800{
3801 u32 reset_mask = 0;
3802 u32 tmp;
3803
3804 /* GRBM_STATUS */
3805 tmp = RREG32(GRBM_STATUS);
3806 if (tmp & (PA_BUSY | SC_BUSY |
3807 BCI_BUSY | SX_BUSY |
3808 TA_BUSY | VGT_BUSY |
3809 DB_BUSY | CB_BUSY |
3810 GDS_BUSY | SPI_BUSY |
3811 IA_BUSY | IA_BUSY_NO_DMA))
3812 reset_mask |= RADEON_RESET_GFX;
3813
3814 if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
3815 reset_mask |= RADEON_RESET_CP;
3816
3817 /* GRBM_STATUS2 */
3818 tmp = RREG32(GRBM_STATUS2);
3819 if (tmp & RLC_BUSY)
3820 reset_mask |= RADEON_RESET_RLC;
3821
3822 /* SDMA0_STATUS_REG */
3823 tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
3824 if (!(tmp & SDMA_IDLE))
3825 reset_mask |= RADEON_RESET_DMA;
3826
3827 /* SDMA1_STATUS_REG */
3828 tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
3829 if (!(tmp & SDMA_IDLE))
3830 reset_mask |= RADEON_RESET_DMA1;
3831
3832 /* SRBM_STATUS2 */
3833 tmp = RREG32(SRBM_STATUS2);
3834 if (tmp & SDMA_BUSY)
3835 reset_mask |= RADEON_RESET_DMA;
3836
3837 if (tmp & SDMA1_BUSY)
3838 reset_mask |= RADEON_RESET_DMA1;
3839
3840 /* SRBM_STATUS */
3841 tmp = RREG32(SRBM_STATUS);
3842
3843 if (tmp & IH_BUSY)
3844 reset_mask |= RADEON_RESET_IH;
3845
3846 if (tmp & SEM_BUSY)
3847 reset_mask |= RADEON_RESET_SEM;
3848
3849 if (tmp & GRBM_RQ_PENDING)
3850 reset_mask |= RADEON_RESET_GRBM;
3851
3852 if (tmp & VMC_BUSY)
3853 reset_mask |= RADEON_RESET_VMC;
3854
3855 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
3856 MCC_BUSY | MCD_BUSY))
3857 reset_mask |= RADEON_RESET_MC;
3858
3859 if (evergreen_is_display_hung(rdev))
3860 reset_mask |= RADEON_RESET_DISPLAY;
3861
3862 /* Skip MC reset as it's most likely not hung, just busy */
3863 if (reset_mask & RADEON_RESET_MC) {
3864 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
3865 reset_mask &= ~RADEON_RESET_MC;
3866 }
3867
3868 return reset_mask;
3869}
3870
3871/**
3872 * cik_gpu_soft_reset - soft reset GPU
3873 *
3874 * @rdev: radeon_device pointer
3875 * @reset_mask: mask of which blocks to reset
3876 *
3877 * Soft reset the blocks specified in @reset_mask.
3878 */
3879static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3880{
3881 struct evergreen_mc_save save;
3882 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
3883 u32 tmp;
3884
3885 if (reset_mask == 0)
3886 return;
3887
3888 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
3889
3890 cik_print_gpu_status_regs(rdev);
3891 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3892 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3893 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3894 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3895
3896 /* stop the rlc */
3897 cik_rlc_stop(rdev);
3898
3899 /* Disable GFX parsing/prefetching */
3900 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
3901
3902 /* Disable MEC parsing/prefetching */
3903 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
3904
3905 if (reset_mask & RADEON_RESET_DMA) {
3906 /* sdma0 */
3907 tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
3908 tmp |= SDMA_HALT;
3909 WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
3910 }
3911 if (reset_mask & RADEON_RESET_DMA1) {
3912 /* sdma1 */
3913 tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
3914 tmp |= SDMA_HALT;
3915 WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
3916 }
3917
3918 evergreen_mc_stop(rdev, &save);
3919 if (evergreen_mc_wait_for_idle(rdev)) {
3920 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
3921 }
3922
3923 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
3924 grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
3925
3926 if (reset_mask & RADEON_RESET_CP) {
3927 grbm_soft_reset |= SOFT_RESET_CP;
3928
3929 srbm_soft_reset |= SOFT_RESET_GRBM;
3930 }
3931
3932 if (reset_mask & RADEON_RESET_DMA)
3933 srbm_soft_reset |= SOFT_RESET_SDMA;
3934
3935 if (reset_mask & RADEON_RESET_DMA1)
3936 srbm_soft_reset |= SOFT_RESET_SDMA1;
3937
3938 if (reset_mask & RADEON_RESET_DISPLAY)
3939 srbm_soft_reset |= SOFT_RESET_DC;
3940
3941 if (reset_mask & RADEON_RESET_RLC)
3942 grbm_soft_reset |= SOFT_RESET_RLC;
3943
3944 if (reset_mask & RADEON_RESET_SEM)
3945 srbm_soft_reset |= SOFT_RESET_SEM;
3946
3947 if (reset_mask & RADEON_RESET_IH)
3948 srbm_soft_reset |= SOFT_RESET_IH;
3949
3950 if (reset_mask & RADEON_RESET_GRBM)
3951 srbm_soft_reset |= SOFT_RESET_GRBM;
3952
3953 if (reset_mask & RADEON_RESET_VMC)
3954 srbm_soft_reset |= SOFT_RESET_VMC;
3955
3956 if (!(rdev->flags & RADEON_IS_IGP)) {
3957 if (reset_mask & RADEON_RESET_MC)
3958 srbm_soft_reset |= SOFT_RESET_MC;
3959 }
3960
3961 if (grbm_soft_reset) {
3962 tmp = RREG32(GRBM_SOFT_RESET);
3963 tmp |= grbm_soft_reset;
3964 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3965 WREG32(GRBM_SOFT_RESET, tmp);
3966 tmp = RREG32(GRBM_SOFT_RESET);
3967
3968 udelay(50);
3969
3970 tmp &= ~grbm_soft_reset;
3971 WREG32(GRBM_SOFT_RESET, tmp);
3972 tmp = RREG32(GRBM_SOFT_RESET);
3973 }
3974
3975 if (srbm_soft_reset) {
3976 tmp = RREG32(SRBM_SOFT_RESET);
3977 tmp |= srbm_soft_reset;
3978 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3979 WREG32(SRBM_SOFT_RESET, tmp);
3980 tmp = RREG32(SRBM_SOFT_RESET);
3981
3982 udelay(50);
3983
3984 tmp &= ~srbm_soft_reset;
3985 WREG32(SRBM_SOFT_RESET, tmp);
3986 tmp = RREG32(SRBM_SOFT_RESET);
3987 }
3988
3989 /* Wait a little for things to settle down */
3990 udelay(50);
3991
3992 evergreen_mc_resume(rdev, &save);
3993 udelay(50);
3994
3995 cik_print_gpu_status_regs(rdev);
3996}
3997
3998/**
3999 * cik_asic_reset - soft reset GPU
4000 *
4001 * @rdev: radeon_device pointer
4002 *
4003 * Look up which blocks are hung and attempt
4004 * to reset them.
4005 * Returns 0 for success.
4006 */
4007int cik_asic_reset(struct radeon_device *rdev)
4008{
4009 u32 reset_mask;
4010
4011 reset_mask = cik_gpu_check_soft_reset(rdev);
4012
4013 if (reset_mask)
4014 r600_set_bios_scratch_engine_hung(rdev, true);
4015
4016 cik_gpu_soft_reset(rdev, reset_mask);
4017
4018 reset_mask = cik_gpu_check_soft_reset(rdev);
4019
4020 if (!reset_mask)
4021 r600_set_bios_scratch_engine_hung(rdev, false);
4022
4023 return 0;
4024}
4025
4026/**
4027 * cik_gfx_is_lockup - check if the 3D engine is locked up
4028 *
4029 * @rdev: radeon_device pointer
4030 * @ring: radeon_ring structure holding ring information
4031 *
4032 * Check if the 3D engine is locked up (CIK).
4033 * Returns true if the engine is locked, false if not.
4034 */
4035bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4036{
4037 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4038
4039 if (!(reset_mask & (RADEON_RESET_GFX |
4040 RADEON_RESET_COMPUTE |
4041 RADEON_RESET_CP))) {
4042 radeon_ring_lockup_update(ring);
4043 return false;
4044 }
4045 /* force CP activities */
4046 radeon_ring_force_activity(rdev, ring);
4047 return radeon_ring_test_lockup(rdev, ring);
4048}
4049
4050/**
4051 * cik_sdma_is_lockup - Check if the DMA engine is locked up
4052 *
4053 * @rdev: radeon_device pointer
4054 * @ring: radeon_ring structure holding ring information
4055 *
4056 * Check if the async DMA engine is locked up (CIK).
4057 * Returns true if the engine appears to be locked up, false if not.
4058 */
4059bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4060{
4061 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4062 u32 mask;
4063
4064 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
4065 mask = RADEON_RESET_DMA;
4066 else
4067 mask = RADEON_RESET_DMA1;
4068
4069 if (!(reset_mask & mask)) {
4070 radeon_ring_lockup_update(ring);
4071 return false;
4072 }
4073 /* force ring activities */
4074 radeon_ring_force_activity(rdev, ring);
4075 return radeon_ring_test_lockup(rdev, ring);
4076}
4077
4078/* MC */
4079/**
4080 * cik_mc_program - program the GPU memory controller
4081 *
4082 * @rdev: radeon_device pointer
4083 *
4084 * Set the location of vram, gart, and AGP in the GPU's
4085 * physical address space (CIK).
4086 */
4087static void cik_mc_program(struct radeon_device *rdev)
4088{
4089 struct evergreen_mc_save save;
4090 u32 tmp;
4091 int i, j;
4092
4093 /* Initialize HDP */
4094 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
4095 WREG32((0x2c14 + j), 0x00000000);
4096 WREG32((0x2c18 + j), 0x00000000);
4097 WREG32((0x2c1c + j), 0x00000000);
4098 WREG32((0x2c20 + j), 0x00000000);
4099 WREG32((0x2c24 + j), 0x00000000);
4100 }
4101 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
4102
4103 evergreen_mc_stop(rdev, &save);
4104 if (radeon_mc_wait_for_idle(rdev)) {
4105 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4106 }
4107 /* Lockout access through VGA aperture*/
4108 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
4109 /* Update configuration */
4110 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
4111 rdev->mc.vram_start >> 12);
4112 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
4113 rdev->mc.vram_end >> 12);
4114 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
4115 rdev->vram_scratch.gpu_addr >> 12);
4116 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4117 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4118 WREG32(MC_VM_FB_LOCATION, tmp);
4119 /* XXX double check these! */
4120 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
4121 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
4122 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
4123 WREG32(MC_VM_AGP_BASE, 0);
4124 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
4125 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
4126 if (radeon_mc_wait_for_idle(rdev)) {
4127 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4128 }
4129 evergreen_mc_resume(rdev, &save);
4130 /* we need to own VRAM, so turn off the VGA renderer here
4131 * to stop it overwriting our objects */
4132 rv515_vga_render_disable(rdev);
4133}
4134
4135/**
4136 * cik_mc_init - initialize the memory controller driver params
4137 *
4138 * @rdev: radeon_device pointer
4139 *
4140 * Look up the amount of vram, vram width, and decide how to place
4141 * vram and gart within the GPU's physical address space (CIK).
4142 * Returns 0 for success.
4143 */
4144static int cik_mc_init(struct radeon_device *rdev)
4145{
4146 u32 tmp;
4147 int chansize, numchan;
4148
4149 /* Get VRAM information */
4150 rdev->mc.vram_is_ddr = true;
4151 tmp = RREG32(MC_ARB_RAMCFG);
4152 if (tmp & CHANSIZE_MASK) {
4153 chansize = 64;
4154 } else {
4155 chansize = 32;
4156 }
4157 tmp = RREG32(MC_SHARED_CHMAP);
4158 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
4159 case 0:
4160 default:
4161 numchan = 1;
4162 break;
4163 case 1:
4164 numchan = 2;
4165 break;
4166 case 2:
4167 numchan = 4;
4168 break;
4169 case 3:
4170 numchan = 8;
4171 break;
4172 case 4:
4173 numchan = 3;
4174 break;
4175 case 5:
4176 numchan = 6;
4177 break;
4178 case 6:
4179 numchan = 10;
4180 break;
4181 case 7:
4182 numchan = 12;
4183 break;
4184 case 8:
4185 numchan = 16;
4186 break;
4187 }
4188 rdev->mc.vram_width = numchan * chansize;
4189 /* Could aper size report 0? */
4190 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4191 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4192 /* size in MB on CIK */
4193 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
4194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
4195 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4196 si_vram_gtt_location(rdev, &rdev->mc);
4197 radeon_update_bandwidth_info(rdev);
4198
4199 return 0;
4200}
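
/*
 * Worked example (editor's note, illustrative values only): with
 * CHANSIZE set in MC_ARB_RAMCFG (64-bit channels) and a NOOFCHAN
 * field of 3 (decoded to 8 channels above), cik_mc_init() reports
 * vram_width = numchan * chansize = 8 * 64 = 512 bits.
 */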
4201
4202/*
4203 * GART
4204 * VMID 0 is the physical GPU addresses as used by the kernel.
4205 * VMIDs 1-15 are used for userspace clients and are handled
4206 * by the radeon vm/hsa code.
4207 */
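
/*
 * Editor's sketch (hypothetical helper, not part of the driver):
 * the per-VMID page table base registers sit in two banks, which
 * cik_pcie_gart_enable() and the VM flush paths below index like so:
 */
static inline u32 cik_vmid_pt_base_reg_sketch(unsigned vmid)
{
	/* VMIDs 0-7: VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid * 4 */
	if (vmid < 8)
		return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vmid << 2);
	/* VMIDs 8-15: VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8) * 4 */
	return VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vmid - 8) << 2);
}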
4208/**
4209 * cik_pcie_gart_tlb_flush - gart tlb flush callback
4210 *
4211 * @rdev: radeon_device pointer
4212 *
4213 * Flush the TLB for the VMID 0 page table (CIK).
4214 */
4215void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
4216{
4217 /* flush hdp cache */
4218 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
4219
4220 /* bits 0-15 are the VM contexts 0-15 */
4221 WREG32(VM_INVALIDATE_REQUEST, 0x1);
4222}
4223
4224/**
4225 * cik_pcie_gart_enable - gart enable
4226 *
4227 * @rdev: radeon_device pointer
4228 *
4229 * This sets up the TLBs, programs the page tables for VMID0,
4230 * sets up the hw for VMIDs 1-15 which are allocated on
4231 * demand, and sets up the global locations for the LDS, GDS,
4232 * and GPUVM for FSA64 clients (CIK).
4233 * Returns 0 for success, errors for failure.
4234 */
4235static int cik_pcie_gart_enable(struct radeon_device *rdev)
4236{
4237 int r, i;
4238
4239 if (rdev->gart.robj == NULL) {
4240 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
4241 return -EINVAL;
4242 }
4243 r = radeon_gart_table_vram_pin(rdev);
4244 if (r)
4245 return r;
4246 radeon_gart_restore(rdev);
4247 /* Setup TLB control */
4248 WREG32(MC_VM_MX_L1_TLB_CNTL,
4249 (0xA << 7) |
4250 ENABLE_L1_TLB |
4251 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4252 ENABLE_ADVANCED_DRIVER_MODEL |
4253 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4254 /* Setup L2 cache */
4255 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
4256 ENABLE_L2_FRAGMENT_PROCESSING |
4257 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4258 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4259 EFFECTIVE_L2_QUEUE_SIZE(7) |
4260 CONTEXT1_IDENTITY_ACCESS_MODE(1));
4261 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
4262 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4263 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
4264 /* setup context0 */
4265 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4266 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4267 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
4268 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
4269 (u32)(rdev->dummy_page.addr >> 12));
4270 WREG32(VM_CONTEXT0_CNTL2, 0);
4271 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
4272 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
4273
4274 WREG32(0x15D4, 0);
4275 WREG32(0x15D8, 0);
4276 WREG32(0x15DC, 0);
4277
4278 /* empty context1-15 */
4279 /* FIXME start with 4G, once using 2 level pt switch to full
4280 * vm size space
4281 */
4282 /* set vm size, must be a multiple of 4 */
4283 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
4284 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
4285 for (i = 1; i < 16; i++) {
4286 if (i < 8)
4287 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
4288 rdev->gart.table_addr >> 12);
4289 else
4290 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
4291 rdev->gart.table_addr >> 12);
4292 }
4293
4294 /* enable context1-15 */
4295 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
4296 (u32)(rdev->dummy_page.addr >> 12));
4297 WREG32(VM_CONTEXT1_CNTL2, 4);
4298 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
4299 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4300 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4301 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4302 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4303 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
4304 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
4305 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
4306 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
4307 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
4308 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
4309 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4310 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
4311
4312 /* TC cache setup ??? */
4313 WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
4314 WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
4315 WREG32(TC_CFG_L1_STORE_POLICY, 0);
4316
4317 WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
4318 WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
4319 WREG32(TC_CFG_L2_STORE_POLICY0, 0);
4320 WREG32(TC_CFG_L2_STORE_POLICY1, 0);
4321 WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
4322
4323 WREG32(TC_CFG_L1_VOLATILE, 0);
4324 WREG32(TC_CFG_L2_VOLATILE, 0);
4325
4326 if (rdev->family == CHIP_KAVERI) {
4327 u32 tmp = RREG32(CHUB_CONTROL);
4328 tmp &= ~BYPASS_VM;
4329 WREG32(CHUB_CONTROL, tmp);
4330 }
4331
4332 /* XXX SH_MEM regs */
4333 /* where to put LDS, scratch, GPUVM in FSA64 space */
4334 for (i = 0; i < 16; i++) {
4335 cik_srbm_select(rdev, 0, 0, 0, i);
4336 /* CP and shaders */
4337 WREG32(SH_MEM_CONFIG, 0);
4338 WREG32(SH_MEM_APE1_BASE, 1);
4339 WREG32(SH_MEM_APE1_LIMIT, 0);
4340 WREG32(SH_MEM_BASES, 0);
4341 /* SDMA GFX */
4342 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
4343 WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
4344 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
4345 WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
4346 /* XXX SDMA RLC - todo */
4347 }
4348 cik_srbm_select(rdev, 0, 0, 0, 0);
4349
4350 cik_pcie_gart_tlb_flush(rdev);
4351 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
4352 (unsigned)(rdev->mc.gtt_size >> 20),
4353 (unsigned long long)rdev->gart.table_addr);
4354 rdev->gart.ready = true;
4355 return 0;
4356}
4357
4358/**
4359 * cik_pcie_gart_disable - gart disable
4360 *
4361 * @rdev: radeon_device pointer
4362 *
4363 * This disables all VM page tables (CIK).
4364 */
4365static void cik_pcie_gart_disable(struct radeon_device *rdev)
4366{
4367 /* Disable all tables */
4368 WREG32(VM_CONTEXT0_CNTL, 0);
4369 WREG32(VM_CONTEXT1_CNTL, 0);
4370 /* Setup TLB control */
4371 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4372 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4373 /* Setup L2 cache */
4374 WREG32(VM_L2_CNTL,
4375 ENABLE_L2_FRAGMENT_PROCESSING |
4376 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4377 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4378 EFFECTIVE_L2_QUEUE_SIZE(7) |
4379 CONTEXT1_IDENTITY_ACCESS_MODE(1));
4380 WREG32(VM_L2_CNTL2, 0);
4381 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4382 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
4383 radeon_gart_table_vram_unpin(rdev);
4384}
4385
4386/**
4387 * cik_pcie_gart_fini - vm fini callback
4388 *
4389 * @rdev: radeon_device pointer
4390 *
4391 * Tears down the driver GART/VM setup (CIK).
4392 */
4393static void cik_pcie_gart_fini(struct radeon_device *rdev)
4394{
4395 cik_pcie_gart_disable(rdev);
4396 radeon_gart_table_vram_free(rdev);
4397 radeon_gart_fini(rdev);
4398}
4399
4400/* vm parser */
4401/**
4402 * cik_ib_parse - vm ib_parse callback
4403 *
4404 * @rdev: radeon_device pointer
4405 * @ib: indirect buffer pointer
4406 *
4407 * CIK uses hw IB checking so this is a nop (CIK).
4408 */
4409int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4410{
4411 return 0;
4412}
4413
4414/*
4415 * vm
4416 * VMID 0 is the physical GPU addresses as used by the kernel.
4417 * VMIDs 1-15 are used for userspace clients and are handled
4418 * by the radeon vm/hsa code.
4419 */
4420/**
4421 * cik_vm_init - cik vm init callback
4422 *
4423 * @rdev: radeon_device pointer
4424 *
4425 * Inits cik specific vm parameters (number of VMs, base of vram for
4426 * VMIDs 1-15) (CIK).
4427 * Returns 0 for success.
4428 */
4429int cik_vm_init(struct radeon_device *rdev)
4430{
4431 /* number of VMs */
4432 rdev->vm_manager.nvm = 16;
4433 /* base offset of vram pages */
4434 if (rdev->flags & RADEON_IS_IGP) {
4435 u64 tmp = RREG32(MC_VM_FB_OFFSET);
4436 tmp <<= 22;
4437 rdev->vm_manager.vram_base_offset = tmp;
4438 } else
4439 rdev->vm_manager.vram_base_offset = 0;
4440
4441 return 0;
4442}
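
/*
 * Worked example (editor's note, assumed readback value): the shift
 * above means MC_VM_FB_OFFSET counts in 4MB (1 << 22) units, so a
 * readback of 0x10 on an IGP gives vram_base_offset = 0x10 << 22,
 * i.e. a 64MB base offset.
 */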
4443
4444/**
4445 * cik_vm_fini - cik vm fini callback
4446 *
4447 * @rdev: radeon_device pointer
4448 *
4449 * Tear down any asic specific VM setup (CIK).
4450 */
4451void cik_vm_fini(struct radeon_device *rdev)
4452{
4453}
4454
4455/**
4456 * cik_vm_flush - cik vm flush using the CP
4457 *
4458 * @rdev: radeon_device pointer
 * @ridx: index of the ring used to emit the flush
 * @vm: vm to flush
4459 *
4460 * Update the page table base and flush the VM TLB
4461 * using the CP (CIK).
4462 */
4463void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4464{
4465 struct radeon_ring *ring = &rdev->ring[ridx];
4466
4467 if (vm == NULL)
4468 return;
4469
4470 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4471 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4472 WRITE_DATA_DST_SEL(0)));
4473 if (vm->id < 8) {
4474 radeon_ring_write(ring,
4475 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
4476 } else {
4477 radeon_ring_write(ring,
4478 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
4479 }
4480 radeon_ring_write(ring, 0);
4481 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
4482
4483 /* update SH_MEM_* regs */
4484 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4485 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4486 WRITE_DATA_DST_SEL(0)));
4487 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4488 radeon_ring_write(ring, 0);
4489 radeon_ring_write(ring, VMID(vm->id));
4490
4491 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
4492 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4493 WRITE_DATA_DST_SEL(0)));
4494 radeon_ring_write(ring, SH_MEM_BASES >> 2);
4495 radeon_ring_write(ring, 0);
4496
4497 radeon_ring_write(ring, 0); /* SH_MEM_BASES */
4498 radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
4499 radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
4500 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
4501
4502 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4503 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4504 WRITE_DATA_DST_SEL(0)));
4505 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4506 radeon_ring_write(ring, 0);
4507 radeon_ring_write(ring, VMID(0));
4508
4509 /* HDP flush */
4510 /* We should be using the WAIT_REG_MEM packet here like in
4511 * cik_fence_ring_emit(), but it causes the CP to hang in this
4512 * context...
4513 */
4514 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4515 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4516 WRITE_DATA_DST_SEL(0)));
4517 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
4518 radeon_ring_write(ring, 0);
4519 radeon_ring_write(ring, 0);
4520
4521 /* bits 0-15 are the VM contexts 0-15 */
4522 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4523 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4524 WRITE_DATA_DST_SEL(0)));
4525 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
4526 radeon_ring_write(ring, 0);
4527 radeon_ring_write(ring, 1 << vm->id);
4528
4529 /* compute doesn't have PFP */
4530 if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
4531 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4532 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4533 radeon_ring_write(ring, 0x0);
4534 }
4535}
4536
4537/**
4538 * cik_vm_set_page - update the page tables using CP or sDMA
4539 *
4540 * @rdev: radeon_device pointer
4541 * @ib: indirect buffer to fill with commands
4542 * @pe: addr of the page entry
4543 * @addr: dst addr to write into pe
4544 * @count: number of page entries to update
4545 * @incr: increase next addr by incr bytes
4546 * @flags: access flags
4547 *
4548 * Update the page tables using CP or sDMA (CIK).
4549 */
4550void cik_vm_set_page(struct radeon_device *rdev,
4551 struct radeon_ib *ib,
4552 uint64_t pe,
4553 uint64_t addr, unsigned count,
4554 uint32_t incr, uint32_t flags)
4555{
4556 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
4557 uint64_t value;
4558 unsigned ndw;
4559
4560 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
4561 /* CP */
4562 while (count) {
4563 ndw = 2 + count * 2;
4564 if (ndw > 0x3FFE)
4565 ndw = 0x3FFE;
4566
4567 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
4568 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
4569 WRITE_DATA_DST_SEL(1));
4570 ib->ptr[ib->length_dw++] = pe;
4571 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4572 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
4573 if (flags & RADEON_VM_PAGE_SYSTEM) {
4574 value = radeon_vm_map_gart(rdev, addr);
4575 value &= 0xFFFFFFFFFFFFF000ULL;
4576 } else if (flags & RADEON_VM_PAGE_VALID) {
4577 value = addr;
4578 } else {
4579 value = 0;
4580 }
4581 addr += incr;
4582 value |= r600_flags;
4583 ib->ptr[ib->length_dw++] = value;
4584 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4585 }
4586 }
4587 } else {
4588 /* DMA */
4589 if (flags & RADEON_VM_PAGE_SYSTEM) {
4590 while (count) {
4591 ndw = count * 2;
4592 if (ndw > 0xFFFFE)
4593 ndw = 0xFFFFE;
4594
4595 /* for non-physically contiguous pages (system) */
4596 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
4597 ib->ptr[ib->length_dw++] = pe;
4598 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4599 ib->ptr[ib->length_dw++] = ndw;
4600 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
4601 if (flags & RADEON_VM_PAGE_SYSTEM) {
4602 value = radeon_vm_map_gart(rdev, addr);
4603 value &= 0xFFFFFFFFFFFFF000ULL;
4604 } else if (flags & RADEON_VM_PAGE_VALID) {
4605 value = addr;
4606 } else {
4607 value = 0;
4608 }
4609 addr += incr;
4610 value |= r600_flags;
4611 ib->ptr[ib->length_dw++] = value;
4612 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4613 }
4614 }
4615 } else {
4616 while (count) {
4617 ndw = count;
4618 if (ndw > 0x7FFFF)
4619 ndw = 0x7FFFF;
4620
4621 if (flags & RADEON_VM_PAGE_VALID)
4622 value = addr;
4623 else
4624 value = 0;
4625 /* for physically contiguous pages (vram) */
4626 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
4627 ib->ptr[ib->length_dw++] = pe; /* dst addr */
4628 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4629 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
4630 ib->ptr[ib->length_dw++] = 0;
4631 ib->ptr[ib->length_dw++] = value; /* value */
4632 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4633 ib->ptr[ib->length_dw++] = incr; /* increment size */
4634 ib->ptr[ib->length_dw++] = 0;
4635 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
4636 pe += ndw * 8;
4637 addr += ndw * incr;
4638 count -= ndw;
4639 }
4640 }
4641 while (ib->length_dw & 0x7)
4642 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
4643 }
4644}
4645
4646/**
4647 * cik_dma_vm_flush - cik vm flush using sDMA
4648 *
4649 * @rdev: radeon_device pointer
 * @ridx: index of the sDMA ring used to emit the flush
 * @vm: vm to flush
4650 *
4651 * Update the page table base and flush the VM TLB
4652 * using sDMA (CIK).
4653 */
4654void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4655{
4656 struct radeon_ring *ring = &rdev->ring[ridx];
4657 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
4658 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
4659 u32 ref_and_mask;
4660
4661 if (vm == NULL)
4662 return;
4663
4664 if (ridx == R600_RING_TYPE_DMA_INDEX)
4665 ref_and_mask = SDMA0;
4666 else
4667 ref_and_mask = SDMA1;
4668
4669 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4670 if (vm->id < 8) {
4671 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
4672 } else {
4673 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
4674 }
4675 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
4676
4677 /* update SH_MEM_* regs */
4678 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4679 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4680 radeon_ring_write(ring, VMID(vm->id));
4681
4682 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4683 radeon_ring_write(ring, SH_MEM_BASES >> 2);
4684 radeon_ring_write(ring, 0);
4685
4686 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4687 radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
4688 radeon_ring_write(ring, 0);
4689
4690 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4691 radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
4692 radeon_ring_write(ring, 1);
4693
4694 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4695 radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
4696 radeon_ring_write(ring, 0);
4697
4698 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4699 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
4700 radeon_ring_write(ring, VMID(0));
4701
4702 /* flush HDP */
4703 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
4704 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
4705 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
4706 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
4707 radeon_ring_write(ring, ref_and_mask); /* MASK */
4708 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
4709
4710 /* flush TLB */
4711 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
4712 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
4713 radeon_ring_write(ring, 1 << vm->id);
4714}
4715
4716/*
4717 * RLC
4718 * The RLC is a multi-purpose microengine that handles a
4719 * variety of functions, the most important of which is
4720 * the interrupt controller.
4721 */
4722/**
4723 * cik_rlc_stop - stop the RLC ME
4724 *
4725 * @rdev: radeon_device pointer
4726 *
4727 * Halt the RLC ME (MicroEngine) (CIK).
4728 */
4729static void cik_rlc_stop(struct radeon_device *rdev)
4730{
4731 int i, j, k;
4732 u32 mask, tmp;
4733
4734 tmp = RREG32(CP_INT_CNTL_RING0);
4735 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4736 WREG32(CP_INT_CNTL_RING0, tmp);
4737
4738 RREG32(CB_CGTT_SCLK_CTRL);
4739 RREG32(CB_CGTT_SCLK_CTRL);
4740 RREG32(CB_CGTT_SCLK_CTRL);
4741 RREG32(CB_CGTT_SCLK_CTRL);
4742
4743 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
4744 WREG32(RLC_CGCG_CGLS_CTRL, tmp);
4745
4746 WREG32(RLC_CNTL, 0);
4747
4748 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
4749 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
4750 cik_select_se_sh(rdev, i, j);
4751 for (k = 0; k < rdev->usec_timeout; k++) {
4752 if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
4753 break;
4754 udelay(1);
4755 }
4756 }
4757 }
4758 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
4759
4760 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
4761 for (k = 0; k < rdev->usec_timeout; k++) {
4762 if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
4763 break;
4764 udelay(1);
4765 }
4766}
4767
4768/**
4769 * cik_rlc_start - start the RLC ME
4770 *
4771 * @rdev: radeon_device pointer
4772 *
4773 * Unhalt the RLC ME (MicroEngine) (CIK).
4774 */
4775static void cik_rlc_start(struct radeon_device *rdev)
4776{
4777 u32 tmp;
4778
4779 WREG32(RLC_CNTL, RLC_ENABLE);
4780
4781 tmp = RREG32(CP_INT_CNTL_RING0);
4782 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4783 WREG32(CP_INT_CNTL_RING0, tmp);
4784
4785 udelay(50);
4786}
4787
4788/**
4789 * cik_rlc_resume - setup the RLC hw
4790 *
4791 * @rdev: radeon_device pointer
4792 *
4793 * Initialize the RLC registers, load the ucode,
4794 * and start the RLC (CIK).
4795 * Returns 0 for success, -EINVAL if the ucode is not available.
4796 */
4797static int cik_rlc_resume(struct radeon_device *rdev)
4798{
4799 u32 i, size;
4800 u32 clear_state_info[3];
4801 const __be32 *fw_data;
4802
4803 if (!rdev->rlc_fw)
4804 return -EINVAL;
4805
4806 switch (rdev->family) {
4807 case CHIP_BONAIRE:
4808 default:
4809 size = BONAIRE_RLC_UCODE_SIZE;
4810 break;
4811 case CHIP_KAVERI:
4812 size = KV_RLC_UCODE_SIZE;
4813 break;
4814 case CHIP_KABINI:
4815 size = KB_RLC_UCODE_SIZE;
4816 break;
4817 }
4818
4819 cik_rlc_stop(rdev);
4820
4821 WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
4822 RREG32(GRBM_SOFT_RESET);
4823 udelay(50);
4824 WREG32(GRBM_SOFT_RESET, 0);
4825 RREG32(GRBM_SOFT_RESET);
4826 udelay(50);
4827
4828 WREG32(RLC_LB_CNTR_INIT, 0);
4829 WREG32(RLC_LB_CNTR_MAX, 0x00008000);
4830
4831 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
4832 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
4833 WREG32(RLC_LB_PARAMS, 0x00600408);
4834 WREG32(RLC_LB_CNTL, 0x80000004);
4835
4836 WREG32(RLC_MC_CNTL, 0);
4837 WREG32(RLC_UCODE_CNTL, 0);
4838
4839 fw_data = (const __be32 *)rdev->rlc_fw->data;
4840 WREG32(RLC_GPM_UCODE_ADDR, 0);
4841 for (i = 0; i < size; i++)
4842 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
4843 WREG32(RLC_GPM_UCODE_ADDR, 0);
4844
4845 /* XXX */
4846 clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
4847 clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
4848 clear_state_info[2] = 0;//cik_default_size;
4849 WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
4850 for (i = 0; i < 3; i++)
4851 WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
4852 WREG32(RLC_DRIVER_DMA_STATUS, 0);
4853
4854 cik_rlc_start(rdev);
4855
4856 return 0;
4857}
4858
4859/*
4860 * Interrupts
4861 * Starting with r6xx, interrupts are handled via a ring buffer.
4862 * Ring buffers are areas of GPU accessible memory that the GPU
4863 * writes interrupt vectors into and the host reads vectors out of.
4864 * There is a rptr (read pointer) that determines where the
4865 * host is currently reading, and a wptr (write pointer)
4866 * which determines where the GPU has written. When the
4867 * pointers are equal, the ring is idle. When the GPU
4868 * writes vectors to the ring buffer, it increments the
4869 * wptr. When there is an interrupt, the host then starts
4870 * fetching vectors and processing them until the pointers are
4871 * equal again at which point it updates the rptr.
4872 */
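
/*
 * Editor's sketch of the rptr/wptr protocol described above. This is
 * not the driver's handler (see cik_irq_process() below); names are
 * hypothetical and only the bare consumer loop is shown.
 */
static inline void ih_drain_sketch(const __le32 *ring, u32 ptr_mask,
				   u32 *rptr, u32 wptr)
{
	while (*rptr != wptr) {
		/* pointers are byte offsets; each IV entry is 16 bytes */
		u32 src_id = le32_to_cpu(ring[*rptr / 4]) & 0xff;

		(void)src_id;	/* dispatch on src_id here */
		*rptr = (*rptr + 16) & ptr_mask;
	}
}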
4873
4874/**
4875 * cik_enable_interrupts - Enable the interrupt ring buffer
4876 *
4877 * @rdev: radeon_device pointer
4878 *
4879 * Enable the interrupt ring buffer (CIK).
4880 */
4881static void cik_enable_interrupts(struct radeon_device *rdev)
4882{
4883 u32 ih_cntl = RREG32(IH_CNTL);
4884 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
4885
4886 ih_cntl |= ENABLE_INTR;
4887 ih_rb_cntl |= IH_RB_ENABLE;
4888 WREG32(IH_CNTL, ih_cntl);
4889 WREG32(IH_RB_CNTL, ih_rb_cntl);
4890 rdev->ih.enabled = true;
4891}
4892
4893/**
4894 * cik_disable_interrupts - Disable the interrupt ring buffer
4895 *
4896 * @rdev: radeon_device pointer
4897 *
4898 * Disable the interrupt ring buffer (CIK).
4899 */
4900static void cik_disable_interrupts(struct radeon_device *rdev)
4901{
4902 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
4903 u32 ih_cntl = RREG32(IH_CNTL);
4904
4905 ih_rb_cntl &= ~IH_RB_ENABLE;
4906 ih_cntl &= ~ENABLE_INTR;
4907 WREG32(IH_RB_CNTL, ih_rb_cntl);
4908 WREG32(IH_CNTL, ih_cntl);
4909 /* set rptr, wptr to 0 */
4910 WREG32(IH_RB_RPTR, 0);
4911 WREG32(IH_RB_WPTR, 0);
4912 rdev->ih.enabled = false;
4913 rdev->ih.rptr = 0;
4914}
4915
4916/**
4917 * cik_disable_interrupt_state - Disable all interrupt sources
4918 *
4919 * @rdev: radeon_device pointer
4920 *
4921 * Clear all interrupt enable bits used by the driver (CIK).
4922 */
4923static void cik_disable_interrupt_state(struct radeon_device *rdev)
4924{
4925 u32 tmp;
4926
4927 /* gfx ring */
4928 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4929 /* sdma */
4930 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
4931 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
4932 tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
4933 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
4934 /* compute queues */
4935 WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
4936 WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
4937 WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
4938 WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
4939 WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
4940 WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
4941 WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
4942 WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
4943 /* grbm */
4944 WREG32(GRBM_INT_CNTL, 0);
4945 /* vline/vblank, etc. */
4946 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
4947 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
4948 if (rdev->num_crtc >= 4) {
4949 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
4950 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
4951 }
4952 if (rdev->num_crtc >= 6) {
4953 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
4954 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
4955 }
4956
4957 /* dac hotplug */
4958 WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
4959
4960 /* digital hotplug */
4961 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4962 WREG32(DC_HPD1_INT_CONTROL, tmp);
4963 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4964 WREG32(DC_HPD2_INT_CONTROL, tmp);
4965 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4966 WREG32(DC_HPD3_INT_CONTROL, tmp);
4967 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4968 WREG32(DC_HPD4_INT_CONTROL, tmp);
4969 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4970 WREG32(DC_HPD5_INT_CONTROL, tmp);
4971 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4972 WREG32(DC_HPD6_INT_CONTROL, tmp);
4974}
4975
4976/**
4977 * cik_irq_init - init and enable the interrupt ring
4978 *
4979 * @rdev: radeon_device pointer
4980 *
4981 * Allocate a ring buffer for the interrupt controller,
4982 * enable the RLC, disable interrupts, set up the IH
4983 * ring buffer and enable it (CIK).
4984 * Called at device load and resume.
4985 * Returns 0 for success, errors for failure.
4986 */
4987static int cik_irq_init(struct radeon_device *rdev)
4988{
4989 int ret = 0;
4990 int rb_bufsz;
4991 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
4992
4993 /* allocate ring */
4994 ret = r600_ih_ring_alloc(rdev);
4995 if (ret)
4996 return ret;
4997
4998 /* disable irqs */
4999 cik_disable_interrupts(rdev);
5000
5001 /* init rlc */
5002 ret = cik_rlc_resume(rdev);
5003 if (ret) {
5004 r600_ih_ring_fini(rdev);
5005 return ret;
5006 }
5007
5008 /* setup interrupt control */
5009 /* XXX this should actually be a bus address, not an MC address. same on older asics */
5010 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
5011 interrupt_cntl = RREG32(INTERRUPT_CNTL);
5012 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
5013 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
5014 */
5015 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
5016 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
5017 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
5018 WREG32(INTERRUPT_CNTL, interrupt_cntl);
5019
5020 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5021 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
5022
5023 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
5024 IH_WPTR_OVERFLOW_CLEAR |
5025 (rb_bufsz << 1));
5026
5027 if (rdev->wb.enabled)
5028 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
5029
5030 /* set the writeback address whether it's enabled or not */
5031 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
5032 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
5033
5034 WREG32(IH_RB_CNTL, ih_rb_cntl);
5035
5036 /* set rptr, wptr to 0 */
5037 WREG32(IH_RB_RPTR, 0);
5038 WREG32(IH_RB_WPTR, 0);
5039
5040 /* Default settings for IH_CNTL (disabled at first) */
5041 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
5042 /* RPTR_REARM only works if msi's are enabled */
5043 if (rdev->msi_enabled)
5044 ih_cntl |= RPTR_REARM;
5045 WREG32(IH_CNTL, ih_cntl);
5046
5047 /* force the active interrupt state to all disabled */
5048 cik_disable_interrupt_state(rdev);
5049
5050 pci_set_master(rdev->pdev);
5051
5052 /* enable irqs */
5053 cik_enable_interrupts(rdev);
5054
5055 return ret;
5056}
5057
5058/**
5059 * cik_irq_set - enable/disable interrupt sources
5060 *
5061 * @rdev: radeon_device pointer
5062 *
5063 * Enable interrupt sources on the GPU (vblanks, hpd,
5064 * etc.) (CIK).
5065 * Returns 0 for success, errors for failure.
5066 */
5067int cik_irq_set(struct radeon_device *rdev)
5068{
5069 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
5070 PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
5071 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
5072 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
5073 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
5074 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
5075 u32 grbm_int_cntl = 0;
5076 u32 dma_cntl, dma_cntl1;
5077
5078 if (!rdev->irq.installed) {
5079 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
5080 return -EINVAL;
5081 }
5082 /* don't enable anything if the ih is disabled */
5083 if (!rdev->ih.enabled) {
5084 cik_disable_interrupts(rdev);
5085 /* force the active interrupt state to all disabled */
5086 cik_disable_interrupt_state(rdev);
5087 return 0;
5088 }
5089
5090 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
5091 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
5092 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
5093 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
5094 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
5095 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
5096
5097 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5098 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
5099
5100 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5101 cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5102 cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5103 cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5104 cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5105 cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5106 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5107 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
5108
5109 /* enable CP interrupts on all rings */
5110 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
5111 DRM_DEBUG("cik_irq_set: sw int gfx\n");
5112 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
5113 }
5114 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
5115 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
5116 DRM_DEBUG("si_irq_set: sw int cp1\n");
5117 if (ring->me == 1) {
5118 switch (ring->pipe) {
5119 case 0:
5120 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
5121 break;
5122 case 1:
5123 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
5124 break;
5125 case 2:
5126 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
5127 break;
5128 case 3:
5129 cp_m1p3 |= TIME_STAMP_INT_ENABLE;
5130 break;
5131 default:
5132 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
5133 break;
5134 }
5135 } else if (ring->me == 2) {
5136 switch (ring->pipe) {
5137 case 0:
5138 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
5139 break;
5140 case 1:
5141 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
5142 break;
5143 case 2:
5144 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
5145 break;
5146 case 3:
5147 cp_m2p3 |= TIME_STAMP_INT_ENABLE;
5148 break;
5149 default:
5150 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
5151 break;
5152 }
5153 } else {
5154 DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me);
5155 }
5156 }
5157 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
5158 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
5159 DRM_DEBUG("si_irq_set: sw int cp2\n");
5160 if (ring->me == 1) {
5161 switch (ring->pipe) {
5162 case 0:
5163 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
5164 break;
5165 case 1:
5166 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
5167 break;
5168 case 2:
5169 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
5170 break;
5171 case 3:
5172 cp_m1p3 |= TIME_STAMP_INT_ENABLE;
5173 break;
5174 default:
5175 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
5176 break;
5177 }
5178 } else if (ring->me == 2) {
5179 switch (ring->pipe) {
5180 case 0:
5181 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
5182 break;
5183 case 1:
5184 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
5185 break;
5186 case 2:
5187 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
5188 break;
5189 case 3:
5190 cp_m2p3 |= TIME_STAMP_INT_ENABLE;
5191 break;
5192 default:
5193 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
5194 break;
5195 }
5196 } else {
5197 DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me);
5198 }
5199 }
5200
5201 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
5202 DRM_DEBUG("cik_irq_set: sw int dma\n");
5203 dma_cntl |= TRAP_ENABLE;
5204 }
5205
5206 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
5207 DRM_DEBUG("cik_irq_set: sw int dma1\n");
5208 dma_cntl1 |= TRAP_ENABLE;
5209 }
5210
5211 if (rdev->irq.crtc_vblank_int[0] ||
5212 atomic_read(&rdev->irq.pflip[0])) {
5213 DRM_DEBUG("cik_irq_set: vblank 0\n");
5214 crtc1 |= VBLANK_INTERRUPT_MASK;
5215 }
5216 if (rdev->irq.crtc_vblank_int[1] ||
5217 atomic_read(&rdev->irq.pflip[1])) {
5218 DRM_DEBUG("cik_irq_set: vblank 1\n");
5219 crtc2 |= VBLANK_INTERRUPT_MASK;
5220 }
5221 if (rdev->irq.crtc_vblank_int[2] ||
5222 atomic_read(&rdev->irq.pflip[2])) {
5223 DRM_DEBUG("cik_irq_set: vblank 2\n");
5224 crtc3 |= VBLANK_INTERRUPT_MASK;
5225 }
5226 if (rdev->irq.crtc_vblank_int[3] ||
5227 atomic_read(&rdev->irq.pflip[3])) {
5228 DRM_DEBUG("cik_irq_set: vblank 3\n");
5229 crtc4 |= VBLANK_INTERRUPT_MASK;
5230 }
5231 if (rdev->irq.crtc_vblank_int[4] ||
5232 atomic_read(&rdev->irq.pflip[4])) {
5233 DRM_DEBUG("cik_irq_set: vblank 4\n");
5234 crtc5 |= VBLANK_INTERRUPT_MASK;
5235 }
5236 if (rdev->irq.crtc_vblank_int[5] ||
5237 atomic_read(&rdev->irq.pflip[5])) {
5238 DRM_DEBUG("cik_irq_set: vblank 5\n");
5239 crtc6 |= VBLANK_INTERRUPT_MASK;
5240 }
5241 if (rdev->irq.hpd[0]) {
5242 DRM_DEBUG("cik_irq_set: hpd 1\n");
5243 hpd1 |= DC_HPDx_INT_EN;
5244 }
5245 if (rdev->irq.hpd[1]) {
5246 DRM_DEBUG("cik_irq_set: hpd 2\n");
5247 hpd2 |= DC_HPDx_INT_EN;
5248 }
5249 if (rdev->irq.hpd[2]) {
5250 DRM_DEBUG("cik_irq_set: hpd 3\n");
5251 hpd3 |= DC_HPDx_INT_EN;
5252 }
5253 if (rdev->irq.hpd[3]) {
5254 DRM_DEBUG("cik_irq_set: hpd 4\n");
5255 hpd4 |= DC_HPDx_INT_EN;
5256 }
5257 if (rdev->irq.hpd[4]) {
5258 DRM_DEBUG("cik_irq_set: hpd 5\n");
5259 hpd5 |= DC_HPDx_INT_EN;
5260 }
5261 if (rdev->irq.hpd[5]) {
5262 DRM_DEBUG("cik_irq_set: hpd 6\n");
5263 hpd6 |= DC_HPDx_INT_EN;
5264 }
5265
5266 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
5267
5268 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
5269 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
5270
5271 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
5272 WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
5273 WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
5274 WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
5275 WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
5276 WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
5277 WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
5278 WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
5279
5280 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
5281
5282 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
5283 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
5284 if (rdev->num_crtc >= 4) {
5285 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
5286 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
5287 }
5288 if (rdev->num_crtc >= 6) {
5289 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
5290 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
5291 }
5292
5293 WREG32(DC_HPD1_INT_CONTROL, hpd1);
5294 WREG32(DC_HPD2_INT_CONTROL, hpd2);
5295 WREG32(DC_HPD3_INT_CONTROL, hpd3);
5296 WREG32(DC_HPD4_INT_CONTROL, hpd4);
5297 WREG32(DC_HPD5_INT_CONTROL, hpd5);
5298 WREG32(DC_HPD6_INT_CONTROL, hpd6);
5299
5300 return 0;
5301}
5302
5303/**
5304 * cik_irq_ack - ack interrupt sources
5305 *
5306 * @rdev: radeon_device pointer
5307 *
5308 * Ack interrupt sources on the GPU (vblanks, hpd,
5309 * etc.) (CIK). Certain interrupt sources are sw
5310 * generated and do not require an explicit ack.
5311 */
5312static inline void cik_irq_ack(struct radeon_device *rdev)
5313{
5314 u32 tmp;
5315
5316 rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
5317 rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
5318 rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
5319 rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
5320 rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
5321 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
5322 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
5323
5324 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
5325 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
5326 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
5327 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
5328 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
5329 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
5330 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
5331 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
5332
5333 if (rdev->num_crtc >= 4) {
5334 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
5335 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
5336 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
5337 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
5338 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
5339 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
5340 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
5341 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
5342 }
5343
5344 if (rdev->num_crtc >= 6) {
5345 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
5346 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
5347 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
5348 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
5349 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
5350 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
5351 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
5352 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
5353 }
5354
5355 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
5356 tmp = RREG32(DC_HPD1_INT_CONTROL);
5357 tmp |= DC_HPDx_INT_ACK;
5358 WREG32(DC_HPD1_INT_CONTROL, tmp);
5359 }
5360 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
5361 tmp = RREG32(DC_HPD2_INT_CONTROL);
5362 tmp |= DC_HPDx_INT_ACK;
5363 WREG32(DC_HPD2_INT_CONTROL, tmp);
5364 }
5365 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
5366 tmp = RREG32(DC_HPD3_INT_CONTROL);
5367 tmp |= DC_HPDx_INT_ACK;
5368 WREG32(DC_HPD3_INT_CONTROL, tmp);
5369 }
5370 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
5371 tmp = RREG32(DC_HPD4_INT_CONTROL);
5372 tmp |= DC_HPDx_INT_ACK;
5373 WREG32(DC_HPD4_INT_CONTROL, tmp);
5374 }
5375 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
5376 tmp = RREG32(DC_HPD5_INT_CONTROL);
5377 tmp |= DC_HPDx_INT_ACK;
5378 WREG32(DC_HPD5_INT_CONTROL, tmp);
5379 }
5380 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
5381 tmp = RREG32(DC_HPD6_INT_CONTROL);
5382 tmp |= DC_HPDx_INT_ACK;
5383 WREG32(DC_HPD6_INT_CONTROL, tmp);
5384 }
5385}
5386
5387/**
5388 * cik_irq_disable - disable interrupts
5389 *
5390 * @rdev: radeon_device pointer
5391 *
5392 * Disable interrupts on the hw (CIK).
5393 */
5394static void cik_irq_disable(struct radeon_device *rdev)
5395{
5396 cik_disable_interrupts(rdev);
5397 /* Wait and acknowledge irq */
5398 mdelay(1);
5399 cik_irq_ack(rdev);
5400 cik_disable_interrupt_state(rdev);
5401}
5402
5403/**
5404 * cik_irq_suspend - disable interrupts for suspend
5405 *
5406 * @rdev: radeon_device pointer
5407 *
5408 * Disable interrupts and stop the RLC (CIK).
5409 * Used for suspend.
5410 */
5411static void cik_irq_suspend(struct radeon_device *rdev)
5412{
5413 cik_irq_disable(rdev);
5414 cik_rlc_stop(rdev);
5415}
5416
5417/**
5418 * cik_irq_fini - tear down interrupt support
5419 *
5420 * @rdev: radeon_device pointer
5421 *
5422 * Disable interrupts on the hw and free the IH ring
5423 * buffer (CIK).
5424 * Used for driver unload.
5425 */
5426static void cik_irq_fini(struct radeon_device *rdev)
5427{
5428 cik_irq_suspend(rdev);
5429 r600_ih_ring_fini(rdev);
5430}
5431
5432/**
5433 * cik_get_ih_wptr - get the IH ring buffer wptr
5434 *
5435 * @rdev: radeon_device pointer
5436 *
5437 * Get the IH ring buffer wptr from either the register
5438 * or the writeback memory buffer (CIK). Also check for
5439 * ring buffer overflow and deal with it.
5440 * Used by cik_irq_process().
5441 * Returns the value of the wptr.
5442 */
5443static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
5444{
5445 u32 wptr, tmp;
5446
5447 if (rdev->wb.enabled)
5448 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
5449 else
5450 wptr = RREG32(IH_RB_WPTR);
5451
5452 if (wptr & RB_OVERFLOW) {
5453 /* When a ring buffer overflow happens, start parsing interrupts
5454 * from the last vector that was not overwritten (wptr + 16).
5455 * Hopefully this allows us to catch up.
5456 */
5457 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
5458 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
5459 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
5460 tmp = RREG32(IH_RB_CNTL);
5461 tmp |= IH_WPTR_OVERFLOW_CLEAR;
5462 WREG32(IH_RB_CNTL, tmp);
5463 }
5464 return (wptr & rdev->ih.ptr_mask);
5465}
5466
5467/* CIK IV Ring
5468 * Each IV ring entry is 128 bits:
5469 * [7:0] - interrupt source id
5470 * [31:8] - reserved
5471 * [59:32] - interrupt source data
5472 * [63:60] - reserved
5473 * [71:64] - RINGID
5474 * CP:
5475 * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
5476 * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
5477 * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
5478 * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
5479 * PIPE_ID - ME0 0=3D
5480 * - ME1&2 compute dispatcher (4 pipes each)
5481 * SDMA:
5482 * INSTANCE_ID [1:0], QUEUE_ID[1:0]
5483 * INSTANCE_ID - 0 = sdma0, 1 = sdma1
5484 * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
5485 * [79:72] - VMID
5486 * [95:80] - PASID
5487 * [127:96] - reserved
5488 */
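
/*
 * Editor's sketch: decoding the first three dwords of one IV entry
 * per the layout above. The field extraction mirrors
 * cik_irq_process() below; struct and function names are hypothetical.
 */
struct cik_iv_fields_sketch {
	u8 src_id;	/* [7:0] interrupt source id */
	u32 src_data;	/* [59:32] source data (28 bits used) */
	u8 ring_id;	/* [71:64] ME/PIPE/QUEUE or SDMA instance/queue */
};

static inline void cik_decode_iv_sketch(const __le32 *dw,
					struct cik_iv_fields_sketch *f)
{
	f->src_id   = le32_to_cpu(dw[0]) & 0xff;
	f->src_data = le32_to_cpu(dw[1]) & 0xfffffff;
	f->ring_id  = le32_to_cpu(dw[2]) & 0xff;
}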
5489/**
5490 * cik_irq_process - interrupt handler
5491 *
5492 * @rdev: radeon_device pointer
5493 *
5494 * Interrupt handler (CIK). Walk the IH ring,
5495 * ack interrupts and schedule work to handle
5496 * interrupt events.
5497 * Returns irq process return code.
5498 */
5499int cik_irq_process(struct radeon_device *rdev)
5500{
5501 struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
5502 struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
5503 u32 wptr;
5504 u32 rptr;
5505 u32 src_id, src_data, ring_id;
5506 u8 me_id, pipe_id, queue_id;
5507 u32 ring_index;
5508 bool queue_hotplug = false;
5509 bool queue_reset = false;
5510
5511 if (!rdev->ih.enabled || rdev->shutdown)
5512 return IRQ_NONE;
5513
5514 wptr = cik_get_ih_wptr(rdev);
5515
5516restart_ih:
5517 /* is somebody else already processing irqs? */
5518 if (atomic_xchg(&rdev->ih.lock, 1))
5519 return IRQ_NONE;
5520
5521 rptr = rdev->ih.rptr;
5522 DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
5523
5524 /* Order reading of wptr vs. reading of IH ring data */
5525 rmb();
5526
5527 /* display interrupts */
5528 cik_irq_ack(rdev);
5529
5530 while (rptr != wptr) {
5531 /* wptr/rptr are in bytes! */
5532 ring_index = rptr / 4;
5533 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
5534 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
5535 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
5536
5537 switch (src_id) {
5538 case 1: /* D1 vblank/vline */
5539 switch (src_data) {
5540 case 0: /* D1 vblank */
5541 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
5542 if (rdev->irq.crtc_vblank_int[0]) {
5543 drm_handle_vblank(rdev->ddev, 0);
5544 rdev->pm.vblank_sync = true;
5545 wake_up(&rdev->irq.vblank_queue);
5546 }
5547 if (atomic_read(&rdev->irq.pflip[0]))
5548 radeon_crtc_handle_flip(rdev, 0);
5549 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
5550 DRM_DEBUG("IH: D1 vblank\n");
5551 }
5552 break;
5553 case 1: /* D1 vline */
5554 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
5555 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
5556 DRM_DEBUG("IH: D1 vline\n");
5557 }
5558 break;
5559 default:
5560 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5561 break;
5562 }
5563 break;
5564 case 2: /* D2 vblank/vline */
5565 switch (src_data) {
5566 case 0: /* D2 vblank */
5567 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
5568 if (rdev->irq.crtc_vblank_int[1]) {
5569 drm_handle_vblank(rdev->ddev, 1);
5570 rdev->pm.vblank_sync = true;
5571 wake_up(&rdev->irq.vblank_queue);
5572 }
5573 if (atomic_read(&rdev->irq.pflip[1]))
5574 radeon_crtc_handle_flip(rdev, 1);
5575 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
5576 DRM_DEBUG("IH: D2 vblank\n");
5577 }
5578 break;
5579 case 1: /* D2 vline */
5580 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
5581 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
5582 DRM_DEBUG("IH: D2 vline\n");
5583 }
5584 break;
5585 default:
5586 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5587 break;
5588 }
5589 break;
5590 case 3: /* D3 vblank/vline */
5591 switch (src_data) {
5592 case 0: /* D3 vblank */
5593 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
5594 if (rdev->irq.crtc_vblank_int[2]) {
5595 drm_handle_vblank(rdev->ddev, 2);
5596 rdev->pm.vblank_sync = true;
5597 wake_up(&rdev->irq.vblank_queue);
5598 }
5599 if (atomic_read(&rdev->irq.pflip[2]))
5600 radeon_crtc_handle_flip(rdev, 2);
5601 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
5602 DRM_DEBUG("IH: D3 vblank\n");
5603 }
5604 break;
5605 case 1: /* D3 vline */
5606 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
5607 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
5608 DRM_DEBUG("IH: D3 vline\n");
5609 }
5610 break;
5611 default:
5612 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5613 break;
5614 }
5615 break;
5616 case 4: /* D4 vblank/vline */
5617 switch (src_data) {
5618 case 0: /* D4 vblank */
5619 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
5620 if (rdev->irq.crtc_vblank_int[3]) {
5621 drm_handle_vblank(rdev->ddev, 3);
5622 rdev->pm.vblank_sync = true;
5623 wake_up(&rdev->irq.vblank_queue);
5624 }
5625 if (atomic_read(&rdev->irq.pflip[3]))
5626 radeon_crtc_handle_flip(rdev, 3);
5627 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
5628 DRM_DEBUG("IH: D4 vblank\n");
5629 }
5630 break;
5631 case 1: /* D4 vline */
5632 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
5633 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
5634 DRM_DEBUG("IH: D4 vline\n");
5635 }
5636 break;
5637 default:
5638 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5639 break;
5640 }
5641 break;
5642 case 5: /* D5 vblank/vline */
5643 switch (src_data) {
5644 case 0: /* D5 vblank */
5645 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
5646 if (rdev->irq.crtc_vblank_int[4]) {
5647 drm_handle_vblank(rdev->ddev, 4);
5648 rdev->pm.vblank_sync = true;
5649 wake_up(&rdev->irq.vblank_queue);
5650 }
5651 if (atomic_read(&rdev->irq.pflip[4]))
5652 radeon_crtc_handle_flip(rdev, 4);
5653 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
5654 DRM_DEBUG("IH: D5 vblank\n");
5655 }
5656 break;
5657 case 1: /* D5 vline */
5658 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
5659 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
5660 DRM_DEBUG("IH: D5 vline\n");
5661 }
5662 break;
5663 default:
5664 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5665 break;
5666 }
5667 break;
5668 case 6: /* D6 vblank/vline */
5669 switch (src_data) {
5670 case 0: /* D6 vblank */
5671 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
5672 if (rdev->irq.crtc_vblank_int[5]) {
5673 drm_handle_vblank(rdev->ddev, 5);
5674 rdev->pm.vblank_sync = true;
5675 wake_up(&rdev->irq.vblank_queue);
5676 }
5677 if (atomic_read(&rdev->irq.pflip[5]))
5678 radeon_crtc_handle_flip(rdev, 5);
5679 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
5680 DRM_DEBUG("IH: D6 vblank\n");
5681 }
5682 break;
5683 case 1: /* D6 vline */
5684 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
5685 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
5686 DRM_DEBUG("IH: D6 vline\n");
5687 }
5688 break;
5689 default:
5690 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5691 break;
5692 }
5693 break;
5694 case 42: /* HPD hotplug */
5695 switch (src_data) {
5696 case 0:
5697 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
5698 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
5699 queue_hotplug = true;
5700 DRM_DEBUG("IH: HPD1\n");
5701 }
5702 break;
5703 case 1:
5704 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
5705 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
5706 queue_hotplug = true;
5707 DRM_DEBUG("IH: HPD2\n");
5708 }
5709 break;
5710 case 2:
5711 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
5712 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
5713 queue_hotplug = true;
5714 DRM_DEBUG("IH: HPD3\n");
5715 }
5716 break;
5717 case 3:
5718 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
5719 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
5720 queue_hotplug = true;
5721 DRM_DEBUG("IH: HPD4\n");
5722 }
5723 break;
5724 case 4:
5725 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
5726 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
5727 queue_hotplug = true;
5728 DRM_DEBUG("IH: HPD5\n");
5729 }
5730 break;
5731 case 5:
5732 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
5733 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
5734 queue_hotplug = true;
5735 DRM_DEBUG("IH: HPD6\n");
5736 }
5737 break;
5738 default:
5739 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5740 break;
5741 }
5742 break;
5743 case 146:
5744 case 147:
5745 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
5746 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5747 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
5748 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5749 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
5750 /* reset addr and status */
5751 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5752 break;
5753 case 176: /* GFX RB CP_INT */
5754 case 177: /* GFX IB CP_INT */
5755 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
5756 break;
5757 case 181: /* CP EOP event */
5758 DRM_DEBUG("IH: CP EOP\n");
5759 /* XXX check the bitfield order! */
5760 me_id = (ring_id & 0x60) >> 5;
5761 pipe_id = (ring_id & 0x18) >> 3;
5762 queue_id = (ring_id & 0x7) >> 0;
5763 switch (me_id) {
5764 case 0:
5765 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
5766 break;
5767 case 1:
5768 case 2:
5769				if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
5770 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
5771				if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
5772 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
5773 break;
5774 }
5775 break;
5776 case 184: /* CP Privileged reg access */
5777 DRM_ERROR("Illegal register access in command stream\n");
5778 /* XXX check the bitfield order! */
5779 me_id = (ring_id & 0x60) >> 5;
5780 pipe_id = (ring_id & 0x18) >> 3;
5781 queue_id = (ring_id & 0x7) >> 0;
5782 switch (me_id) {
5783 case 0:
5784 /* This results in a full GPU reset, but all we need to do is soft
5785 * reset the CP for gfx
5786 */
5787 queue_reset = true;
5788 break;
5789 case 1:
5790 /* XXX compute */
5791 queue_reset = true;
5792 break;
5793 case 2:
5794 /* XXX compute */
5795 queue_reset = true;
5796 break;
5797 }
5798 break;
5799 case 185: /* CP Privileged inst */
5800 DRM_ERROR("Illegal instruction in command stream\n");
5801 /* XXX check the bitfield order! */
5802 me_id = (ring_id & 0x60) >> 5;
5803 pipe_id = (ring_id & 0x18) >> 3;
5804 queue_id = (ring_id & 0x7) >> 0;
5805 switch (me_id) {
5806 case 0:
5807 /* This results in a full GPU reset, but all we need to do is soft
5808 * reset the CP for gfx
5809 */
5810 queue_reset = true;
5811 break;
5812 case 1:
5813 /* XXX compute */
5814 queue_reset = true;
5815 break;
5816 case 2:
5817 /* XXX compute */
5818 queue_reset = true;
5819 break;
5820 }
5821 break;
5822 case 224: /* SDMA trap event */
5823 /* XXX check the bitfield order! */
5824 me_id = (ring_id & 0x3) >> 0;
5825 queue_id = (ring_id & 0xc) >> 2;
5826 DRM_DEBUG("IH: SDMA trap\n");
5827 switch (me_id) {
5828 case 0:
5829 switch (queue_id) {
5830 case 0:
5831 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
5832 break;
5833 case 1:
5834 /* XXX compute */
5835 break;
5836 case 2:
5837 /* XXX compute */
5838 break;
5839 }
5840 break;
5841 case 1:
5842 switch (queue_id) {
5843 case 0:
5844 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
5845 break;
5846 case 1:
5847 /* XXX compute */
5848 break;
5849 case 2:
5850 /* XXX compute */
5851 break;
5852 }
5853 break;
5854 }
5855 break;
5856 case 241: /* SDMA Privileged inst */
5857 case 247: /* SDMA Privileged inst */
5858 DRM_ERROR("Illegal instruction in SDMA command stream\n");
5859 /* XXX check the bitfield order! */
5860 me_id = (ring_id & 0x3) >> 0;
5861 queue_id = (ring_id & 0xc) >> 2;
5862 switch (me_id) {
5863 case 0:
5864 switch (queue_id) {
5865 case 0:
5866 queue_reset = true;
5867 break;
5868 case 1:
5869 /* XXX compute */
5870 queue_reset = true;
5871 break;
5872 case 2:
5873 /* XXX compute */
5874 queue_reset = true;
5875 break;
5876 }
5877 break;
5878 case 1:
5879 switch (queue_id) {
5880 case 0:
5881 queue_reset = true;
5882 break;
5883 case 1:
5884 /* XXX compute */
5885 queue_reset = true;
5886 break;
5887 case 2:
5888 /* XXX compute */
5889 queue_reset = true;
5890 break;
5891 }
5892 break;
5893 }
5894 break;
5895 case 233: /* GUI IDLE */
5896 DRM_DEBUG("IH: GUI idle\n");
5897 break;
5898 default:
5899 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5900 break;
5901 }
5902
5903 /* wptr/rptr are in bytes! */
5904 rptr += 16;
5905 rptr &= rdev->ih.ptr_mask;
5906 }
5907 if (queue_hotplug)
5908 schedule_work(&rdev->hotplug_work);
5909 if (queue_reset)
5910 schedule_work(&rdev->reset_work);
5911 rdev->ih.rptr = rptr;
5912 WREG32(IH_RB_RPTR, rdev->ih.rptr);
5913 atomic_set(&rdev->ih.lock, 0);
5914
5915 /* make sure wptr hasn't changed while processing */
5916 wptr = cik_get_ih_wptr(rdev);
5917 if (wptr != rptr)
5918 goto restart_ih;
5919
5920 return IRQ_HANDLED;
5921}
5922
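The atomic_xchg() at restart_ih above is a try-lock: the first caller swaps rdev->ih.lock from 0 to 1 and walks the ring; a concurrent caller sees 1 and returns IRQ_NONE. Re-reading wptr after unlocking and jumping back to restart_ih closes the race where new vectors land during processing. A hedged userspace sketch of the same pattern using C11 atomics (the names and stubs are illustrative, not the kernel's API):

#include <stdatomic.h>

static atomic_int ih_lock;
static unsigned int hw_wptr;	/* illustrative stand-in for the hw register */

static unsigned int read_wptr(void) { return hw_wptr; }
static void consume_entries(unsigned int rptr, unsigned int wptr)
{
	(void)rptr; (void)wptr;	/* the real handler walks rptr..wptr here */
}

static void ih_process(void)
{
	unsigned int rptr = 0;
	unsigned int wptr = read_wptr();

	for (;;) {
		/* try-lock: the first caller swaps 0 -> 1, others bail out */
		if (atomic_exchange(&ih_lock, 1))
			return;
		consume_entries(rptr, wptr);
		rptr = wptr;
		atomic_store(&ih_lock, 0);
		/* mirrors the goto restart_ih: re-check for late arrivals */
		wptr = read_wptr();
		if (wptr == rptr)
			break;
	}
}
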
5923/*
5924 * startup/shutdown callbacks
5925 */
5926/**
5927 * cik_startup - program the asic to a functional state
5928 *
5929 * @rdev: radeon_device pointer
5930 *
5931 * Programs the asic to a functional state (CIK).
5932 * Called by cik_init() and cik_resume().
5933 * Returns 0 for success, error for failure.
5934 */
5935static int cik_startup(struct radeon_device *rdev)
5936{
5937 struct radeon_ring *ring;
5938 int r;
5939
5940 if (rdev->flags & RADEON_IS_IGP) {
5941 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
5942 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
5943 r = cik_init_microcode(rdev);
5944 if (r) {
5945 DRM_ERROR("Failed to load firmware!\n");
5946 return r;
5947 }
5948 }
5949 } else {
5950 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
5951 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
5952 !rdev->mc_fw) {
5953 r = cik_init_microcode(rdev);
5954 if (r) {
5955 DRM_ERROR("Failed to load firmware!\n");
5956 return r;
5957 }
5958 }
5959
5960 r = ci_mc_load_microcode(rdev);
5961 if (r) {
5962 DRM_ERROR("Failed to load MC firmware!\n");
5963 return r;
5964 }
5965 }
5966
5967 r = r600_vram_scratch_init(rdev);
5968 if (r)
5969 return r;
5970
5971 cik_mc_program(rdev);
5972 r = cik_pcie_gart_enable(rdev);
5973 if (r)
5974 return r;
5975 cik_gpu_init(rdev);
5976
5977 /* allocate rlc buffers */
5978 r = si_rlc_init(rdev);
5979 if (r) {
5980 DRM_ERROR("Failed to init rlc BOs!\n");
5981 return r;
5982 }
5983
5984 /* allocate wb buffer */
5985 r = radeon_wb_init(rdev);
5986 if (r)
5987 return r;
5988
5989 /* allocate mec buffers */
5990 r = cik_mec_init(rdev);
5991 if (r) {
5992 DRM_ERROR("Failed to init MEC BOs!\n");
5993 return r;
5994 }
5995
5996 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
5997 if (r) {
5998 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
5999 return r;
6000 }
6001
6002 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6003 if (r) {
6004 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6005 return r;
6006 }
6007
6008 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6009 if (r) {
6010 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6011 return r;
6012 }
6013
6014 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
6015 if (r) {
6016 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
6017 return r;
6018 }
6019
6020 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6021 if (r) {
6022 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
6023 return r;
6024 }
6025
6026 r = cik_uvd_resume(rdev);
6027 if (!r) {
6028 r = radeon_fence_driver_start_ring(rdev,
6029 R600_RING_TYPE_UVD_INDEX);
6030 if (r)
6031 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
6032 }
6033 if (r)
6034 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
6035
6036 /* Enable IRQ */
6037 if (!rdev->irq.installed) {
6038 r = radeon_irq_kms_init(rdev);
6039 if (r)
6040 return r;
6041 }
6042
6043 r = cik_irq_init(rdev);
6044 if (r) {
6045 DRM_ERROR("radeon: IH init failed (%d).\n", r);
6046 radeon_irq_kms_fini(rdev);
6047 return r;
6048 }
6049 cik_irq_set(rdev);
6050
6051 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6052 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6053 CP_RB0_RPTR, CP_RB0_WPTR,
6054 0, 0xfffff, RADEON_CP_PACKET2);
6055 if (r)
6056 return r;
6057
6058 /* set up the compute queues */
6059 /* type-2 packets are deprecated on MEC, use type-3 instead */
6060 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6061 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6062 CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
6063 0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
6064 if (r)
6065 return r;
6066 ring->me = 1; /* first MEC */
6067 ring->pipe = 0; /* first pipe */
6068 ring->queue = 0; /* first queue */
6069 ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
6070
6071 /* type-2 packets are deprecated on MEC, use type-3 instead */
6072 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6073 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6074 CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
6075 0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
6076 if (r)
6077 return r;
6078	/* dGPUs only have 1 MEC */
6079 ring->me = 1; /* first MEC */
6080 ring->pipe = 0; /* first pipe */
6081 ring->queue = 1; /* second queue */
6082 ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
6083
6084 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6085 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6086 SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
6087 SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
6088 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
6089 if (r)
6090 return r;
6091
6092 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6093 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6094 SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
6095 SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
6096 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
6097 if (r)
6098 return r;
6099
6100 r = cik_cp_resume(rdev);
6101 if (r)
6102 return r;
6103
6104 r = cik_sdma_resume(rdev);
6105 if (r)
6106 return r;
6107
6108 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6109 if (ring->ring_size) {
6110 r = radeon_ring_init(rdev, ring, ring->ring_size,
6111 R600_WB_UVD_RPTR_OFFSET,
6112 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
6113 0, 0xfffff, RADEON_CP_PACKET2);
6114 if (!r)
6115 r = r600_uvd_init(rdev);
6116 if (r)
6117 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
6118 }
6119
6120 r = radeon_ib_pool_init(rdev);
6121 if (r) {
6122 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
6123 return r;
6124 }
6125
6126 r = radeon_vm_manager_init(rdev);
6127 if (r) {
6128 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
6129 return r;
6130 }
6131
6132 return 0;
6133}
6134
6135/**
6136 * cik_resume - resume the asic to a functional state
6137 *
6138 * @rdev: radeon_device pointer
6139 *
6140 * Programs the asic to a functional state (CIK).
6141 * Called at resume.
6142 * Returns 0 for success, error for failure.
6143 */
6144int cik_resume(struct radeon_device *rdev)
6145{
6146 int r;
6147
6148 /* post card */
6149 atom_asic_init(rdev->mode_info.atom_context);
6150
6151 /* init golden registers */
6152 cik_init_golden_registers(rdev);
6153
6154 rdev->accel_working = true;
6155 r = cik_startup(rdev);
6156 if (r) {
6157 DRM_ERROR("cik startup failed on resume\n");
6158 rdev->accel_working = false;
6159 return r;
6160 }
6161
6162 return r;
6163
6164}
6165
6166/**
6167 * cik_suspend - suspend the asic
6168 *
6169 * @rdev: radeon_device pointer
6170 *
6171 * Bring the chip into a state suitable for suspend (CIK).
6172 * Called at suspend.
6173 * Returns 0 for success.
6174 */
6175int cik_suspend(struct radeon_device *rdev)
6176{
6177 radeon_vm_manager_fini(rdev);
6178 cik_cp_enable(rdev, false);
6179 cik_sdma_enable(rdev, false);
6180 r600_uvd_rbc_stop(rdev);
6181 radeon_uvd_suspend(rdev);
6182 cik_irq_suspend(rdev);
6183 radeon_wb_disable(rdev);
6184 cik_pcie_gart_disable(rdev);
6185 return 0;
6186}
6187
6188/* The plan is to move initialization into this function and use
6189 * helper functions so that radeon_device_init does little more
6190 * than call asic-specific functions. This should also
6191 * allow us to remove a bunch of callbacks,
6192 * like vram_info.
6193 */
6194/**
6195 * cik_init - asic specific driver and hw init
6196 *
6197 * @rdev: radeon_device pointer
6198 *
6199 * Setup asic specific driver variables and program the hw
6200 * to a functional state (CIK).
6201 * Called at driver startup.
6202 * Returns 0 for success, errors for failure.
6203 */
6204int cik_init(struct radeon_device *rdev)
6205{
6206 struct radeon_ring *ring;
6207 int r;
6208
6209 /* Read BIOS */
6210 if (!radeon_get_bios(rdev)) {
6211 if (ASIC_IS_AVIVO(rdev))
6212 return -EINVAL;
6213 }
6214 /* Must be an ATOMBIOS */
6215 if (!rdev->is_atom_bios) {
6216		dev_err(rdev->dev, "Expecting atombios for CIK GPU\n");
6217 return -EINVAL;
6218 }
6219 r = radeon_atombios_init(rdev);
6220 if (r)
6221 return r;
6222
6223 /* Post card if necessary */
6224 if (!radeon_card_posted(rdev)) {
6225 if (!rdev->bios) {
6226 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
6227 return -EINVAL;
6228 }
6229 DRM_INFO("GPU not posted. posting now...\n");
6230 atom_asic_init(rdev->mode_info.atom_context);
6231 }
6232 /* init golden registers */
6233 cik_init_golden_registers(rdev);
6234 /* Initialize scratch registers */
6235 cik_scratch_init(rdev);
6236 /* Initialize surface registers */
6237 radeon_surface_init(rdev);
6238 /* Initialize clocks */
6239 radeon_get_clock_info(rdev->ddev);
6240
6241 /* Fence driver */
6242 r = radeon_fence_driver_init(rdev);
6243 if (r)
6244 return r;
6245
6246 /* initialize memory controller */
6247 r = cik_mc_init(rdev);
6248 if (r)
6249 return r;
6250 /* Memory manager */
6251 r = radeon_bo_init(rdev);
6252 if (r)
6253 return r;
6254
6255 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6256 ring->ring_obj = NULL;
6257 r600_ring_init(rdev, ring, 1024 * 1024);
6258
6259 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6260 ring->ring_obj = NULL;
6261 r600_ring_init(rdev, ring, 1024 * 1024);
6262 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
6263 if (r)
6264 return r;
6265
6266 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6267 ring->ring_obj = NULL;
6268 r600_ring_init(rdev, ring, 1024 * 1024);
6269 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
6270 if (r)
6271 return r;
6272
6273 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6274 ring->ring_obj = NULL;
6275 r600_ring_init(rdev, ring, 256 * 1024);
6276
6277 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6278 ring->ring_obj = NULL;
6279 r600_ring_init(rdev, ring, 256 * 1024);
6280
6281 r = radeon_uvd_init(rdev);
6282 if (!r) {
6283 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6284 ring->ring_obj = NULL;
6285 r600_ring_init(rdev, ring, 4096);
6286 }
6287
6288 rdev->ih.ring_obj = NULL;
6289 r600_ih_ring_init(rdev, 64 * 1024);
6290
6291 r = r600_pcie_gart_init(rdev);
6292 if (r)
6293 return r;
6294
6295 rdev->accel_working = true;
6296 r = cik_startup(rdev);
6297 if (r) {
6298 dev_err(rdev->dev, "disabling GPU acceleration\n");
6299 cik_cp_fini(rdev);
6300 cik_sdma_fini(rdev);
6301 cik_irq_fini(rdev);
6302 si_rlc_fini(rdev);
6303 cik_mec_fini(rdev);
6304 radeon_wb_fini(rdev);
6305 radeon_ib_pool_fini(rdev);
6306 radeon_vm_manager_fini(rdev);
6307 radeon_irq_kms_fini(rdev);
6308 cik_pcie_gart_fini(rdev);
6309 rdev->accel_working = false;
6310 }
6311
6312 /* Don't start up if the MC ucode is missing.
6313 * The default clocks and voltages before the MC ucode
6314	 * is loaded are not sufficient for advanced operations.
6315 */
6316 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
6317		DRM_ERROR("radeon: MC ucode required for CIK.\n");
6318 return -EINVAL;
6319 }
6320
6321 return 0;
6322}
6323
6324/**
6325 * cik_fini - asic specific driver and hw fini
6326 *
6327 * @rdev: radeon_device pointer
6328 *
6329 * Tear down the asic specific driver variables and program the hw
6330 * to an idle state (CIK).
6331 * Called at driver unload.
6332 */
6333void cik_fini(struct radeon_device *rdev)
6334{
6335 cik_cp_fini(rdev);
6336 cik_sdma_fini(rdev);
6337 cik_irq_fini(rdev);
6338 si_rlc_fini(rdev);
6339 cik_mec_fini(rdev);
6340 radeon_wb_fini(rdev);
6341 radeon_vm_manager_fini(rdev);
6342 radeon_ib_pool_fini(rdev);
6343 radeon_irq_kms_fini(rdev);
6344 radeon_uvd_fini(rdev);
6345 cik_pcie_gart_fini(rdev);
6346 r600_vram_scratch_fini(rdev);
6347 radeon_gem_fini(rdev);
6348 radeon_fence_driver_fini(rdev);
6349 radeon_bo_fini(rdev);
6350 radeon_atombios_fini(rdev);
6351 kfree(rdev->bios);
6352 rdev->bios = NULL;
6353}
6354
6355/* display watermark setup */
6356/**
6357 * dce8_line_buffer_adjust - Set up the line buffer
6358 *
6359 * @rdev: radeon_device pointer
6360 * @radeon_crtc: the selected display controller
6361 * @mode: the current display mode on the selected display
6362 * controller
6363 *
6364 * Set up the line buffer allocation for
6365 * the selected display controller (CIK).
6366 * Returns the line buffer size in pixels.
6367 */
6368static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
6369 struct radeon_crtc *radeon_crtc,
6370 struct drm_display_mode *mode)
6371{
6372 u32 tmp;
6373
6374 /*
6375 * Line Buffer Setup
6376	 * There are 6 line buffers, one for each display controller.
6377 * There are 3 partitions per LB. Select the number of partitions
6378 * to enable based on the display width. For display widths larger
6379	 * than 4096, you need to use 2 display controllers and combine
6380 * them using the stereo blender.
6381 */
6382 if (radeon_crtc->base.enabled && mode) {
6383 if (mode->crtc_hdisplay < 1920)
6384 tmp = 1;
6385 else if (mode->crtc_hdisplay < 2560)
6386 tmp = 2;
6387 else if (mode->crtc_hdisplay < 4096)
6388 tmp = 0;
6389 else {
6390 DRM_DEBUG_KMS("Mode too big for LB!\n");
6391 tmp = 0;
6392 }
6393 } else
6394 tmp = 1;
6395
6396 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
6397 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
6398
6399 if (radeon_crtc->base.enabled && mode) {
6400 switch (tmp) {
6401 case 0:
6402 default:
6403 return 4096 * 2;
6404 case 1:
6405 return 1920 * 2;
6406 case 2:
6407 return 2560 * 2;
6408 }
6409 }
6410
6411 /* controller not enabled, so no lb used */
6412 return 0;
6413}
6414
6415/**
6416 * cik_get_number_of_dram_channels - get the number of dram channels
6417 *
6418 * @rdev: radeon_device pointer
6419 *
6420 * Look up the number of video ram channels (CIK).
6421 * Used for display watermark bandwidth calculations
6422 * Returns the number of dram channels
6423 */
6424static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
6425{
6426 u32 tmp = RREG32(MC_SHARED_CHMAP);
6427
6428 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
6429 case 0:
6430 default:
6431 return 1;
6432 case 1:
6433 return 2;
6434 case 2:
6435 return 4;
6436 case 3:
6437 return 8;
6438 case 4:
6439 return 3;
6440 case 5:
6441 return 6;
6442 case 6:
6443 return 10;
6444 case 7:
6445 return 12;
6446 case 8:
6447 return 16;
6448 }
6449}
6450
6451struct dce8_wm_params {
6452 u32 dram_channels; /* number of dram channels */
6453 u32 yclk; /* bandwidth per dram data pin in kHz */
6454 u32 sclk; /* engine clock in kHz */
6455 u32 disp_clk; /* display clock in kHz */
6456 u32 src_width; /* viewport width */
6457 u32 active_time; /* active display time in ns */
6458 u32 blank_time; /* blank time in ns */
6459 bool interlaced; /* mode is interlaced */
6460 fixed20_12 vsc; /* vertical scale ratio */
6461 u32 num_heads; /* number of active crtcs */
6462 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
6463 u32 lb_size; /* line buffer allocated to pipe */
6464 u32 vtaps; /* vertical scaler taps */
6465};
6466
6467/**
6468 * dce8_dram_bandwidth - get the dram bandwidth
6469 *
6470 * @wm: watermark calculation data
6471 *
6472 * Calculate the raw dram bandwidth (CIK).
6473 * Used for display watermark bandwidth calculations
6474 * Returns the dram bandwidth in MBytes/s
6475 */
6476static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
6477{
6478 /* Calculate raw DRAM Bandwidth */
6479 fixed20_12 dram_efficiency; /* 0.7 */
6480 fixed20_12 yclk, dram_channels, bandwidth;
6481 fixed20_12 a;
6482
6483 a.full = dfixed_const(1000);
6484 yclk.full = dfixed_const(wm->yclk);
6485 yclk.full = dfixed_div(yclk, a);
6486 dram_channels.full = dfixed_const(wm->dram_channels * 4);
6487 a.full = dfixed_const(10);
6488 dram_efficiency.full = dfixed_const(7);
6489 dram_efficiency.full = dfixed_div(dram_efficiency, a);
6490 bandwidth.full = dfixed_mul(dram_channels, yclk);
6491 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
6492
6493 return dfixed_trunc(bandwidth);
6494}
6495
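As a hedged sanity check on the 20.12 fixed-point chain above: it reduces to bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7 in MB/s, and the sibling helpers below follow the same shape (32 * sclk/1000 * 0.8 for data return, 32 * disp_clk/1000 * 0.8 for DMIF). A standalone sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int yclk_khz = 500000;	/* illustrative: 500 MHz per dram pin */
	unsigned int channels = 4;	/* illustrative channel count */

	/* same result the dfixed_* chain above produces, in plain math */
	double mb_per_s = (yclk_khz / 1000.0) * (channels * 4) * 0.7;

	printf("raw dram bandwidth ~= %.0f MB/s\n", mb_per_s);	/* 5600 */
	return 0;
}
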
6496/**
6497 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
6498 *
6499 * @wm: watermark calculation data
6500 *
6501 * Calculate the dram bandwidth used for display (CIK).
6502 * Used for display watermark bandwidth calculations
6503 * Returns the dram bandwidth for display in MBytes/s
6504 */
6505static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
6506{
6507 /* Calculate DRAM Bandwidth and the part allocated to display. */
6508 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
6509 fixed20_12 yclk, dram_channels, bandwidth;
6510 fixed20_12 a;
6511
6512 a.full = dfixed_const(1000);
6513 yclk.full = dfixed_const(wm->yclk);
6514 yclk.full = dfixed_div(yclk, a);
6515 dram_channels.full = dfixed_const(wm->dram_channels * 4);
6516 a.full = dfixed_const(10);
6517	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
6518 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
6519 bandwidth.full = dfixed_mul(dram_channels, yclk);
6520 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
6521
6522 return dfixed_trunc(bandwidth);
6523}
6524
6525/**
6526 * dce8_data_return_bandwidth - get the data return bandwidth
6527 *
6528 * @wm: watermark calculation data
6529 *
6530 * Calculate the data return bandwidth used for display (CIK).
6531 * Used for display watermark bandwidth calculations
6532 * Returns the data return bandwidth in MBytes/s
6533 */
6534static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
6535{
6536 /* Calculate the display Data return Bandwidth */
6537 fixed20_12 return_efficiency; /* 0.8 */
6538 fixed20_12 sclk, bandwidth;
6539 fixed20_12 a;
6540
6541 a.full = dfixed_const(1000);
6542 sclk.full = dfixed_const(wm->sclk);
6543 sclk.full = dfixed_div(sclk, a);
6544 a.full = dfixed_const(10);
6545 return_efficiency.full = dfixed_const(8);
6546 return_efficiency.full = dfixed_div(return_efficiency, a);
6547 a.full = dfixed_const(32);
6548 bandwidth.full = dfixed_mul(a, sclk);
6549 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
6550
6551 return dfixed_trunc(bandwidth);
6552}
6553
6554/**
6555 * dce8_dmif_request_bandwidth - get the dmif bandwidth
6556 *
6557 * @wm: watermark calculation data
6558 *
6559 * Calculate the dmif bandwidth used for display (CIK).
6560 * Used for display watermark bandwidth calculations
6561 * Returns the dmif bandwidth in MBytes/s
6562 */
6563static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
6564{
6565 /* Calculate the DMIF Request Bandwidth */
6566 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
6567 fixed20_12 disp_clk, bandwidth;
6568 fixed20_12 a, b;
6569
6570 a.full = dfixed_const(1000);
6571 disp_clk.full = dfixed_const(wm->disp_clk);
6572 disp_clk.full = dfixed_div(disp_clk, a);
6573 a.full = dfixed_const(32);
6574 b.full = dfixed_mul(a, disp_clk);
6575
6576 a.full = dfixed_const(10);
6577 disp_clk_request_efficiency.full = dfixed_const(8);
6578 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
6579
6580 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
6581
6582 return dfixed_trunc(bandwidth);
6583}
6584
6585/**
6586 * dce8_available_bandwidth - get the min available bandwidth
6587 *
6588 * @wm: watermark calculation data
6589 *
6590 * Calculate the min available bandwidth used for display (CIK).
6591 * Used for display watermark bandwidth calculations
6592 * Returns the min available bandwidth in MBytes/s
6593 */
6594static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
6595{
6596	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
6597 u32 dram_bandwidth = dce8_dram_bandwidth(wm);
6598 u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
6599 u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
6600
6601 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
6602}
6603
6604/**
6605 * dce8_average_bandwidth - get the average available bandwidth
6606 *
6607 * @wm: watermark calculation data
6608 *
6609 * Calculate the average available bandwidth used for display (CIK).
6610 * Used for display watermark bandwidth calculations
6611 * Returns the average available bandwidth in MBytes/s
6612 */
6613static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
6614{
6615 /* Calculate the display mode Average Bandwidth
6616 * DisplayMode should contain the source and destination dimensions,
6617 * timing, etc.
6618 */
6619 fixed20_12 bpp;
6620 fixed20_12 line_time;
6621 fixed20_12 src_width;
6622 fixed20_12 bandwidth;
6623 fixed20_12 a;
6624
6625 a.full = dfixed_const(1000);
6626 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
6627 line_time.full = dfixed_div(line_time, a);
6628 bpp.full = dfixed_const(wm->bytes_per_pixel);
6629 src_width.full = dfixed_const(wm->src_width);
6630 bandwidth.full = dfixed_mul(src_width, bpp);
6631 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
6632 bandwidth.full = dfixed_div(bandwidth, line_time);
6633
6634 return dfixed_trunc(bandwidth);
6635}
6636
6637/**
6638 * dce8_latency_watermark - get the latency watermark
6639 *
6640 * @wm: watermark calculation data
6641 *
6642 * Calculate the latency watermark (CIK).
6643 * Used for display watermark bandwidth calculations
6644 * Returns the latency watermark in ns
6645 */
6646static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
6647{
6648 /* First calculate the latency in ns */
6649 u32 mc_latency = 2000; /* 2000 ns. */
6650 u32 available_bandwidth = dce8_available_bandwidth(wm);
6651 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
6652 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
6653 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
6654 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
6655 (wm->num_heads * cursor_line_pair_return_time);
6656 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
6657 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
6658 u32 tmp, dmif_size = 12288;
6659 fixed20_12 a, b, c;
6660
6661 if (wm->num_heads == 0)
6662 return 0;
6663
6664 a.full = dfixed_const(2);
6665 b.full = dfixed_const(1);
6666 if ((wm->vsc.full > a.full) ||
6667 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
6668 (wm->vtaps >= 5) ||
6669 ((wm->vsc.full >= a.full) && wm->interlaced))
6670 max_src_lines_per_dst_line = 4;
6671 else
6672 max_src_lines_per_dst_line = 2;
6673
6674 a.full = dfixed_const(available_bandwidth);
6675 b.full = dfixed_const(wm->num_heads);
6676 a.full = dfixed_div(a, b);
6677
6678 b.full = dfixed_const(mc_latency + 512);
6679 c.full = dfixed_const(wm->disp_clk);
6680 b.full = dfixed_div(b, c);
6681
6682 c.full = dfixed_const(dmif_size);
6683 b.full = dfixed_div(c, b);
6684
6685 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
6686
6687 b.full = dfixed_const(1000);
6688 c.full = dfixed_const(wm->disp_clk);
6689 b.full = dfixed_div(c, b);
6690 c.full = dfixed_const(wm->bytes_per_pixel);
6691 b.full = dfixed_mul(b, c);
6692
6693 lb_fill_bw = min(tmp, dfixed_trunc(b));
6694
6695 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
6696 b.full = dfixed_const(1000);
6697 c.full = dfixed_const(lb_fill_bw);
6698 b.full = dfixed_div(c, b);
6699 a.full = dfixed_div(a, b);
6700 line_fill_time = dfixed_trunc(a);
6701
6702 if (line_fill_time < wm->active_time)
6703 return latency;
6704 else
6705 return latency + (line_fill_time - wm->active_time);
6706
6707}
6708
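To make the ns bookkeeping above concrete, a hedged walkthrough of the pre-line-fill part with round, made-up inputs (one head, 1000 MB/s available bandwidth, 100 MHz disp_clk):

#include <stdio.h>

int main(void)
{
	unsigned int available_bw = 1000;	/* MB/s, illustrative */
	unsigned int disp_clk = 100000;		/* kHz, illustrative */
	unsigned int num_heads = 1;
	unsigned int mc_latency = 2000;		/* ns, as in dce8_latency_watermark() */

	unsigned int worst_chunk = (512 * 8 * 1000) / available_bw;	/* 4096 ns */
	unsigned int cursor_pair = (128 * 4 * 1000) / available_bw;	/* 512 ns */
	unsigned int dc_latency = 40000000 / disp_clk;			/* 400 ns */
	unsigned int other_heads = (num_heads + 1) * worst_chunk +
				   num_heads * cursor_pair;		/* 8704 ns */

	/* base latency before any line-fill penalty: 11104 ns */
	printf("latency = %u ns\n", mc_latency + other_heads + dc_latency);
	return 0;
}
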
6709/**
6710 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
6711 * average and available dram bandwidth
6712 *
6713 * @wm: watermark calculation data
6714 *
6715 * Check if the display average bandwidth fits in the display
6716 * dram bandwidth (CIK).
6717 * Used for display watermark bandwidth calculations
6718 * Returns true if the display fits, false if not.
6719 */
6720static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
6721{
6722 if (dce8_average_bandwidth(wm) <=
6723 (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
6724 return true;
6725 else
6726 return false;
6727}
6728
6729/**
6730 * dce8_average_bandwidth_vs_available_bandwidth - check
6731 * average and available bandwidth
6732 *
6733 * @wm: watermark calculation data
6734 *
6735 * Check if the display average bandwidth fits in the display
6736 * available bandwidth (CIK).
6737 * Used for display watermark bandwidth calculations
6738 * Returns true if the display fits, false if not.
6739 */
6740static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
6741{
6742 if (dce8_average_bandwidth(wm) <=
6743 (dce8_available_bandwidth(wm) / wm->num_heads))
6744 return true;
6745 else
6746 return false;
6747}
6748
6749/**
6750 * dce8_check_latency_hiding - check latency hiding
6751 *
6752 * @wm: watermark calculation data
6753 *
6754 * Check latency hiding (CIK).
6755 * Used for display watermark bandwidth calculations
6756 * Returns true if the display fits, false if not.
6757 */
6758static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
6759{
6760 u32 lb_partitions = wm->lb_size / wm->src_width;
6761 u32 line_time = wm->active_time + wm->blank_time;
6762 u32 latency_tolerant_lines;
6763 u32 latency_hiding;
6764 fixed20_12 a;
6765
6766 a.full = dfixed_const(1);
6767 if (wm->vsc.full > a.full)
6768 latency_tolerant_lines = 1;
6769 else {
6770 if (lb_partitions <= (wm->vtaps + 1))
6771 latency_tolerant_lines = 1;
6772 else
6773 latency_tolerant_lines = 2;
6774 }
6775
6776 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
6777
6778 if (dce8_latency_watermark(wm) <= latency_hiding)
6779 return true;
6780 else
6781 return false;
6782}
6783
6784/**
6785 * dce8_program_watermarks - program display watermarks
6786 *
6787 * @rdev: radeon_device pointer
6788 * @radeon_crtc: the selected display controller
6789 * @lb_size: line buffer size
6790 * @num_heads: number of display controllers in use
6791 *
6792 * Calculate and program the display watermarks for the
6793 * selected display controller (CIK).
6794 */
6795static void dce8_program_watermarks(struct radeon_device *rdev,
6796 struct radeon_crtc *radeon_crtc,
6797 u32 lb_size, u32 num_heads)
6798{
6799 struct drm_display_mode *mode = &radeon_crtc->base.mode;
6800 struct dce8_wm_params wm;
6801 u32 pixel_period;
6802 u32 line_time = 0;
6803 u32 latency_watermark_a = 0, latency_watermark_b = 0;
6804 u32 tmp, wm_mask;
6805
6806 if (radeon_crtc->base.enabled && num_heads && mode) {
6807 pixel_period = 1000000 / (u32)mode->clock;
6808 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
6809
6810 wm.yclk = rdev->pm.current_mclk * 10;
6811 wm.sclk = rdev->pm.current_sclk * 10;
6812 wm.disp_clk = mode->clock;
6813 wm.src_width = mode->crtc_hdisplay;
6814 wm.active_time = mode->crtc_hdisplay * pixel_period;
6815 wm.blank_time = line_time - wm.active_time;
6816 wm.interlaced = false;
6817 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
6818 wm.interlaced = true;
6819 wm.vsc = radeon_crtc->vsc;
6820 wm.vtaps = 1;
6821 if (radeon_crtc->rmx_type != RMX_OFF)
6822 wm.vtaps = 2;
6823 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
6824 wm.lb_size = lb_size;
6825 wm.dram_channels = cik_get_number_of_dram_channels(rdev);
6826 wm.num_heads = num_heads;
6827
6828 /* set for high clocks */
6829 latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535);
6830 /* set for low clocks */
6831 /* wm.yclk = low clk; wm.sclk = low clk */
6832 latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);
6833
6834 /* possibly force display priority to high */
6835 /* should really do this at mode validation time... */
6836 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
6837 !dce8_average_bandwidth_vs_available_bandwidth(&wm) ||
6838 !dce8_check_latency_hiding(&wm) ||
6839 (rdev->disp_priority == 2)) {
6840 DRM_DEBUG_KMS("force priority to high\n");
6841 }
6842 }
6843
6844 /* select wm A */
6845 wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
6846 tmp = wm_mask;
6847 tmp &= ~LATENCY_WATERMARK_MASK(3);
6848 tmp |= LATENCY_WATERMARK_MASK(1);
6849 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
6850 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
6851 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
6852 LATENCY_HIGH_WATERMARK(line_time)));
6853 /* select wm B */
6854 tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
6855 tmp &= ~LATENCY_WATERMARK_MASK(3);
6856 tmp |= LATENCY_WATERMARK_MASK(2);
6857 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
6858 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
6859 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
6860 LATENCY_HIGH_WATERMARK(line_time)));
6861 /* restore original selection */
6862 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
6863}
6864
6865/**
6866 * dce8_bandwidth_update - program display watermarks
6867 *
6868 * @rdev: radeon_device pointer
6869 *
6870 * Calculate and program the display watermarks and line
6871 * buffer allocation (CIK).
6872 */
6873void dce8_bandwidth_update(struct radeon_device *rdev)
6874{
6875 struct drm_display_mode *mode = NULL;
6876 u32 num_heads = 0, lb_size;
6877 int i;
6878
6879 radeon_update_display_priority(rdev);
6880
6881 for (i = 0; i < rdev->num_crtc; i++) {
6882 if (rdev->mode_info.crtcs[i]->base.enabled)
6883 num_heads++;
6884 }
6885 for (i = 0; i < rdev->num_crtc; i++) {
6886 mode = &rdev->mode_info.crtcs[i]->base.mode;
6887 lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
6888 dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
6889 }
6890}
6891
6892/**
6893 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
6894 *
6895 * @rdev: radeon_device pointer
6896 *
6897 * Fetches a GPU clock counter snapshot (CIK).
6898 * Returns the 64 bit clock counter snapshot.
6899 */
6900uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
6901{
6902 uint64_t clock;
6903
6904 mutex_lock(&rdev->gpu_clock_mutex);
6905 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
6906 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
6907 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
6908 mutex_unlock(&rdev->gpu_clock_mutex);
6909 return clock;
6910}
6911
6912static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
6913 u32 cntl_reg, u32 status_reg)
6914{
6915 int r, i;
6916 struct atom_clock_dividers dividers;
6917 uint32_t tmp;
6918
6919 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
6920 clock, false, &dividers);
6921 if (r)
6922 return r;
6923
6924 tmp = RREG32_SMC(cntl_reg);
6925 tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
6926 tmp |= dividers.post_divider;
6927 WREG32_SMC(cntl_reg, tmp);
6928
6929 for (i = 0; i < 100; i++) {
6930 if (RREG32_SMC(status_reg) & DCLK_STATUS)
6931 break;
6932 mdelay(10);
6933 }
6934 if (i == 100)
6935 return -ETIMEDOUT;
6936
6937 return 0;
6938}
6939
6940int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
6941{
6942 int r = 0;
6943
6944 r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
6945 if (r)
6946 return r;
6947
6948 r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
6949 return r;
6950}
6951
6952int cik_uvd_resume(struct radeon_device *rdev)
6953{
6954 uint64_t addr;
6955 uint32_t size;
6956 int r;
6957
6958 r = radeon_uvd_resume(rdev);
6959 if (r)
6960 return r;
6961
6962	/* program the VCPU memory controller bits 0-27 */
6963 addr = rdev->uvd.gpu_addr >> 3;
6964 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
6965 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
6966 WREG32(UVD_VCPU_CACHE_SIZE0, size);
6967
6968 addr += size;
6969 size = RADEON_UVD_STACK_SIZE >> 3;
6970 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
6971 WREG32(UVD_VCPU_CACHE_SIZE1, size);
6972
6973 addr += size;
6974 size = RADEON_UVD_HEAP_SIZE >> 3;
6975 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
6976 WREG32(UVD_VCPU_CACHE_SIZE2, size);
6977
6978 /* bits 28-31 */
6979 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
6980 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
6981
6982 /* bits 32-39 */
6983 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
6984 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
6985
6986 return 0;
6987}
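A hedged sketch of the 40-bit address split used above (register semantics are inferred from the comments; the sample address and the meaning of the duplicated nibble in UVD_LMI_ADDR_EXT are assumptions): the VCPU cache offsets carry the address in 8-byte units, bits 28-31 go to UVD_LMI_ADDR_EXT, bits 32-39 to UVD_LMI_EXT40_ADDR.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gpu_addr = 0x1234567800ULL;	/* illustrative 40-bit address */

	uint32_t cache_off = (uint32_t)(gpu_addr >> 3);	/* address / 8 */
	uint32_t ext = (gpu_addr >> 28) & 0xF;		/* bits 28-31 */
	uint32_t ext40 = (gpu_addr >> 32) & 0xFF;	/* bits 32-39 */

	printf("UVD_VCPU_CACHE_OFFSET0 = 0x%08x\n", (unsigned int)cache_off);
	/* the nibble is written at shifts 0 and 12, as in cik_uvd_resume() */
	printf("UVD_LMI_ADDR_EXT       = 0x%08x\n",
	       (unsigned int)((ext << 12) | ext));
	/* the 0x9 << 16 and 1 << 31 fields are copied verbatim from above */
	printf("UVD_LMI_EXT40_ADDR     = 0x%08x\n",
	       (unsigned int)(ext40 | (0x9u << 16) | (0x1u << 31)));
	return 0;
}
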
diff --git a/drivers/gpu/drm/radeon/cik_blit_shaders.c b/drivers/gpu/drm/radeon/cik_blit_shaders.c
new file mode 100644
index 000000000000..ff1311806e91
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_blit_shaders.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
26
27#include <linux/types.h>
28#include <linux/bug.h>
29#include <linux/kernel.h>
30
31const u32 cik_default_state[] =
32{
33 0xc0066900,
34 0x00000000,
35 0x00000060, /* DB_RENDER_CONTROL */
36 0x00000000, /* DB_COUNT_CONTROL */
37 0x00000000, /* DB_DEPTH_VIEW */
38 0x0000002a, /* DB_RENDER_OVERRIDE */
39 0x00000000, /* DB_RENDER_OVERRIDE2 */
40 0x00000000, /* DB_HTILE_DATA_BASE */
41
42 0xc0046900,
43 0x00000008,
44 0x00000000, /* DB_DEPTH_BOUNDS_MIN */
45 0x00000000, /* DB_DEPTH_BOUNDS_MAX */
46 0x00000000, /* DB_STENCIL_CLEAR */
47 0x00000000, /* DB_DEPTH_CLEAR */
48
49 0xc0036900,
50 0x0000000f,
51 0x00000000, /* DB_DEPTH_INFO */
52 0x00000000, /* DB_Z_INFO */
53 0x00000000, /* DB_STENCIL_INFO */
54
55 0xc0016900,
56 0x00000080,
57 0x00000000, /* PA_SC_WINDOW_OFFSET */
58
59 0xc00d6900,
60 0x00000083,
61 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
62 0x00000000, /* PA_SC_CLIPRECT_0_TL */
63 0x20002000, /* PA_SC_CLIPRECT_0_BR */
64 0x00000000,
65 0x20002000,
66 0x00000000,
67 0x20002000,
68 0x00000000,
69 0x20002000,
70 0xaaaaaaaa, /* PA_SC_EDGERULE */
71 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
72 0x0000000f, /* CB_TARGET_MASK */
73 0x0000000f, /* CB_SHADER_MASK */
74
75 0xc0226900,
76 0x00000094,
77 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
78 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
79 0x80000000,
80 0x20002000,
81 0x80000000,
82 0x20002000,
83 0x80000000,
84 0x20002000,
85 0x80000000,
86 0x20002000,
87 0x80000000,
88 0x20002000,
89 0x80000000,
90 0x20002000,
91 0x80000000,
92 0x20002000,
93 0x80000000,
94 0x20002000,
95 0x80000000,
96 0x20002000,
97 0x80000000,
98 0x20002000,
99 0x80000000,
100 0x20002000,
101 0x80000000,
102 0x20002000,
103 0x80000000,
104 0x20002000,
105 0x80000000,
106 0x20002000,
107 0x80000000,
108 0x20002000,
109 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
110 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
111
112 0xc0046900,
113 0x00000100,
114 0xffffffff, /* VGT_MAX_VTX_INDX */
115 0x00000000, /* VGT_MIN_VTX_INDX */
116 0x00000000, /* VGT_INDX_OFFSET */
117 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
118
119 0xc0046900,
120 0x00000105,
121 0x00000000, /* CB_BLEND_RED */
122 0x00000000, /* CB_BLEND_GREEN */
123 0x00000000, /* CB_BLEND_BLUE */
124 0x00000000, /* CB_BLEND_ALPHA */
125
126 0xc0016900,
127 0x000001e0,
128 0x00000000, /* CB_BLEND0_CONTROL */
129
130 0xc00c6900,
131 0x00000200,
132 0x00000000, /* DB_DEPTH_CONTROL */
133 0x00000000, /* DB_EQAA */
134 0x00cc0010, /* CB_COLOR_CONTROL */
135 0x00000210, /* DB_SHADER_CONTROL */
136 0x00010000, /* PA_CL_CLIP_CNTL */
137 0x00000004, /* PA_SU_SC_MODE_CNTL */
138 0x00000100, /* PA_CL_VTE_CNTL */
139 0x00000000, /* PA_CL_VS_OUT_CNTL */
140 0x00000000, /* PA_CL_NANINF_CNTL */
141 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
142 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
143 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
144
145 0xc0116900,
146 0x00000280,
147 0x00000000, /* PA_SU_POINT_SIZE */
148 0x00000000, /* PA_SU_POINT_MINMAX */
149 0x00000008, /* PA_SU_LINE_CNTL */
150 0x00000000, /* PA_SC_LINE_STIPPLE */
151 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
152 0x00000000, /* VGT_HOS_CNTL */
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000, /* VGT_GS_MODE */
164
165 0xc0026900,
166 0x00000292,
167 0x00000000, /* PA_SC_MODE_CNTL_0 */
168 0x00000000, /* PA_SC_MODE_CNTL_1 */
169
170 0xc0016900,
171 0x000002a1,
172 0x00000000, /* VGT_PRIMITIVEID_EN */
173
174 0xc0016900,
175 0x000002a5,
176 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
177
178 0xc0026900,
179 0x000002a8,
180 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
181 0x00000000,
182
183 0xc0026900,
184 0x000002ad,
185 0x00000000, /* VGT_REUSE_OFF */
186 0x00000000,
187
188 0xc0016900,
189 0x000002d5,
190 0x00000000, /* VGT_SHADER_STAGES_EN */
191
192 0xc0016900,
193 0x000002dc,
194 0x0000aa00, /* DB_ALPHA_TO_MASK */
195
196 0xc0066900,
197 0x000002de,
198 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204
205 0xc0026900,
206 0x000002e5,
207 0x00000000, /* VGT_STRMOUT_CONFIG */
208 0x00000000,
209
210 0xc01b6900,
211 0x000002f5,
212 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
213 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
214 0x00000000, /* PA_SC_LINE_CNTL */
215 0x00000000, /* PA_SC_AA_CONFIG */
216 0x00000005, /* PA_SU_VTX_CNTL */
217 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
218 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
219 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
220 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
221 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
222 0x00000000,
223 0x00000000,
224 0x00000000,
225 0x00000000,
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
238 0xffffffff,
239
240 0xc0026900,
241 0x00000316,
242 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
243 0x00000010, /* */
244};
245
246const u32 cik_default_size = ARRAY_SIZE(cik_default_state);
diff --git a/drivers/gpu/drm/radeon/cik_blit_shaders.h b/drivers/gpu/drm/radeon/cik_blit_shaders.h
new file mode 100644
index 000000000000..dfe7314f9ff4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_blit_shaders.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#ifndef CIK_BLIT_SHADERS_H
26#define CIK_BLIT_SHADERS_H
27
28extern const u32 cik_default_state[];
29
30extern const u32 cik_default_size;
31
32#endif
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
new file mode 100644
index 000000000000..d71e46d571f5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef __CIK_REG_H__
25#define __CIK_REG_H__
26
27#define CIK_DC_GPIO_HPD_MASK 0x65b0
28#define CIK_DC_GPIO_HPD_A 0x65b4
29#define CIK_DC_GPIO_HPD_EN 0x65b8
30#define CIK_DC_GPIO_HPD_Y 0x65bc
31
32#define CIK_GRPH_CONTROL 0x6804
33# define CIK_GRPH_DEPTH(x) (((x) & 0x3) << 0)
34# define CIK_GRPH_DEPTH_8BPP 0
35# define CIK_GRPH_DEPTH_16BPP 1
36# define CIK_GRPH_DEPTH_32BPP 2
37# define CIK_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
38# define CIK_ADDR_SURF_2_BANK 0
39# define CIK_ADDR_SURF_4_BANK 1
40# define CIK_ADDR_SURF_8_BANK 2
41# define CIK_ADDR_SURF_16_BANK 3
42# define CIK_GRPH_Z(x) (((x) & 0x3) << 4)
43# define CIK_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
44# define CIK_ADDR_SURF_BANK_WIDTH_1 0
45# define CIK_ADDR_SURF_BANK_WIDTH_2 1
46# define CIK_ADDR_SURF_BANK_WIDTH_4 2
47# define CIK_ADDR_SURF_BANK_WIDTH_8 3
48# define CIK_GRPH_FORMAT(x) (((x) & 0x7) << 8)
49/* 8 BPP */
50# define CIK_GRPH_FORMAT_INDEXED 0
51/* 16 BPP */
52# define CIK_GRPH_FORMAT_ARGB1555 0
53# define CIK_GRPH_FORMAT_ARGB565 1
54# define CIK_GRPH_FORMAT_ARGB4444 2
55# define CIK_GRPH_FORMAT_AI88 3
56# define CIK_GRPH_FORMAT_MONO16 4
57# define CIK_GRPH_FORMAT_BGRA5551 5
58/* 32 BPP */
59# define CIK_GRPH_FORMAT_ARGB8888 0
60# define CIK_GRPH_FORMAT_ARGB2101010 1
61# define CIK_GRPH_FORMAT_32BPP_DIG 2
62# define CIK_GRPH_FORMAT_8B_ARGB2101010 3
63# define CIK_GRPH_FORMAT_BGRA1010102 4
64# define CIK_GRPH_FORMAT_8B_BGRA1010102 5
65# define CIK_GRPH_FORMAT_RGB111110 6
66# define CIK_GRPH_FORMAT_BGR101111 7
67# define CIK_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
68# define CIK_ADDR_SURF_BANK_HEIGHT_1 0
69# define CIK_ADDR_SURF_BANK_HEIGHT_2 1
70# define CIK_ADDR_SURF_BANK_HEIGHT_4 2
71# define CIK_ADDR_SURF_BANK_HEIGHT_8 3
72# define CIK_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
73# define CIK_ADDR_SURF_TILE_SPLIT_64B 0
74# define CIK_ADDR_SURF_TILE_SPLIT_128B 1
75# define CIK_ADDR_SURF_TILE_SPLIT_256B 2
76# define CIK_ADDR_SURF_TILE_SPLIT_512B 3
77# define CIK_ADDR_SURF_TILE_SPLIT_1KB 4
78# define CIK_ADDR_SURF_TILE_SPLIT_2KB 5
79# define CIK_ADDR_SURF_TILE_SPLIT_4KB 6
80# define CIK_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
81# define CIK_ADDR_SURF_MACRO_TILE_ASPECT_1 0
82# define CIK_ADDR_SURF_MACRO_TILE_ASPECT_2 1
83# define CIK_ADDR_SURF_MACRO_TILE_ASPECT_4 2
84# define CIK_ADDR_SURF_MACRO_TILE_ASPECT_8 3
85# define CIK_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
86# define CIK_GRPH_ARRAY_LINEAR_GENERAL 0
87# define CIK_GRPH_ARRAY_LINEAR_ALIGNED 1
88# define CIK_GRPH_ARRAY_1D_TILED_THIN1 2
89# define CIK_GRPH_ARRAY_2D_TILED_THIN1 4
90# define CIK_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
91# define CIK_ADDR_SURF_P2 0
92# define CIK_ADDR_SURF_P4_8x16 4
93# define CIK_ADDR_SURF_P4_16x16 5
94# define CIK_ADDR_SURF_P4_16x32 6
95# define CIK_ADDR_SURF_P4_32x32 7
96# define CIK_ADDR_SURF_P8_16x16_8x16 8
97# define CIK_ADDR_SURF_P8_16x32_8x16 9
98# define CIK_ADDR_SURF_P8_32x32_8x16 10
99# define CIK_ADDR_SURF_P8_16x32_16x16 11
100# define CIK_ADDR_SURF_P8_32x32_16x16 12
101# define CIK_ADDR_SURF_P8_32x32_16x32 13
102# define CIK_ADDR_SURF_P8_32x64_32x32 14
103# define CIK_GRPH_MICRO_TILE_MODE(x) (((x) & 0x7) << 29)
104# define CIK_DISPLAY_MICRO_TILING 0
105# define CIK_THIN_MICRO_TILING 1
106# define CIK_DEPTH_MICRO_TILING 2
107# define CIK_ROTATED_MICRO_TILING 4
108
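
The CIK_GRPH_* macros above are field packers for the single CIK_GRPH_CONTROL dword. A minimal sketch of how they compose, assuming this header is included; wreg32() is a hypothetical stand-in for the driver's MMIO write helper:

#include <stdint.h>

extern void wreg32(uint32_t reg, uint32_t val);	/* hypothetical MMIO helper */

static void cik_grph_program_argb8888(void)
{
	uint32_t ctl = 0;

	ctl |= CIK_GRPH_DEPTH(CIK_GRPH_DEPTH_32BPP);		/* bits 1:0 */
	ctl |= CIK_GRPH_FORMAT(CIK_GRPH_FORMAT_ARGB8888);	/* bits 10:8 */
	ctl |= CIK_GRPH_ARRAY_MODE(CIK_GRPH_ARRAY_LINEAR_ALIGNED);

	wreg32(CIK_GRPH_CONTROL, ctl);
}

Note that the CIK_GRPH_FORMAT values are only meaningful relative to the depth selected by CIK_GRPH_DEPTH, which is why the value 0 appears once per depth group above.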
109/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
110#define CIK_CUR_CONTROL 0x6998
111# define CIK_CURSOR_EN (1 << 0)
112# define CIK_CURSOR_MODE(x) (((x) & 0x3) << 8)
113# define CIK_CURSOR_MONO 0
114# define CIK_CURSOR_24_1 1
115# define CIK_CURSOR_24_8_PRE_MULT 2
116# define CIK_CURSOR_24_8_UNPRE_MULT 3
117# define CIK_CURSOR_2X_MAGNIFY (1 << 16)
118# define CIK_CURSOR_FORCE_MC_ON (1 << 20)
119# define CIK_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
120# define CIK_CURSOR_URGENT_ALWAYS 0
121# define CIK_CURSOR_URGENT_1_8 1
122# define CIK_CURSOR_URGENT_1_4 2
123# define CIK_CURSOR_URGENT_3_8 3
124# define CIK_CURSOR_URGENT_1_2 4
125#define CIK_CUR_SURFACE_ADDRESS 0x699c
126# define CIK_CUR_SURFACE_ADDRESS_MASK 0xfffff000
127#define CIK_CUR_SIZE 0x69a0
128#define CIK_CUR_SURFACE_ADDRESS_HIGH 0x69a4
129#define CIK_CUR_POSITION 0x69a8
130#define CIK_CUR_HOT_SPOT 0x69ac
131#define CIK_CUR_COLOR1 0x69b0
132#define CIK_CUR_COLOR2 0x69b4
133#define CIK_CUR_UPDATE 0x69b8
134# define CIK_CURSOR_UPDATE_PENDING (1 << 0)
135# define CIK_CURSOR_UPDATE_TAKEN (1 << 1)
136# define CIK_CURSOR_UPDATE_LOCK (1 << 16)
137# define CIK_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
138
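
CIK_CURSOR_UPDATE_LOCK exists so the cursor position, hot spot and surface registers can be reprogrammed without the hardware latching a half-written state. A hedged sketch of the lock/update/unlock sequence, assuming the customary radeon layout of CIK_CUR_POSITION (x in the upper 16 bits, y in the lower):

#include <stdint.h>

extern void wreg32(uint32_t reg, uint32_t val);	/* hypothetical MMIO helper */

static void cik_cursor_move(uint32_t x, uint32_t y)
{
	wreg32(CIK_CUR_UPDATE, CIK_CURSOR_UPDATE_LOCK);	/* hold updates */
	wreg32(CIK_CUR_POSITION, (x << 16) | y);
	wreg32(CIK_CUR_HOT_SPOT, 0);
	wreg32(CIK_CUR_UPDATE, 0);			/* latch on next update */
}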
139#define CIK_ALPHA_CONTROL 0x6af0
140# define CIK_CURSOR_ALPHA_BLND_ENA (1 << 1)
141
142#define CIK_LB_DATA_FORMAT 0x6b00
143# define CIK_INTERLEAVE_EN (1 << 3)
144
145#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
146
147#endif
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
new file mode 100644
index 000000000000..63514b95889a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -0,0 +1,1297 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef CIK_H
25#define CIK_H
26
27#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
28
29#define CIK_RB_BITMAP_WIDTH_PER_SH 2
30
31/* SMC IND registers */
32#define GENERAL_PWRMGT 0xC0200000
33# define GPU_COUNTER_CLK (1 << 15)
34
35#define CG_CLKPIN_CNTL 0xC05001A0
36# define XTALIN_DIVIDE (1 << 1)
37
38#define PCIE_INDEX 0x38
39#define PCIE_DATA 0x3C
40
41#define VGA_HDP_CONTROL 0x328
42#define VGA_MEMORY_DISABLE (1 << 4)
43
44#define DMIF_ADDR_CALC 0xC00
45
46#define SRBM_GFX_CNTL 0xE44
47#define PIPEID(x) ((x) << 0)
48#define MEID(x) ((x) << 2)
49#define VMID(x) ((x) << 4)
50#define QUEUEID(x) ((x) << 8)
51
52#define SRBM_STATUS2 0xE4C
53#define SDMA_BUSY (1 << 5)
54#define SDMA1_BUSY (1 << 6)
55#define SRBM_STATUS 0xE50
56#define UVD_RQ_PENDING (1 << 1)
57#define GRBM_RQ_PENDING (1 << 5)
58#define VMC_BUSY (1 << 8)
59#define MCB_BUSY (1 << 9)
60#define MCB_NON_DISPLAY_BUSY (1 << 10)
61#define MCC_BUSY (1 << 11)
62#define MCD_BUSY (1 << 12)
63#define SEM_BUSY (1 << 14)
64#define IH_BUSY (1 << 17)
65#define UVD_BUSY (1 << 19)
66
67#define SRBM_SOFT_RESET 0xE60
68#define SOFT_RESET_BIF (1 << 1)
69#define SOFT_RESET_R0PLL (1 << 4)
70#define SOFT_RESET_DC (1 << 5)
71#define SOFT_RESET_SDMA1 (1 << 6)
72#define SOFT_RESET_GRBM (1 << 8)
73#define SOFT_RESET_HDP (1 << 9)
74#define SOFT_RESET_IH (1 << 10)
75#define SOFT_RESET_MC (1 << 11)
76#define SOFT_RESET_ROM (1 << 14)
77#define SOFT_RESET_SEM (1 << 15)
78#define SOFT_RESET_VMC (1 << 17)
79#define SOFT_RESET_SDMA (1 << 20)
80#define SOFT_RESET_TST (1 << 21)
81#define SOFT_RESET_REGBB (1 << 22)
82#define SOFT_RESET_ORB (1 << 23)
83#define SOFT_RESET_VCE (1 << 24)
84
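
Soft resets via SRBM_SOFT_RESET are normally pulsed rather than held: set the block bits, wait briefly, then clear them again. A sketch under the same assumptions (udelay() is the kernel's from <linux/delay.h>; rreg32()/wreg32() are hypothetical accessors):

#include <stdint.h>

extern uint32_t rreg32(uint32_t reg);		/* hypothetical MMIO helpers */
extern void wreg32(uint32_t reg, uint32_t val);
extern void udelay(unsigned long usecs);	/* <linux/delay.h> */

static void cik_srbm_soft_reset(uint32_t mask)	/* e.g. SOFT_RESET_GRBM | SOFT_RESET_IH */
{
	uint32_t tmp = rreg32(SRBM_SOFT_RESET);

	wreg32(SRBM_SOFT_RESET, tmp | mask);
	(void)rreg32(SRBM_SOFT_RESET);		/* post the write */
	udelay(50);
	wreg32(SRBM_SOFT_RESET, tmp & ~mask);
	(void)rreg32(SRBM_SOFT_RESET);
}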
85#define VM_L2_CNTL 0x1400
86#define ENABLE_L2_CACHE (1 << 0)
87#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
88#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
89#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
90#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
91#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
92#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
93#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
94#define VM_L2_CNTL2 0x1404
95#define INVALIDATE_ALL_L1_TLBS (1 << 0)
96#define INVALIDATE_L2_CACHE (1 << 1)
97#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
98#define INVALIDATE_PTE_AND_PDE_CACHES 0
99#define INVALIDATE_ONLY_PTE_CACHES 1
100#define INVALIDATE_ONLY_PDE_CACHES 2
101#define VM_L2_CNTL3 0x1408
102#define BANK_SELECT(x) ((x) << 0)
103#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
104#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
105#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
106#define VM_L2_STATUS 0x140C
107#define L2_BUSY (1 << 0)
108#define VM_CONTEXT0_CNTL 0x1410
109#define ENABLE_CONTEXT (1 << 0)
110#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
111#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
112#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
113#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
114#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
115#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
116#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
117#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
118#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
119#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
120#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
121#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
122#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
123#define VM_CONTEXT1_CNTL 0x1414
124#define VM_CONTEXT0_CNTL2 0x1430
125#define VM_CONTEXT1_CNTL2 0x1434
126#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x1438
127#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x143c
128#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x1440
129#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x1444
130#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x1448
131#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x144c
132#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
133#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
134
135#define VM_INVALIDATE_REQUEST 0x1478
136#define VM_INVALIDATE_RESPONSE 0x147c
137
138#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
139
140#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
141
142#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
143#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
144
145#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c
146#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x1540
147#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x1544
148#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x1548
149#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x154c
150#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x1550
151#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x1554
152#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x1558
153#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c
154#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x1560
155
156#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
157#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
158
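
Note that the sixteen page-table base registers sit in two separate banks: contexts 0-7 run consecutively from 0x153c below, contexts 8-15 from 0x1438 above, each bank 4 bytes apart. Register-offset computations therefore have to be piecewise, as in this sketch:

#include <stdint.h>

/* Register offset of PAGE_TABLE_BASE_ADDR for VM context id 0..15. */
static uint32_t cik_vm_pt_base_reg(unsigned int vm_id)
{
	if (vm_id < 8)
		return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2);
	return VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2);
}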
159#define MC_SHARED_CHMAP 0x2004
160#define NOOFCHAN_SHIFT 12
161#define NOOFCHAN_MASK 0x0000f000
162#define MC_SHARED_CHREMAP 0x2008
163
164#define CHUB_CONTROL 0x1864
165#define BYPASS_VM (1 << 0)
166
167#define MC_VM_FB_LOCATION 0x2024
168#define MC_VM_AGP_TOP 0x2028
169#define MC_VM_AGP_BOT 0x202C
170#define MC_VM_AGP_BASE 0x2030
171#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
172#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
173#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
174
175#define MC_VM_MX_L1_TLB_CNTL 0x2064
176#define ENABLE_L1_TLB (1 << 0)
177#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
178#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
179#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
180#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
181#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
182#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
183#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
184#define MC_VM_FB_OFFSET 0x2068
185
186#define MC_SHARED_BLACKOUT_CNTL 0x20ac
187
188#define MC_ARB_RAMCFG 0x2760
189#define NOOFBANK_SHIFT 0
190#define NOOFBANK_MASK 0x00000003
191#define NOOFRANK_SHIFT 2
192#define NOOFRANK_MASK 0x00000004
193#define NOOFROWS_SHIFT 3
194#define NOOFROWS_MASK 0x00000038
195#define NOOFCOLS_SHIFT 6
196#define NOOFCOLS_MASK 0x000000C0
197#define CHANSIZE_SHIFT 8
198#define CHANSIZE_MASK 0x00000100
199#define NOOFGROUPS_SHIFT 12
200#define NOOFGROUPS_MASK 0x00001000
201
202#define MC_SEQ_SUP_CNTL 0x28c8
203#define RUN_MASK (1 << 0)
204#define MC_SEQ_SUP_PGM 0x28cc
205
206#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
207#define TRAIN_DONE_D0 (1 << 30)
208#define TRAIN_DONE_D1 (1 << 31)
209
210#define MC_IO_PAD_CNTL_D0 0x29d0
211#define MEM_FALL_OUT_CMD (1 << 8)
212
213#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
214#define MC_SEQ_IO_DEBUG_DATA 0x2a48
215
216#define HDP_HOST_PATH_CNTL 0x2C00
217#define HDP_NONSURFACE_BASE 0x2C04
218#define HDP_NONSURFACE_INFO 0x2C08
219#define HDP_NONSURFACE_SIZE 0x2C0C
220
221#define HDP_ADDR_CONFIG 0x2F48
222#define HDP_MISC_CNTL 0x2F4C
223#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
224
225#define IH_RB_CNTL 0x3e00
226# define IH_RB_ENABLE (1 << 0)
227# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
228# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
229# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
230# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
231# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
232# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
233#define IH_RB_BASE 0x3e04
234#define IH_RB_RPTR 0x3e08
235#define IH_RB_WPTR 0x3e0c
236# define RB_OVERFLOW (1 << 0)
237# define WPTR_OFFSET_MASK 0x3fffc
238#define IH_RB_WPTR_ADDR_HI 0x3e10
239#define IH_RB_WPTR_ADDR_LO 0x3e14
240#define IH_CNTL 0x3e18
241# define ENABLE_INTR (1 << 0)
242# define IH_MC_SWAP(x) ((x) << 1)
243# define IH_MC_SWAP_NONE 0
244# define IH_MC_SWAP_16BIT 1
245# define IH_MC_SWAP_32BIT 2
246# define IH_MC_SWAP_64BIT 3
247# define RPTR_REARM (1 << 4)
248# define MC_WRREQ_CREDIT(x) ((x) << 15)
249# define MC_WR_CLEAN_CNT(x) ((x) << 20)
250# define MC_VMID(x) ((x) << 25)
251
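
IH_RB_SIZE and IH_WPTR_WRITEBACK_TIMER take log2-encoded values, so the interrupt ring must be a power-of-two number of dwords. A hedged setup sketch (wreg32() hypothetical; the base register conventionally takes the GPU address in 256-byte units):

#include <stdint.h>

extern void wreg32(uint32_t reg, uint32_t val);	/* hypothetical MMIO helper */

static void cik_ih_ring_setup(uint64_t rb_gpu_addr)
{
	uint32_t cntl = IH_WPTR_OVERFLOW_ENABLE |
			IH_WPTR_OVERFLOW_CLEAR |
			IH_RB_SIZE(14);		/* log2: 2^14 dwords == 64KB */

	wreg32(IH_RB_BASE, (uint32_t)(rb_gpu_addr >> 8));	/* 256-byte units */
	wreg32(IH_RB_RPTR, 0);
	wreg32(IH_RB_WPTR, 0);
	wreg32(IH_RB_CNTL, cntl | IH_RB_ENABLE);
}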
252#define CONFIG_MEMSIZE 0x5428
253
254#define INTERRUPT_CNTL 0x5468
255# define IH_DUMMY_RD_OVERRIDE (1 << 0)
256# define IH_DUMMY_RD_EN (1 << 1)
257# define IH_REQ_NONSNOOP_EN (1 << 3)
258# define GEN_IH_INT_EN (1 << 8)
259#define INTERRUPT_CNTL2 0x546c
260
261#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
262
263#define BIF_FB_EN 0x5490
264#define FB_READ_EN (1 << 0)
265#define FB_WRITE_EN (1 << 1)
266
267#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
268
269#define GPU_HDP_FLUSH_REQ 0x54DC
270#define GPU_HDP_FLUSH_DONE 0x54E0
271#define CP0 (1 << 0)
272#define CP1 (1 << 1)
273#define CP2 (1 << 2)
274#define CP3 (1 << 3)
275#define CP4 (1 << 4)
276#define CP5 (1 << 5)
277#define CP6 (1 << 6)
278#define CP7 (1 << 7)
279#define CP8 (1 << 8)
280#define CP9 (1 << 9)
281#define SDMA0 (1 << 10)
282#define SDMA1 (1 << 11)
283
284/* 0x6b04, 0x7704, 0x10304, 0x10f04, 0x11b04, 0x12704 */
285#define LB_MEMORY_CTRL 0x6b04
286#define LB_MEMORY_SIZE(x) ((x) << 0)
287#define LB_MEMORY_CONFIG(x) ((x) << 20)
288
289#define DPG_WATERMARK_MASK_CONTROL 0x6cc8
290# define LATENCY_WATERMARK_MASK(x) ((x) << 8)
291#define DPG_PIPE_LATENCY_CONTROL 0x6ccc
292# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
293# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
294
295/* 0x6b24, 0x7724, 0x10324, 0x10f24, 0x11b24, 0x12724 */
296#define LB_VLINE_STATUS 0x6b24
297# define VLINE_OCCURRED (1 << 0)
298# define VLINE_ACK (1 << 4)
299# define VLINE_STAT (1 << 12)
300# define VLINE_INTERRUPT (1 << 16)
301# define VLINE_INTERRUPT_TYPE (1 << 17)
302/* 0x6b2c, 0x772c, 0x1032c, 0x10f2c, 0x11b2c, 0x1272c */
303#define LB_VBLANK_STATUS 0x6b2c
304# define VBLANK_OCCURRED (1 << 0)
305# define VBLANK_ACK (1 << 4)
306# define VBLANK_STAT (1 << 12)
307# define VBLANK_INTERRUPT (1 << 16)
308# define VBLANK_INTERRUPT_TYPE (1 << 17)
309
310/* 0x6b20, 0x7720, 0x10320, 0x10f20, 0x11b20, 0x12720 */
311#define LB_INTERRUPT_MASK 0x6b20
312# define VBLANK_INTERRUPT_MASK (1 << 0)
313# define VLINE_INTERRUPT_MASK (1 << 4)
314# define VLINE2_INTERRUPT_MASK (1 << 8)
315
316#define DISP_INTERRUPT_STATUS 0x60f4
317# define LB_D1_VLINE_INTERRUPT (1 << 2)
318# define LB_D1_VBLANK_INTERRUPT (1 << 3)
319# define DC_HPD1_INTERRUPT (1 << 17)
320# define DC_HPD1_RX_INTERRUPT (1 << 18)
321# define DACA_AUTODETECT_INTERRUPT (1 << 22)
322# define DACB_AUTODETECT_INTERRUPT (1 << 23)
323# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
324# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
325#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
326# define LB_D2_VLINE_INTERRUPT (1 << 2)
327# define LB_D2_VBLANK_INTERRUPT (1 << 3)
328# define DC_HPD2_INTERRUPT (1 << 17)
329# define DC_HPD2_RX_INTERRUPT (1 << 18)
330# define DISP_TIMER_INTERRUPT (1 << 24)
331#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
332# define LB_D3_VLINE_INTERRUPT (1 << 2)
333# define LB_D3_VBLANK_INTERRUPT (1 << 3)
334# define DC_HPD3_INTERRUPT (1 << 17)
335# define DC_HPD3_RX_INTERRUPT (1 << 18)
336#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
337# define LB_D4_VLINE_INTERRUPT (1 << 2)
338# define LB_D4_VBLANK_INTERRUPT (1 << 3)
339# define DC_HPD4_INTERRUPT (1 << 17)
340# define DC_HPD4_RX_INTERRUPT (1 << 18)
341#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
342# define LB_D5_VLINE_INTERRUPT (1 << 2)
343# define LB_D5_VBLANK_INTERRUPT (1 << 3)
344# define DC_HPD5_INTERRUPT (1 << 17)
345# define DC_HPD5_RX_INTERRUPT (1 << 18)
346#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
347# define LB_D6_VLINE_INTERRUPT (1 << 2)
348# define LB_D6_VBLANK_INTERRUPT (1 << 3)
349# define DC_HPD6_INTERRUPT (1 << 17)
350# define DC_HPD6_RX_INTERRUPT (1 << 18)
351#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
352
353#define DAC_AUTODETECT_INT_CONTROL 0x67c8
354
355#define DC_HPD1_INT_STATUS 0x601c
356#define DC_HPD2_INT_STATUS 0x6028
357#define DC_HPD3_INT_STATUS 0x6034
358#define DC_HPD4_INT_STATUS 0x6040
359#define DC_HPD5_INT_STATUS 0x604c
360#define DC_HPD6_INT_STATUS 0x6058
361# define DC_HPDx_INT_STATUS (1 << 0)
362# define DC_HPDx_SENSE (1 << 1)
363# define DC_HPDx_SENSE_DELAYED (1 << 4)
364# define DC_HPDx_RX_INT_STATUS (1 << 8)
365
366#define DC_HPD1_INT_CONTROL 0x6020
367#define DC_HPD2_INT_CONTROL 0x602c
368#define DC_HPD3_INT_CONTROL 0x6038
369#define DC_HPD4_INT_CONTROL 0x6044
370#define DC_HPD5_INT_CONTROL 0x6050
371#define DC_HPD6_INT_CONTROL 0x605c
372# define DC_HPDx_INT_ACK (1 << 0)
373# define DC_HPDx_INT_POLARITY (1 << 8)
374# define DC_HPDx_INT_EN (1 << 16)
375# define DC_HPDx_RX_INT_ACK (1 << 20)
376# define DC_HPDx_RX_INT_EN (1 << 24)
377
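
Since the DC_HPDx_* bits are common to all six pads, only the register offset changes per pad; acknowledging is a read-modify-write of the matching INT_CONTROL register. A sketch for pad 1 (rreg32()/wreg32() hypothetical):

#include <stdint.h>

extern uint32_t rreg32(uint32_t reg);		/* hypothetical MMIO helpers */
extern void wreg32(uint32_t reg, uint32_t val);

static void cik_hpd1_irq_ack(void)
{
	uint32_t stat = rreg32(DC_HPD1_INT_STATUS);

	if (stat & DC_HPDx_INT_STATUS)
		wreg32(DC_HPD1_INT_CONTROL,
		       rreg32(DC_HPD1_INT_CONTROL) | DC_HPDx_INT_ACK);
	if (stat & DC_HPDx_RX_INT_STATUS)
		wreg32(DC_HPD1_INT_CONTROL,
		       rreg32(DC_HPD1_INT_CONTROL) | DC_HPDx_RX_INT_ACK);
}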
378#define DC_HPD1_CONTROL 0x6024
379#define DC_HPD2_CONTROL 0x6030
380#define DC_HPD3_CONTROL 0x603c
381#define DC_HPD4_CONTROL 0x6048
382#define DC_HPD5_CONTROL 0x6054
383#define DC_HPD6_CONTROL 0x6060
384# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
385# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
386# define DC_HPDx_EN (1 << 28)
387
388#define GRBM_CNTL 0x8000
389#define GRBM_READ_TIMEOUT(x) ((x) << 0)
390
391#define GRBM_STATUS2 0x8008
392#define ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000F
393#define ME0PIPE1_CF_RQ_PENDING (1 << 4)
394#define ME0PIPE1_PF_RQ_PENDING (1 << 5)
395#define ME1PIPE0_RQ_PENDING (1 << 6)
396#define ME1PIPE1_RQ_PENDING (1 << 7)
397#define ME1PIPE2_RQ_PENDING (1 << 8)
398#define ME1PIPE3_RQ_PENDING (1 << 9)
399#define ME2PIPE0_RQ_PENDING (1 << 10)
400#define ME2PIPE1_RQ_PENDING (1 << 11)
401#define ME2PIPE2_RQ_PENDING (1 << 12)
402#define ME2PIPE3_RQ_PENDING (1 << 13)
403#define RLC_RQ_PENDING (1 << 14)
404#define RLC_BUSY (1 << 24)
405#define TC_BUSY (1 << 25)
406#define CPF_BUSY (1 << 28)
407#define CPC_BUSY (1 << 29)
408#define CPG_BUSY (1 << 30)
409
410#define GRBM_STATUS 0x8010
411#define ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000F
412#define SRBM_RQ_PENDING (1 << 5)
413#define ME0PIPE0_CF_RQ_PENDING (1 << 7)
414#define ME0PIPE0_PF_RQ_PENDING (1 << 8)
415#define GDS_DMA_RQ_PENDING (1 << 9)
416#define DB_CLEAN (1 << 12)
417#define CB_CLEAN (1 << 13)
418#define TA_BUSY (1 << 14)
419#define GDS_BUSY (1 << 15)
420#define WD_BUSY_NO_DMA (1 << 16)
421#define VGT_BUSY (1 << 17)
422#define IA_BUSY_NO_DMA (1 << 18)
423#define IA_BUSY (1 << 19)
424#define SX_BUSY (1 << 20)
425#define WD_BUSY (1 << 21)
426#define SPI_BUSY (1 << 22)
427#define BCI_BUSY (1 << 23)
428#define SC_BUSY (1 << 24)
429#define PA_BUSY (1 << 25)
430#define DB_BUSY (1 << 26)
431#define CP_COHERENCY_BUSY (1 << 28)
432#define CP_BUSY (1 << 29)
433#define CB_BUSY (1 << 30)
434#define GUI_ACTIVE (1 << 31)
435#define GRBM_STATUS_SE0 0x8014
436#define GRBM_STATUS_SE1 0x8018
437#define GRBM_STATUS_SE2 0x8038
438#define GRBM_STATUS_SE3 0x803C
439#define SE_DB_CLEAN (1 << 1)
440#define SE_CB_CLEAN (1 << 2)
441#define SE_BCI_BUSY (1 << 22)
442#define SE_VGT_BUSY (1 << 23)
443#define SE_PA_BUSY (1 << 24)
444#define SE_TA_BUSY (1 << 25)
445#define SE_SX_BUSY (1 << 26)
446#define SE_SPI_BUSY (1 << 27)
447#define SE_SC_BUSY (1 << 29)
448#define SE_DB_BUSY (1 << 30)
449#define SE_CB_BUSY (1 << 31)
450
451#define GRBM_SOFT_RESET 0x8020
452#define SOFT_RESET_CP (1 << 0) /* All CP blocks */
453#define SOFT_RESET_RLC (1 << 2) /* RLC */
454#define SOFT_RESET_GFX (1 << 16) /* GFX */
455#define SOFT_RESET_CPF (1 << 17) /* CP fetcher shared by gfx and compute */
456#define SOFT_RESET_CPC (1 << 18) /* CP Compute (MEC1/2) */
457#define SOFT_RESET_CPG (1 << 19) /* CP GFX (PFP, ME, CE) */
458
459#define GRBM_INT_CNTL 0x8060
460# define RDERR_INT_ENABLE (1 << 0)
461# define GUI_IDLE_INT_ENABLE (1 << 19)
462
463#define CP_CPC_STATUS 0x8210
464#define CP_CPC_BUSY_STAT 0x8214
465#define CP_CPC_STALLED_STAT1 0x8218
466#define CP_CPF_STATUS 0x821c
467#define CP_CPF_BUSY_STAT 0x8220
468#define CP_CPF_STALLED_STAT1 0x8224
469
470#define CP_MEC_CNTL 0x8234
471#define MEC_ME2_HALT (1 << 28)
472#define MEC_ME1_HALT (1 << 30)
473
478#define CP_STALLED_STAT3 0x8670
479#define CP_STALLED_STAT1 0x8674
480#define CP_STALLED_STAT2 0x8678
481
482#define CP_STAT 0x8680
483
484#define CP_ME_CNTL 0x86D8
485#define CP_CE_HALT (1 << 24)
486#define CP_PFP_HALT (1 << 26)
487#define CP_ME_HALT (1 << 28)
488
489#define CP_RB0_RPTR 0x8700
490#define CP_RB_WPTR_DELAY 0x8704
491
492#define CP_MEQ_THRESHOLDS 0x8764
493#define MEQ1_START(x) ((x) << 0)
494#define MEQ2_START(x) ((x) << 8)
495
496#define VGT_VTX_VECT_EJECT_REG 0x88B0
497
498#define VGT_CACHE_INVALIDATION 0x88C4
499#define CACHE_INVALIDATION(x) ((x) << 0)
500#define VC_ONLY 0
501#define TC_ONLY 1
502#define VC_AND_TC 2
503#define AUTO_INVLD_EN(x) ((x) << 6)
504#define NO_AUTO 0
505#define ES_AUTO 1
506#define GS_AUTO 2
507#define ES_AND_GS_AUTO 3
508
509#define VGT_GS_VERTEX_REUSE 0x88D4
510
511#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
512#define INACTIVE_CUS_MASK 0xFFFF0000
513#define INACTIVE_CUS_SHIFT 16
514#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
515
516#define PA_CL_ENHANCE 0x8A14
517#define CLIP_VTX_REORDER_ENA (1 << 0)
518#define NUM_CLIP_SEQ(x) ((x) << 1)
519
520#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
521#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
522#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
523
524#define PA_SC_FIFO_SIZE 0x8BCC
525#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
526#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
527#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
528#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
529
530#define PA_SC_ENHANCE 0x8BF0
531#define ENABLE_PA_SC_OUT_OF_ORDER (1 << 0)
532#define DISABLE_PA_SC_GUIDANCE (1 << 13)
533
534#define SQ_CONFIG 0x8C00
535
536#define SH_MEM_BASES 0x8C28
537/* if PTR32, these are the bases for scratch and lds */
538#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
539#define SHARED_BASE(x) ((x) << 16) /* LDS */
540#define SH_MEM_APE1_BASE 0x8C2C
541/* if PTR32, this is the base location of GPUVM */
542#define SH_MEM_APE1_LIMIT 0x8C30
543/* if PTR32, this is the upper limit of GPUVM */
544#define SH_MEM_CONFIG 0x8C34
545#define PTR32 (1 << 0)
546#define ALIGNMENT_MODE(x) ((x) << 2)
547#define SH_MEM_ALIGNMENT_MODE_DWORD 0
548#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1
549#define SH_MEM_ALIGNMENT_MODE_STRICT 2
550#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
551#define DEFAULT_MTYPE(x) ((x) << 4)
552#define APE1_MTYPE(x) ((x) << 7)
553
554#define SX_DEBUG_1 0x9060
555
556#define SPI_CONFIG_CNTL 0x9100
557
558#define SPI_CONFIG_CNTL_1 0x913C
559#define VTX_DONE_DELAY(x) ((x) << 0)
560#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
561
562#define TA_CNTL_AUX 0x9508
563
564#define DB_DEBUG 0x9830
565#define DB_DEBUG2 0x9834
566#define DB_DEBUG3 0x9838
567
568#define CC_RB_BACKEND_DISABLE 0x98F4
569#define BACKEND_DISABLE(x) ((x) << 16)
570#define GB_ADDR_CONFIG 0x98F8
571#define NUM_PIPES(x) ((x) << 0)
572#define NUM_PIPES_MASK 0x00000007
573#define NUM_PIPES_SHIFT 0
574#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
575#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
576#define PIPE_INTERLEAVE_SIZE_SHIFT 4
577#define NUM_SHADER_ENGINES(x) ((x) << 12)
578#define NUM_SHADER_ENGINES_MASK 0x00003000
579#define NUM_SHADER_ENGINES_SHIFT 12
580#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
581#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
582#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
583#define ROW_SIZE(x) ((x) << 28)
584#define ROW_SIZE_MASK 0x30000000
585#define ROW_SIZE_SHIFT 28
586
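
GB_ADDR_CONFIG is one of the few registers here with both setter macros and MASK/SHIFT pairs, because drivers mostly read back what the hardware straps at reset. The row size field decodes as 1 << field (1, 2 or 4 KB); treating the other fields as the same log2-style encoding is an assumption here. A decode sketch:

#include <stdint.h>

static void cik_decode_gb_addr_config(uint32_t v,
				      unsigned int *num_pipes_field,
				      unsigned int *row_size_kb)
{
	*num_pipes_field = (v & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	*row_size_kb = 1u << ((v & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT);
}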
587#define GB_TILE_MODE0 0x9910
588# define ARRAY_MODE(x) ((x) << 2)
589# define ARRAY_LINEAR_GENERAL 0
590# define ARRAY_LINEAR_ALIGNED 1
591# define ARRAY_1D_TILED_THIN1 2
592# define ARRAY_2D_TILED_THIN1 4
593# define ARRAY_PRT_TILED_THIN1 5
594# define ARRAY_PRT_2D_TILED_THIN1 6
595# define PIPE_CONFIG(x) ((x) << 6)
596# define ADDR_SURF_P2 0
597# define ADDR_SURF_P4_8x16 4
598# define ADDR_SURF_P4_16x16 5
599# define ADDR_SURF_P4_16x32 6
600# define ADDR_SURF_P4_32x32 7
601# define ADDR_SURF_P8_16x16_8x16 8
602# define ADDR_SURF_P8_16x32_8x16 9
603# define ADDR_SURF_P8_32x32_8x16 10
604# define ADDR_SURF_P8_16x32_16x16 11
605# define ADDR_SURF_P8_32x32_16x16 12
606# define ADDR_SURF_P8_32x32_16x32 13
607# define ADDR_SURF_P8_32x64_32x32 14
608# define TILE_SPLIT(x) ((x) << 11)
609# define ADDR_SURF_TILE_SPLIT_64B 0
610# define ADDR_SURF_TILE_SPLIT_128B 1
611# define ADDR_SURF_TILE_SPLIT_256B 2
612# define ADDR_SURF_TILE_SPLIT_512B 3
613# define ADDR_SURF_TILE_SPLIT_1KB 4
614# define ADDR_SURF_TILE_SPLIT_2KB 5
615# define ADDR_SURF_TILE_SPLIT_4KB 6
616# define MICRO_TILE_MODE_NEW(x) ((x) << 22)
617# define ADDR_SURF_DISPLAY_MICRO_TILING 0
618# define ADDR_SURF_THIN_MICRO_TILING 1
619# define ADDR_SURF_DEPTH_MICRO_TILING 2
620# define ADDR_SURF_ROTATED_MICRO_TILING 3
621# define SAMPLE_SPLIT(x) ((x) << 25)
622# define ADDR_SURF_SAMPLE_SPLIT_1 0
623# define ADDR_SURF_SAMPLE_SPLIT_2 1
624# define ADDR_SURF_SAMPLE_SPLIT_4 2
625# define ADDR_SURF_SAMPLE_SPLIT_8 3
626
627#define GB_MACROTILE_MODE0 0x9990
628# define BANK_WIDTH(x) ((x) << 0)
629# define ADDR_SURF_BANK_WIDTH_1 0
630# define ADDR_SURF_BANK_WIDTH_2 1
631# define ADDR_SURF_BANK_WIDTH_4 2
632# define ADDR_SURF_BANK_WIDTH_8 3
633# define BANK_HEIGHT(x) ((x) << 2)
634# define ADDR_SURF_BANK_HEIGHT_1 0
635# define ADDR_SURF_BANK_HEIGHT_2 1
636# define ADDR_SURF_BANK_HEIGHT_4 2
637# define ADDR_SURF_BANK_HEIGHT_8 3
638# define MACRO_TILE_ASPECT(x) ((x) << 4)
639# define ADDR_SURF_MACRO_ASPECT_1 0
640# define ADDR_SURF_MACRO_ASPECT_2 1
641# define ADDR_SURF_MACRO_ASPECT_4 2
642# define ADDR_SURF_MACRO_ASPECT_8 3
643# define NUM_BANKS(x) ((x) << 6)
644# define ADDR_SURF_2_BANK 0
645# define ADDR_SURF_4_BANK 1
646# define ADDR_SURF_8_BANK 2
647# define ADDR_SURF_16_BANK 3
648
649#define CB_HW_CONTROL 0x9A10
650
651#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
652#define BACKEND_DISABLE_MASK 0x00FF0000
653#define BACKEND_DISABLE_SHIFT 16
654
655#define TCP_CHAN_STEER_LO 0xac0c
656#define TCP_CHAN_STEER_HI 0xac10
657
658#define TC_CFG_L1_LOAD_POLICY0 0xAC68
659#define TC_CFG_L1_LOAD_POLICY1 0xAC6C
660#define TC_CFG_L1_STORE_POLICY 0xAC70
661#define TC_CFG_L2_LOAD_POLICY0 0xAC74
662#define TC_CFG_L2_LOAD_POLICY1 0xAC78
663#define TC_CFG_L2_STORE_POLICY0 0xAC7C
664#define TC_CFG_L2_STORE_POLICY1 0xAC80
665#define TC_CFG_L2_ATOMIC_POLICY 0xAC84
666#define TC_CFG_L1_VOLATILE 0xAC88
667#define TC_CFG_L2_VOLATILE 0xAC8C
668
669#define CP_RB0_BASE 0xC100
670#define CP_RB0_CNTL 0xC104
671#define RB_BUFSZ(x) ((x) << 0)
672#define RB_BLKSZ(x) ((x) << 8)
673#define BUF_SWAP_32BIT (2 << 16)
674#define RB_NO_UPDATE (1 << 27)
675#define RB_RPTR_WR_ENA (1 << 31)
676
677#define CP_RB0_RPTR_ADDR 0xC10C
678#define RB_RPTR_SWAP_32BIT (2 << 0)
679#define CP_RB0_RPTR_ADDR_HI 0xC110
680#define CP_RB0_WPTR 0xC114
681
682#define CP_DEVICE_ID 0xC12C
683#define CP_ENDIAN_SWAP 0xC140
684#define CP_RB_VMID 0xC144
685
686#define CP_PFP_UCODE_ADDR 0xC150
687#define CP_PFP_UCODE_DATA 0xC154
688#define CP_ME_RAM_RADDR 0xC158
689#define CP_ME_RAM_WADDR 0xC15C
690#define CP_ME_RAM_DATA 0xC160
691
692#define CP_CE_UCODE_ADDR 0xC168
693#define CP_CE_UCODE_DATA 0xC16C
694#define CP_MEC_ME1_UCODE_ADDR 0xC170
695#define CP_MEC_ME1_UCODE_DATA 0xC174
696#define CP_MEC_ME2_UCODE_ADDR 0xC178
697#define CP_MEC_ME2_UCODE_DATA 0xC17C
698
699#define CP_INT_CNTL_RING0 0xC1A8
700# define CNTX_BUSY_INT_ENABLE (1 << 19)
701# define CNTX_EMPTY_INT_ENABLE (1 << 20)
702# define PRIV_INSTR_INT_ENABLE (1 << 22)
703# define PRIV_REG_INT_ENABLE (1 << 23)
704# define TIME_STAMP_INT_ENABLE (1 << 26)
705# define CP_RINGID2_INT_ENABLE (1 << 29)
706# define CP_RINGID1_INT_ENABLE (1 << 30)
707# define CP_RINGID0_INT_ENABLE (1 << 31)
708
709#define CP_INT_STATUS_RING0 0xC1B4
710# define PRIV_INSTR_INT_STAT (1 << 22)
711# define PRIV_REG_INT_STAT (1 << 23)
712# define TIME_STAMP_INT_STAT (1 << 26)
713# define CP_RINGID2_INT_STAT (1 << 29)
714# define CP_RINGID1_INT_STAT (1 << 30)
715# define CP_RINGID0_INT_STAT (1 << 31)
716
717#define CP_CPF_DEBUG 0xC200
718
719#define CP_PQ_WPTR_POLL_CNTL 0xC20C
720#define WPTR_POLL_EN (1 << 31)
721
722#define CP_ME1_PIPE0_INT_CNTL 0xC214
723#define CP_ME1_PIPE1_INT_CNTL 0xC218
724#define CP_ME1_PIPE2_INT_CNTL 0xC21C
725#define CP_ME1_PIPE3_INT_CNTL 0xC220
726#define CP_ME2_PIPE0_INT_CNTL 0xC224
727#define CP_ME2_PIPE1_INT_CNTL 0xC228
728#define CP_ME2_PIPE2_INT_CNTL 0xC22C
729#define CP_ME2_PIPE3_INT_CNTL 0xC230
730# define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
731# define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
732# define PRIV_REG_INT_ENABLE (1 << 23)
733# define TIME_STAMP_INT_ENABLE (1 << 26)
734# define GENERIC2_INT_ENABLE (1 << 29)
735# define GENERIC1_INT_ENABLE (1 << 30)
736# define GENERIC0_INT_ENABLE (1 << 31)
737#define CP_ME1_PIPE0_INT_STATUS 0xC214
738#define CP_ME1_PIPE1_INT_STATUS 0xC218
739#define CP_ME1_PIPE2_INT_STATUS 0xC21C
740#define CP_ME1_PIPE3_INT_STATUS 0xC220
741#define CP_ME2_PIPE0_INT_STATUS 0xC224
742#define CP_ME2_PIPE1_INT_STATUS 0xC228
743#define CP_ME2_PIPE2_INT_STATUS 0xC22C
744#define CP_ME2_PIPE3_INT_STATUS 0xC230
745# define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
746# define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
747# define PRIV_REG_INT_STATUS (1 << 23)
748# define TIME_STAMP_INT_STATUS (1 << 26)
749# define GENERIC2_INT_STATUS (1 << 29)
750# define GENERIC1_INT_STATUS (1 << 30)
751# define GENERIC0_INT_STATUS (1 << 31)
752
753#define CP_MAX_CONTEXT 0xC2B8
754
755#define CP_RB0_BASE_HI 0xC2C4
756
757#define RLC_CNTL 0xC300
758# define RLC_ENABLE (1 << 0)
759
760#define RLC_MC_CNTL 0xC30C
761
762#define RLC_LB_CNTR_MAX 0xC348
763
764#define RLC_LB_CNTL 0xC364
765
766#define RLC_LB_CNTR_INIT 0xC36C
767
768#define RLC_SAVE_AND_RESTORE_BASE 0xC374
769#define RLC_DRIVER_DMA_STATUS 0xC378
770
771#define RLC_GPM_UCODE_ADDR 0xC388
772#define RLC_GPM_UCODE_DATA 0xC38C
773#define RLC_GPU_CLOCK_COUNT_LSB 0xC390
774#define RLC_GPU_CLOCK_COUNT_MSB 0xC394
775#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
776#define RLC_UCODE_CNTL 0xC39C
777
778#define RLC_CGCG_CGLS_CTRL 0xC424
779
780#define RLC_LB_INIT_CU_MASK 0xC43C
781
782#define RLC_LB_PARAMS 0xC444
783
784#define RLC_SERDES_CU_MASTER_BUSY 0xC484
785#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
786# define SE_MASTER_BUSY_MASK 0x0000ffff
787# define GC_MASTER_BUSY (1 << 16)
788# define TC0_MASTER_BUSY (1 << 17)
789# define TC1_MASTER_BUSY (1 << 18)
790
791#define RLC_GPM_SCRATCH_ADDR 0xC4B0
792#define RLC_GPM_SCRATCH_DATA 0xC4B4
793
794#define CP_HPD_EOP_BASE_ADDR 0xC904
795#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
796#define CP_HPD_EOP_VMID 0xC90C
797#define CP_HPD_EOP_CONTROL 0xC910
798#define EOP_SIZE(x) ((x) << 0)
799#define EOP_SIZE_MASK (0x3f << 0)
800#define CP_MQD_BASE_ADDR 0xC914
801#define CP_MQD_BASE_ADDR_HI 0xC918
802#define CP_HQD_ACTIVE 0xC91C
803#define CP_HQD_VMID 0xC920
804
805#define CP_HQD_PQ_BASE 0xC934
806#define CP_HQD_PQ_BASE_HI 0xC938
807#define CP_HQD_PQ_RPTR 0xC93C
808#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940
809#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944
810#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948
811#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C
812#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950
813#define DOORBELL_OFFSET(x) ((x) << 2)
814#define DOORBELL_OFFSET_MASK (0x1fffff << 2)
815#define DOORBELL_SOURCE (1 << 28)
816#define DOORBELL_SCHD_HIT (1 << 29)
817#define DOORBELL_EN (1 << 30)
818#define DOORBELL_HIT (1 << 31)
819#define CP_HQD_PQ_WPTR 0xC954
820#define CP_HQD_PQ_CONTROL 0xC958
821#define QUEUE_SIZE(x) ((x) << 0)
822#define QUEUE_SIZE_MASK (0x3f << 0)
823#define RPTR_BLOCK_SIZE(x) ((x) << 8)
824#define RPTR_BLOCK_SIZE_MASK (0x3f << 8)
825#define PQ_VOLATILE (1 << 26)
826#define NO_UPDATE_RPTR (1 << 27)
827#define UNORD_DISPATCH (1 << 28)
828#define ROQ_PQ_IB_FLIP (1 << 29)
829#define PRIV_STATE (1 << 30)
830#define KMD_QUEUE (1 << 31)
831
832#define CP_HQD_DEQUEUE_REQUEST 0xC974
833
834#define CP_MQD_CONTROL 0xC99C
835#define MQD_VMID(x) ((x) << 0)
836#define MQD_VMID_MASK (0xf << 0)
837
838#define PA_SC_RASTER_CONFIG 0x28350
839# define RASTER_CONFIG_RB_MAP_0 0
840# define RASTER_CONFIG_RB_MAP_1 1
841# define RASTER_CONFIG_RB_MAP_2 2
842# define RASTER_CONFIG_RB_MAP_3 3
843
844#define VGT_EVENT_INITIATOR 0x28a90
845# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
846# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
847# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
848# define CACHE_FLUSH_TS (4 << 0)
849# define CACHE_FLUSH (6 << 0)
850# define CS_PARTIAL_FLUSH (7 << 0)
851# define VGT_STREAMOUT_RESET (10 << 0)
852# define END_OF_PIPE_INCR_DE (11 << 0)
853# define END_OF_PIPE_IB_END (12 << 0)
854# define RST_PIX_CNT (13 << 0)
855# define VS_PARTIAL_FLUSH (15 << 0)
856# define PS_PARTIAL_FLUSH (16 << 0)
857# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
858# define ZPASS_DONE (21 << 0)
859# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
860# define PERFCOUNTER_START (23 << 0)
861# define PERFCOUNTER_STOP (24 << 0)
862# define PIPELINESTAT_START (25 << 0)
863# define PIPELINESTAT_STOP (26 << 0)
864# define PERFCOUNTER_SAMPLE (27 << 0)
865# define SAMPLE_PIPELINESTAT (30 << 0)
866# define SO_VGT_STREAMOUT_FLUSH (31 << 0)
867# define SAMPLE_STREAMOUTSTATS (32 << 0)
868# define RESET_VTX_CNT (33 << 0)
869# define VGT_FLUSH (36 << 0)
870# define BOTTOM_OF_PIPE_TS (40 << 0)
871# define DB_CACHE_FLUSH_AND_INV (42 << 0)
872# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
873# define FLUSH_AND_INV_DB_META (44 << 0)
874# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
875# define FLUSH_AND_INV_CB_META (46 << 0)
876# define CS_DONE (47 << 0)
877# define PS_DONE (48 << 0)
878# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
879# define THREAD_TRACE_START (51 << 0)
880# define THREAD_TRACE_STOP (52 << 0)
881# define THREAD_TRACE_FLUSH (54 << 0)
882# define THREAD_TRACE_FINISH (55 << 0)
883# define PIXEL_PIPE_STAT_CONTROL (56 << 0)
884# define PIXEL_PIPE_STAT_DUMP (57 << 0)
885# define PIXEL_PIPE_STAT_RESET (58 << 0)
886
887#define SCRATCH_REG0 0x30100
888#define SCRATCH_REG1 0x30104
889#define SCRATCH_REG2 0x30108
890#define SCRATCH_REG3 0x3010C
891#define SCRATCH_REG4 0x30110
892#define SCRATCH_REG5 0x30114
893#define SCRATCH_REG6 0x30118
894#define SCRATCH_REG7 0x3011C
895
896#define SCRATCH_UMSK 0x30140
897#define SCRATCH_ADDR 0x30144
898
899#define CP_SEM_WAIT_TIMER 0x301BC
900
901#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x301C8
902
903#define CP_WAIT_REG_MEM_TIMEOUT 0x301D0
904
905#define GRBM_GFX_INDEX 0x30800
906#define INSTANCE_INDEX(x) ((x) << 0)
907#define SH_INDEX(x) ((x) << 8)
908#define SE_INDEX(x) ((x) << 16)
909#define SH_BROADCAST_WRITES (1 << 29)
910#define INSTANCE_BROADCAST_WRITES (1 << 30)
911#define SE_BROADCAST_WRITES (1 << 31)
912
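
GRBM_GFX_INDEX steers all subsequent per-shader-engine register accesses: either at one SE/SH instance or broadcast to all of them. The usual select/restore pair looks like this sketch (wreg32() hypothetical):

#include <stdint.h>

extern void wreg32(uint32_t reg, uint32_t val);	/* hypothetical MMIO helper */

static void cik_select_se_sh(uint32_t se, uint32_t sh)
{
	wreg32(GRBM_GFX_INDEX,
	       SE_INDEX(se) | SH_INDEX(sh) | INSTANCE_BROADCAST_WRITES);
}

static void cik_select_all(void)
{
	wreg32(GRBM_GFX_INDEX,
	       SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
	       INSTANCE_BROADCAST_WRITES);
}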
913#define VGT_ESGS_RING_SIZE 0x30900
914#define VGT_GSVS_RING_SIZE 0x30904
915#define VGT_PRIMITIVE_TYPE 0x30908
916#define VGT_INDEX_TYPE 0x3090C
917
918#define VGT_NUM_INDICES 0x30930
919#define VGT_NUM_INSTANCES 0x30934
920#define VGT_TF_RING_SIZE 0x30938
921#define VGT_HS_OFFCHIP_PARAM 0x3093C
922#define VGT_TF_MEMORY_BASE 0x30940
923
924#define PA_SU_LINE_STIPPLE_VALUE 0x30a00
925#define PA_SC_LINE_STIPPLE_STATE 0x30a04
926
927#define SQC_CACHES 0x30d20
928
929#define CP_PERFMON_CNTL 0x36020
930
931#define CGTS_TCC_DISABLE 0x3c00c
932#define CGTS_USER_TCC_DISABLE 0x3c010
933#define TCC_DISABLE_MASK 0xFFFF0000
934#define TCC_DISABLE_SHIFT 16
935
936#define CB_CGTT_SCLK_CTRL 0x3c2a0
937
938/*
939 * PM4
940 */
941#define PACKET_TYPE0 0
942#define PACKET_TYPE1 1
943#define PACKET_TYPE2 2
944#define PACKET_TYPE3 3
945
946#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
947#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
948#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
949#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
950#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
951 (((reg) >> 2) & 0xFFFF) | \
952 ((n) & 0x3FFF) << 16)
953#define CP_PACKET2 0x80000000
954#define PACKET2_PAD_SHIFT 0
955#define PACKET2_PAD_MASK (0x3fffffff << 0)
956
957#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
958
959#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
960 (((op) & 0xFF) << 8) | \
961 ((n) & 0x3FFF) << 16)
962
963#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
964
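
A PM4 command stream is a sequence of dwords: a header built with PACKET0()/PACKET3() followed by payload dwords, where the count field in the header is the payload length minus one. A minimal sketch (ring_write() is a hypothetical stand-in for the driver's ring emit helper):

#include <stdint.h>

extern void ring_write(uint32_t dw);	/* hypothetical ring emit helper */

/* Write one register through a type-0 packet: header plus one payload dword. */
static void pm4_set_reg(uint32_t reg, uint32_t val)
{
	ring_write(PACKET0(reg, 0));	/* count == payload dwords - 1 */
	ring_write(val);
}

/* The GET macros run the other way, for command-stream parsing. */
static int pm4_is_type3(uint32_t header)
{
	return CP_PACKET_GET_TYPE(header) == PACKET_TYPE3;
}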
965/* Packet 3 types */
966#define PACKET3_NOP 0x10
967#define PACKET3_SET_BASE 0x11
968#define PACKET3_BASE_INDEX(x) ((x) << 0)
969#define CE_PARTITION_BASE 3
970#define PACKET3_CLEAR_STATE 0x12
971#define PACKET3_INDEX_BUFFER_SIZE 0x13
972#define PACKET3_DISPATCH_DIRECT 0x15
973#define PACKET3_DISPATCH_INDIRECT 0x16
974#define PACKET3_ATOMIC_GDS 0x1D
975#define PACKET3_ATOMIC_MEM 0x1E
976#define PACKET3_OCCLUSION_QUERY 0x1F
977#define PACKET3_SET_PREDICATION 0x20
978#define PACKET3_REG_RMW 0x21
979#define PACKET3_COND_EXEC 0x22
980#define PACKET3_PRED_EXEC 0x23
981#define PACKET3_DRAW_INDIRECT 0x24
982#define PACKET3_DRAW_INDEX_INDIRECT 0x25
983#define PACKET3_INDEX_BASE 0x26
984#define PACKET3_DRAW_INDEX_2 0x27
985#define PACKET3_CONTEXT_CONTROL 0x28
986#define PACKET3_INDEX_TYPE 0x2A
987#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
988#define PACKET3_DRAW_INDEX_AUTO 0x2D
989#define PACKET3_NUM_INSTANCES 0x2F
990#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
991#define PACKET3_INDIRECT_BUFFER_CONST 0x33
992#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
993#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
994#define PACKET3_DRAW_PREAMBLE 0x36
995#define PACKET3_WRITE_DATA 0x37
996#define WRITE_DATA_DST_SEL(x) ((x) << 8)
997 /* 0 - register
998 * 1 - memory (sync - via GRBM)
999 * 2 - gl2
1000 * 3 - gds
1001 * 4 - reserved
1002 * 5 - memory (async - direct)
1003 */
1004#define WR_ONE_ADDR (1 << 16)
1005#define WR_CONFIRM (1 << 20)
1006#define WRITE_DATA_CACHE_POLICY(x) ((x) << 25)
1007 /* 0 - LRU
1008 * 1 - Stream
1009 */
1010#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
1011 /* 0 - me
1012 * 1 - pfp
1013 * 2 - ce
1014 */
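
Putting the WRITE_DATA fields together, the packet is a header plus four payload dwords: control, address low/high, then the data. A sketch writing one dword to GPU memory through the async path (DST_SEL value 5 from the table above; ring_write() hypothetical as before):

#include <stdint.h>

extern void ring_write(uint32_t dw);	/* hypothetical ring emit helper */

static void pm4_write_data_mem(uint64_t gpu_addr, uint32_t value)
{
	ring_write(PACKET3(PACKET3_WRITE_DATA, 3));	/* 4 payload dwords - 1 */
	ring_write(WRITE_DATA_DST_SEL(5) | WR_CONFIRM);	/* memory (async) */
	ring_write((uint32_t)(gpu_addr & 0xfffffffc));	/* dword-aligned lo */
	ring_write((uint32_t)(gpu_addr >> 32));		/* hi */
	ring_write(value);
}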
1015#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
1016#define PACKET3_MEM_SEMAPHORE 0x39
1017# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
1018# define PACKET3_SEM_SEL_SIGNAL_TYPE (0x1 << 20) /* 0 = increment, 1 = write 1 */
1019# define PACKET3_SEM_CLIENT_CODE(x) ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
1020# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
1021# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
1022#define PACKET3_COPY_DW 0x3B
1023#define PACKET3_WAIT_REG_MEM 0x3C
1024#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
1025 /* 0 - always
1026 * 1 - <
1027 * 2 - <=
1028 * 3 - ==
1029 * 4 - !=
1030 * 5 - >=
1031 * 6 - >
1032 */
1033#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
1034 /* 0 - reg
1035 * 1 - mem
1036 */
1037#define WAIT_REG_MEM_OPERATION(x) ((x) << 6)
1038 /* 0 - wait_reg_mem
1039 * 1 - wr_wait_wr_reg
1040 */
1041#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
1042 /* 0 - me
1043 * 1 - pfp
1044 */
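
WAIT_REG_MEM stalls the selected engine until a register or memory dword satisfies the comparison, making it the CP-side building block for fences and semaphores. A sketch waiting for a memory location to equal a reference value (function 3 is "==" per the table above; the final payload dword is a poll interval):

#include <stdint.h>

extern void ring_write(uint32_t dw);	/* hypothetical ring emit helper */

static void pm4_wait_mem_eq(uint64_t gpu_addr, uint32_t ref, uint32_t mask)
{
	ring_write(PACKET3(PACKET3_WAIT_REG_MEM, 5));	/* 6 payload dwords - 1 */
	ring_write(WAIT_REG_MEM_MEM_SPACE(1) |		/* memory */
		   WAIT_REG_MEM_FUNCTION(3) |		/* == */
		   WAIT_REG_MEM_ENGINE(0));		/* me */
	ring_write((uint32_t)(gpu_addr & 0xfffffffc));
	ring_write((uint32_t)(gpu_addr >> 32));
	ring_write(ref);
	ring_write(mask);
	ring_write(0x20);				/* poll interval */
}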
1045#define PACKET3_INDIRECT_BUFFER 0x3F
1046#define INDIRECT_BUFFER_TCL2_VOLATILE (1 << 22)
1047#define INDIRECT_BUFFER_VALID (1 << 23)
1048#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
1049 /* 0 - LRU
1050 * 1 - Stream
1051 * 2 - Bypass
1052 */
1053#define PACKET3_COPY_DATA 0x40
1054#define PACKET3_PFP_SYNC_ME 0x42
1055#define PACKET3_SURFACE_SYNC 0x43
1056# define PACKET3_DEST_BASE_0_ENA (1 << 0)
1057# define PACKET3_DEST_BASE_1_ENA (1 << 1)
1058# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1059# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
1060# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
1061# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
1062# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
1063# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
1064# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
1065# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
1066# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
1067# define PACKET3_TCL1_VOL_ACTION_ENA (1 << 15)
1068# define PACKET3_TC_VOL_ACTION_ENA (1 << 16) /* L2 */
1069# define PACKET3_TC_WB_ACTION_ENA (1 << 18) /* L2 */
1070# define PACKET3_DEST_BASE_2_ENA (1 << 19)
1071# define PACKET3_DEST_BASE_3_ENA (1 << 21)
1072# define PACKET3_TCL1_ACTION_ENA (1 << 22)
1073# define PACKET3_TC_ACTION_ENA (1 << 23) /* L2 */
1074# define PACKET3_CB_ACTION_ENA (1 << 25)
1075# define PACKET3_DB_ACTION_ENA (1 << 26)
1076# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
1077# define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
1078# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
1079#define PACKET3_COND_WRITE 0x45
1080#define PACKET3_EVENT_WRITE 0x46
1081#define EVENT_TYPE(x) ((x) << 0)
1082#define EVENT_INDEX(x) ((x) << 8)
1083 /* 0 - any non-TS event
1084 * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
1085 * 2 - SAMPLE_PIPELINESTAT
1086 * 3 - SAMPLE_STREAMOUTSTAT*
1087 * 4 - *S_PARTIAL_FLUSH
1088 * 5 - EOP events
1089 * 6 - EOS events
1090 */
1091#define PACKET3_EVENT_WRITE_EOP 0x47
1092#define EOP_TCL1_VOL_ACTION_EN (1 << 12)
1093#define EOP_TC_VOL_ACTION_EN (1 << 13) /* L2 */
1094#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
1095#define EOP_TCL1_ACTION_EN (1 << 16)
1096#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
1097#define EOP_CACHE_POLICY(x) ((x) << 25)
1098 /* 0 - LRU
1099 * 1 - Stream
1100 * 2 - Bypass
1101 */
1102#define EOP_TCL2_VOLATILE (1 << 27)
1103#define DATA_SEL(x) ((x) << 29)
1104 /* 0 - discard
1105 * 1 - send low 32bit data
1106 * 2 - send 64bit data
1107 * 3 - send 64bit GPU counter value
1108 * 4 - send 64bit sys counter value
1109 */
1110#define INT_SEL(x) ((x) << 24)
1111 /* 0 - none
1112 * 1 - interrupt only (DATA_SEL = 0)
1113 * 2 - interrupt when data write is confirmed
1114 */
1115#define DST_SEL(x) ((x) << 16)
1116 /* 0 - MC
1117 * 1 - TC/L2
1118 */
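
These fields combine into the usual end-of-pipe fence: once all prior work drains, the CP writes a sequence number to memory and can raise an interrupt. A hedged sketch of the typical five-payload-dword layout (EVENT_TYPE/EVENT_INDEX from PACKET3_EVENT_WRITE above, the event number from the earlier VGT_EVENT_INITIATOR list; ring_write() hypothetical):

#include <stdint.h>

extern void ring_write(uint32_t dw);	/* hypothetical ring emit helper */

static void pm4_emit_eop_fence(uint64_t addr, uint32_t seq)
{
	ring_write(PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	ring_write(EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
		   EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
		   EVENT_INDEX(5));		/* 5 == EOP event */
	ring_write((uint32_t)(addr & 0xfffffffc));
	ring_write(((uint32_t)(addr >> 32) & 0xffff) |
		   DATA_SEL(1) |		/* send low 32-bit data */
		   INT_SEL(2));			/* irq when write confirmed */
	ring_write(seq);
	ring_write(0);				/* upper data dword, unused here */
}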
1119#define PACKET3_EVENT_WRITE_EOS 0x48
1120#define PACKET3_RELEASE_MEM 0x49
1121#define PACKET3_PREAMBLE_CNTL 0x4A
1122# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
1123# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
1124#define PACKET3_DMA_DATA 0x50
1125#define PACKET3_ACQUIRE_MEM 0x58
1126#define PACKET3_REWIND 0x59
1127#define PACKET3_LOAD_UCONFIG_REG 0x5E
1128#define PACKET3_LOAD_SH_REG 0x5F
1129#define PACKET3_LOAD_CONFIG_REG 0x60
1130#define PACKET3_LOAD_CONTEXT_REG 0x61
1131#define PACKET3_SET_CONFIG_REG 0x68
1132#define PACKET3_SET_CONFIG_REG_START 0x00008000
1133#define PACKET3_SET_CONFIG_REG_END 0x0000b000
1134#define PACKET3_SET_CONTEXT_REG 0x69
1135#define PACKET3_SET_CONTEXT_REG_START 0x00028000
1136#define PACKET3_SET_CONTEXT_REG_END 0x00029000
1137#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
1138#define PACKET3_SET_SH_REG 0x76
1139#define PACKET3_SET_SH_REG_START 0x0000b000
1140#define PACKET3_SET_SH_REG_END 0x0000c000
1141#define PACKET3_SET_SH_REG_OFFSET 0x77
1142#define PACKET3_SET_QUEUE_REG 0x78
1143#define PACKET3_SET_UCONFIG_REG 0x79
1144#define PACKET3_SET_UCONFIG_REG_START 0x00030000
1145#define PACKET3_SET_UCONFIG_REG_END 0x00031000
1146#define PACKET3_SCRATCH_RAM_WRITE 0x7D
1147#define PACKET3_SCRATCH_RAM_READ 0x7E
1148#define PACKET3_LOAD_CONST_RAM 0x80
1149#define PACKET3_WRITE_CONST_RAM 0x81
1150#define PACKET3_DUMP_CONST_RAM 0x83
1151#define PACKET3_INCREMENT_CE_COUNTER 0x84
1152#define PACKET3_INCREMENT_DE_COUNTER 0x85
1153#define PACKET3_WAIT_ON_CE_COUNTER 0x86
1154#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
1155#define PACKET3_SWITCH_BUFFER 0x8B
1156
1157/* SDMA - first instance at 0xd000, second at 0xd800 */
1158#define SDMA0_REGISTER_OFFSET 0x0 /* not a register */
1159#define SDMA1_REGISTER_OFFSET 0x800 /* not a register */
1160
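
Because the two SDMA engines are register-compatible, per-engine code just adds one of the two instance offsets to the SDMA0_* names. A sketch halting or releasing engine i with SDMA0_ME_CNTL/SDMA_HALT from further down (rreg32()/wreg32() hypothetical):

#include <stdint.h>

extern uint32_t rreg32(uint32_t reg);		/* hypothetical MMIO helpers */
extern void wreg32(uint32_t reg, uint32_t val);

static const uint32_t sdma_offsets[2] = {
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET,
};

static void cik_sdma_set_halt(int i, int halt)
{
	uint32_t me_cntl = rreg32(SDMA0_ME_CNTL + sdma_offsets[i]);

	if (halt)
		me_cntl |= SDMA_HALT;
	else
		me_cntl &= ~SDMA_HALT;
	wreg32(SDMA0_ME_CNTL + sdma_offsets[i], me_cntl);
}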
1161#define SDMA0_UCODE_ADDR 0xD000
1162#define SDMA0_UCODE_DATA 0xD004
1163
1164#define SDMA0_CNTL 0xD010
1165# define TRAP_ENABLE (1 << 0)
1166# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
1167# define SEM_WAIT_INT_ENABLE (1 << 2)
1168# define DATA_SWAP_ENABLE (1 << 3)
1169# define FENCE_SWAP_ENABLE (1 << 4)
1170# define AUTO_CTXSW_ENABLE (1 << 18)
1171# define CTXEMPTY_INT_ENABLE (1 << 28)
1172
1173#define SDMA0_TILING_CONFIG 0xD018
1174
1175#define SDMA0_SEM_INCOMPLETE_TIMER_CNTL 0xD020
1176#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0xD024
1177
1178#define SDMA0_STATUS_REG 0xd034
1179# define SDMA_IDLE (1 << 0)
1180
1181#define SDMA0_ME_CNTL 0xD048
1182# define SDMA_HALT (1 << 0)
1183
1184#define SDMA0_GFX_RB_CNTL 0xD200
1185# define SDMA_RB_ENABLE (1 << 0)
1186# define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */
1187# define SDMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
1188# define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12)
1189# define SDMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
1190# define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
1191#define SDMA0_GFX_RB_BASE 0xD204
1192#define SDMA0_GFX_RB_BASE_HI 0xD208
1193#define SDMA0_GFX_RB_RPTR 0xD20C
1194#define SDMA0_GFX_RB_WPTR 0xD210
1195
1196#define SDMA0_GFX_RB_RPTR_ADDR_HI 0xD220
1197#define SDMA0_GFX_RB_RPTR_ADDR_LO 0xD224
1198#define SDMA0_GFX_IB_CNTL 0xD228
1199# define SDMA_IB_ENABLE (1 << 0)
1200# define SDMA_IB_SWAP_ENABLE (1 << 4)
1201# define SDMA_SWITCH_INSIDE_IB (1 << 8)
1202# define SDMA_CMD_VMID(x) ((x) << 16)
1203
1204#define SDMA0_GFX_VIRTUAL_ADDR 0xD29C
1205#define SDMA0_GFX_APE1_CNTL 0xD2A0
1206
1207#define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \
1208 (((sub_op) & 0xFF) << 8) | \
1209 (((op) & 0xFF) << 0))
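
SDMA packets use this header layout (opcode, sub-opcode, extra bits) rather than the PM4 one; the opcode and sub-opcode values are listed below. For instance, the header dword for a linear copy, whose source/destination addresses and byte count follow as payload dwords, would be built as in this sketch:

#include <stdint.h>

/* Header dword for a linear SDMA copy; payload dwords follow it. */
static uint32_t sdma_copy_linear_header(void)
{
	return SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
}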
1210/* sDMA opcodes */
1211#define SDMA_OPCODE_NOP 0
1212#define SDMA_OPCODE_COPY 1
1213# define SDMA_COPY_SUB_OPCODE_LINEAR 0
1214# define SDMA_COPY_SUB_OPCODE_TILED 1
1215# define SDMA_COPY_SUB_OPCODE_SOA 3
1216# define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW 4
1217# define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW 5
1218# define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW 6
1219#define SDMA_OPCODE_WRITE 2
1220# define SDMA_WRITE_SUB_OPCODE_LINEAR 0
1221# define SDMA_WRITE_SUB_OPCODE_TILED 1
1222#define SDMA_OPCODE_INDIRECT_BUFFER 4
1223#define SDMA_OPCODE_FENCE 5
1224#define SDMA_OPCODE_TRAP 6
1225#define SDMA_OPCODE_SEMAPHORE 7
1226# define SDMA_SEMAPHORE_EXTRA_O (1 << 13)
1227 /* 0 - increment
1228 * 1 - write 1
1229 */
1230# define SDMA_SEMAPHORE_EXTRA_S (1 << 14)
1231 /* 0 - wait
1232 * 1 - signal
1233 */
1234# define SDMA_SEMAPHORE_EXTRA_M (1 << 15)
1235 /* mailbox */
1236#define SDMA_OPCODE_POLL_REG_MEM 8
1237# define SDMA_POLL_REG_MEM_EXTRA_OP(x) ((x) << 10)
1238 /* 0 - wait_reg_mem
1239 * 1 - wr_wait_wr_reg
1240 */
1241# define SDMA_POLL_REG_MEM_EXTRA_FUNC(x) ((x) << 12)
1242 /* 0 - always
1243 * 1 - <
1244 * 2 - <=
1245 * 3 - ==
1246 * 4 - !=
1247 * 5 - >=
1248 * 6 - >
1249 */
1250# define SDMA_POLL_REG_MEM_EXTRA_M (1 << 15)
1251 /* 0 = register
1252 * 1 = memory
1253 */
1254#define SDMA_OPCODE_COND_EXEC 9
1255#define SDMA_OPCODE_CONSTANT_FILL 11
1256# define SDMA_CONSTANT_FILL_EXTRA_SIZE(x) ((x) << 14)
1257 /* 0 = byte fill
1258 * 2 = DW fill
1259 */
1260#define SDMA_OPCODE_GENERATE_PTE_PDE 12
1261#define SDMA_OPCODE_TIMESTAMP 13
1262# define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL 0
1263# define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL 1
1264# define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL 2
1265#define SDMA_OPCODE_SRBM_WRITE 14
1266# define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x) ((x) << 12)
1267 /* byte mask */
1268
1269/* UVD */
1270
1271#define UVD_UDEC_ADDR_CONFIG 0xef4c
1272#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
1273#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
1274
1275#define UVD_LMI_EXT40_ADDR 0xf498
1276#define UVD_LMI_ADDR_EXT 0xf594
1277#define UVD_VCPU_CACHE_OFFSET0 0xf608
1278#define UVD_VCPU_CACHE_SIZE0 0xf60c
1279#define UVD_VCPU_CACHE_OFFSET1 0xf610
1280#define UVD_VCPU_CACHE_SIZE1 0xf614
1281#define UVD_VCPU_CACHE_OFFSET2 0xf618
1282#define UVD_VCPU_CACHE_SIZE2 0xf61c
1283
1284#define UVD_RBC_RB_RPTR 0xf690
1285#define UVD_RBC_RB_WPTR 0xf694
1286
1287/* UVD clocks */
1288
1289#define CG_DCLK_CNTL 0xC050009C
1290# define DCLK_DIVIDER_MASK 0x7f
1291# define DCLK_DIR_CNTL_EN (1 << 8)
1292#define CG_DCLK_STATUS 0xC05000A0
1293# define DCLK_STATUS (1 << 0)
1294#define CG_VCLK_CNTL 0xC05000A4
1295#define CG_VCLK_STATUS 0xC05000A8
1296
1297#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
new file mode 100644
index 000000000000..c00339440c5e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -0,0 +1,1081 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const u32 SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0, // HOLE
35 0, // HOLE
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0x00000000, // DB_DEPTH_INFO
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0, // HOLE
59 0, // HOLE
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_0
107 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_1
108 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_2
109 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_3
110 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_4
111 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_5
112 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_6
113 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_7
114 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_8
115 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_9
116 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_10
117 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_11
118 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_12
119 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_13
120 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_14
121 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_15
122 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_0
123 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_1
124 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_2
125 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_3
126 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_4
127 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_5
128 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_6
129 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_7
130 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_8
131 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_9
132 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_10
133 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_11
134 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_12
135 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_13
136 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_14
137 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_15
138 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_0
139 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_1
140 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_2
141 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_3
142 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_4
143 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_5
144 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_6
145 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_7
146 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_8
147 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_9
148 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_10
149 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_11
150 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_12
151 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_13
152 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_14
153 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_15
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238 0x00000000, // SX_MISC
239 0x00000000, // SX_SURFACE_SYNC
240 0x00000000, // SX_SCATTER_EXPORT_BASE
241 0x00000000, // SX_SCATTER_EXPORT_SIZE
242 0x00000000, // CP_PERFMON_CNTX_CNTL
243 0x00000000, // CP_RINGID
244 0x00000000, // CP_VMID
245 0, // HOLE
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0x00000000, // SQ_VTX_SEMANTIC_0
251 0x00000000, // SQ_VTX_SEMANTIC_1
252 0x00000000, // SQ_VTX_SEMANTIC_2
253 0x00000000, // SQ_VTX_SEMANTIC_3
254 0x00000000, // SQ_VTX_SEMANTIC_4
255 0x00000000, // SQ_VTX_SEMANTIC_5
256 0x00000000, // SQ_VTX_SEMANTIC_6
257 0x00000000, // SQ_VTX_SEMANTIC_7
258 0x00000000, // SQ_VTX_SEMANTIC_8
259 0x00000000, // SQ_VTX_SEMANTIC_9
260 0x00000000, // SQ_VTX_SEMANTIC_10
261 0x00000000, // SQ_VTX_SEMANTIC_11
262 0x00000000, // SQ_VTX_SEMANTIC_12
263 0x00000000, // SQ_VTX_SEMANTIC_13
264 0x00000000, // SQ_VTX_SEMANTIC_14
265 0x00000000, // SQ_VTX_SEMANTIC_15
266 0x00000000, // SQ_VTX_SEMANTIC_16
267 0x00000000, // SQ_VTX_SEMANTIC_17
268 0x00000000, // SQ_VTX_SEMANTIC_18
269 0x00000000, // SQ_VTX_SEMANTIC_19
270 0x00000000, // SQ_VTX_SEMANTIC_20
271 0x00000000, // SQ_VTX_SEMANTIC_21
272 0x00000000, // SQ_VTX_SEMANTIC_22
273 0x00000000, // SQ_VTX_SEMANTIC_23
274 0x00000000, // SQ_VTX_SEMANTIC_24
275 0x00000000, // SQ_VTX_SEMANTIC_25
276 0x00000000, // SQ_VTX_SEMANTIC_26
277 0x00000000, // SQ_VTX_SEMANTIC_27
278 0x00000000, // SQ_VTX_SEMANTIC_28
279 0x00000000, // SQ_VTX_SEMANTIC_29
280 0x00000000, // SQ_VTX_SEMANTIC_30
281 0x00000000, // SQ_VTX_SEMANTIC_31
282 0xffffffff, // VGT_MAX_VTX_INDX
283 0x00000000, // VGT_MIN_VTX_INDX
284 0x00000000, // VGT_INDX_OFFSET
285 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
286 0x00000000, // SX_ALPHA_TEST_CONTROL
287 0x00000000, // CB_BLEND_RED
288 0x00000000, // CB_BLEND_GREEN
289 0x00000000, // CB_BLEND_BLUE
290 0x00000000, // CB_BLEND_ALPHA
291 0, // HOLE
292 0, // HOLE
293 0, // HOLE
294 0x00000000, // DB_STENCILREFMASK
295 0x00000000, // DB_STENCILREFMASK_BF
296 0x00000000, // SX_ALPHA_REF
297 0x00000000, // PA_CL_VPORT_XSCALE
298 0x00000000, // PA_CL_VPORT_XOFFSET
299 0x00000000, // PA_CL_VPORT_YSCALE
300 0x00000000, // PA_CL_VPORT_YOFFSET
301 0x00000000, // PA_CL_VPORT_ZSCALE
302 0x00000000, // PA_CL_VPORT_ZOFFSET
303 0x00000000, // PA_CL_VPORT_XSCALE_1
304 0x00000000, // PA_CL_VPORT_XOFFSET_1
305 0x00000000, // PA_CL_VPORT_YSCALE_1
306 0x00000000, // PA_CL_VPORT_YOFFSET_1
307 0x00000000, // PA_CL_VPORT_ZSCALE_1
308 0x00000000, // PA_CL_VPORT_ZOFFSET_1
309 0x00000000, // PA_CL_VPORT_XSCALE_2
310 0x00000000, // PA_CL_VPORT_XOFFSET_2
311 0x00000000, // PA_CL_VPORT_YSCALE_2
312 0x00000000, // PA_CL_VPORT_YOFFSET_2
313 0x00000000, // PA_CL_VPORT_ZSCALE_2
314 0x00000000, // PA_CL_VPORT_ZOFFSET_2
315 0x00000000, // PA_CL_VPORT_XSCALE_3
316 0x00000000, // PA_CL_VPORT_XOFFSET_3
317 0x00000000, // PA_CL_VPORT_YSCALE_3
318 0x00000000, // PA_CL_VPORT_YOFFSET_3
319 0x00000000, // PA_CL_VPORT_ZSCALE_3
320 0x00000000, // PA_CL_VPORT_ZOFFSET_3
321 0x00000000, // PA_CL_VPORT_XSCALE_4
322 0x00000000, // PA_CL_VPORT_XOFFSET_4
323 0x00000000, // PA_CL_VPORT_YSCALE_4
324 0x00000000, // PA_CL_VPORT_YOFFSET_4
325 0x00000000, // PA_CL_VPORT_ZSCALE_4
326 0x00000000, // PA_CL_VPORT_ZOFFSET_4
327 0x00000000, // PA_CL_VPORT_XSCALE_5
328 0x00000000, // PA_CL_VPORT_XOFFSET_5
329 0x00000000, // PA_CL_VPORT_YSCALE_5
330 0x00000000, // PA_CL_VPORT_YOFFSET_5
331 0x00000000, // PA_CL_VPORT_ZSCALE_5
332 0x00000000, // PA_CL_VPORT_ZOFFSET_5
333 0x00000000, // PA_CL_VPORT_XSCALE_6
334 0x00000000, // PA_CL_VPORT_XOFFSET_6
335 0x00000000, // PA_CL_VPORT_YSCALE_6
336 0x00000000, // PA_CL_VPORT_YOFFSET_6
337 0x00000000, // PA_CL_VPORT_ZSCALE_6
338 0x00000000, // PA_CL_VPORT_ZOFFSET_6
339 0x00000000, // PA_CL_VPORT_XSCALE_7
340 0x00000000, // PA_CL_VPORT_XOFFSET_7
341 0x00000000, // PA_CL_VPORT_YSCALE_7
342 0x00000000, // PA_CL_VPORT_YOFFSET_7
343 0x00000000, // PA_CL_VPORT_ZSCALE_7
344 0x00000000, // PA_CL_VPORT_ZOFFSET_7
345 0x00000000, // PA_CL_VPORT_XSCALE_8
346 0x00000000, // PA_CL_VPORT_XOFFSET_8
347 0x00000000, // PA_CL_VPORT_YSCALE_8
348 0x00000000, // PA_CL_VPORT_YOFFSET_8
349 0x00000000, // PA_CL_VPORT_ZSCALE_8
350 0x00000000, // PA_CL_VPORT_ZOFFSET_8
351 0x00000000, // PA_CL_VPORT_XSCALE_9
352 0x00000000, // PA_CL_VPORT_XOFFSET_9
353 0x00000000, // PA_CL_VPORT_YSCALE_9
354 0x00000000, // PA_CL_VPORT_YOFFSET_9
355 0x00000000, // PA_CL_VPORT_ZSCALE_9
356 0x00000000, // PA_CL_VPORT_ZOFFSET_9
357 0x00000000, // PA_CL_VPORT_XSCALE_10
358 0x00000000, // PA_CL_VPORT_XOFFSET_10
359 0x00000000, // PA_CL_VPORT_YSCALE_10
360 0x00000000, // PA_CL_VPORT_YOFFSET_10
361 0x00000000, // PA_CL_VPORT_ZSCALE_10
362 0x00000000, // PA_CL_VPORT_ZOFFSET_10
363 0x00000000, // PA_CL_VPORT_XSCALE_11
364 0x00000000, // PA_CL_VPORT_XOFFSET_11
365 0x00000000, // PA_CL_VPORT_YSCALE_11
366 0x00000000, // PA_CL_VPORT_YOFFSET_11
367 0x00000000, // PA_CL_VPORT_ZSCALE_11
368 0x00000000, // PA_CL_VPORT_ZOFFSET_11
369 0x00000000, // PA_CL_VPORT_XSCALE_12
370 0x00000000, // PA_CL_VPORT_XOFFSET_12
371 0x00000000, // PA_CL_VPORT_YSCALE_12
372 0x00000000, // PA_CL_VPORT_YOFFSET_12
373 0x00000000, // PA_CL_VPORT_ZSCALE_12
374 0x00000000, // PA_CL_VPORT_ZOFFSET_12
375 0x00000000, // PA_CL_VPORT_XSCALE_13
376 0x00000000, // PA_CL_VPORT_XOFFSET_13
377 0x00000000, // PA_CL_VPORT_YSCALE_13
378 0x00000000, // PA_CL_VPORT_YOFFSET_13
379 0x00000000, // PA_CL_VPORT_ZSCALE_13
380 0x00000000, // PA_CL_VPORT_ZOFFSET_13
381 0x00000000, // PA_CL_VPORT_XSCALE_14
382 0x00000000, // PA_CL_VPORT_XOFFSET_14
383 0x00000000, // PA_CL_VPORT_YSCALE_14
384 0x00000000, // PA_CL_VPORT_YOFFSET_14
385 0x00000000, // PA_CL_VPORT_ZSCALE_14
386 0x00000000, // PA_CL_VPORT_ZOFFSET_14
387 0x00000000, // PA_CL_VPORT_XSCALE_15
388 0x00000000, // PA_CL_VPORT_XOFFSET_15
389 0x00000000, // PA_CL_VPORT_YSCALE_15
390 0x00000000, // PA_CL_VPORT_YOFFSET_15
391 0x00000000, // PA_CL_VPORT_ZSCALE_15
392 0x00000000, // PA_CL_VPORT_ZOFFSET_15
393 0x00000000, // PA_CL_UCP_0_X
394 0x00000000, // PA_CL_UCP_0_Y
395 0x00000000, // PA_CL_UCP_0_Z
396 0x00000000, // PA_CL_UCP_0_W
397 0x00000000, // PA_CL_UCP_1_X
398 0x00000000, // PA_CL_UCP_1_Y
399 0x00000000, // PA_CL_UCP_1_Z
400 0x00000000, // PA_CL_UCP_1_W
401 0x00000000, // PA_CL_UCP_2_X
402 0x00000000, // PA_CL_UCP_2_Y
403 0x00000000, // PA_CL_UCP_2_Z
404 0x00000000, // PA_CL_UCP_2_W
405 0x00000000, // PA_CL_UCP_3_X
406 0x00000000, // PA_CL_UCP_3_Y
407 0x00000000, // PA_CL_UCP_3_Z
408 0x00000000, // PA_CL_UCP_3_W
409 0x00000000, // PA_CL_UCP_4_X
410 0x00000000, // PA_CL_UCP_4_Y
411 0x00000000, // PA_CL_UCP_4_Z
412 0x00000000, // PA_CL_UCP_4_W
413 0x00000000, // PA_CL_UCP_5_X
414 0x00000000, // PA_CL_UCP_5_Y
415 0x00000000, // PA_CL_UCP_5_Z
416 0x00000000, // PA_CL_UCP_5_W
417 0x00000000, // SPI_VS_OUT_ID_0
418 0x00000000, // SPI_VS_OUT_ID_1
419 0x00000000, // SPI_VS_OUT_ID_2
420 0x00000000, // SPI_VS_OUT_ID_3
421 0x00000000, // SPI_VS_OUT_ID_4
422 0x00000000, // SPI_VS_OUT_ID_5
423 0x00000000, // SPI_VS_OUT_ID_6
424 0x00000000, // SPI_VS_OUT_ID_7
425 0x00000000, // SPI_VS_OUT_ID_8
426 0x00000000, // SPI_VS_OUT_ID_9
427 0x00000000, // SPI_PS_INPUT_CNTL_0
428 0x00000000, // SPI_PS_INPUT_CNTL_1
429 0x00000000, // SPI_PS_INPUT_CNTL_2
430 0x00000000, // SPI_PS_INPUT_CNTL_3
431 0x00000000, // SPI_PS_INPUT_CNTL_4
432 0x00000000, // SPI_PS_INPUT_CNTL_5
433 0x00000000, // SPI_PS_INPUT_CNTL_6
434 0x00000000, // SPI_PS_INPUT_CNTL_7
435 0x00000000, // SPI_PS_INPUT_CNTL_8
436 0x00000000, // SPI_PS_INPUT_CNTL_9
437 0x00000000, // SPI_PS_INPUT_CNTL_10
438 0x00000000, // SPI_PS_INPUT_CNTL_11
439 0x00000000, // SPI_PS_INPUT_CNTL_12
440 0x00000000, // SPI_PS_INPUT_CNTL_13
441 0x00000000, // SPI_PS_INPUT_CNTL_14
442 0x00000000, // SPI_PS_INPUT_CNTL_15
443 0x00000000, // SPI_PS_INPUT_CNTL_16
444 0x00000000, // SPI_PS_INPUT_CNTL_17
445 0x00000000, // SPI_PS_INPUT_CNTL_18
446 0x00000000, // SPI_PS_INPUT_CNTL_19
447 0x00000000, // SPI_PS_INPUT_CNTL_20
448 0x00000000, // SPI_PS_INPUT_CNTL_21
449 0x00000000, // SPI_PS_INPUT_CNTL_22
450 0x00000000, // SPI_PS_INPUT_CNTL_23
451 0x00000000, // SPI_PS_INPUT_CNTL_24
452 0x00000000, // SPI_PS_INPUT_CNTL_25
453 0x00000000, // SPI_PS_INPUT_CNTL_26
454 0x00000000, // SPI_PS_INPUT_CNTL_27
455 0x00000000, // SPI_PS_INPUT_CNTL_28
456 0x00000000, // SPI_PS_INPUT_CNTL_29
457 0x00000000, // SPI_PS_INPUT_CNTL_30
458 0x00000000, // SPI_PS_INPUT_CNTL_31
459 0x00000000, // SPI_VS_OUT_CONFIG
460 0x00000001, // SPI_THREAD_GROUPING
461 0x00000002, // SPI_PS_IN_CONTROL_0
462 0x00000000, // SPI_PS_IN_CONTROL_1
463 0x00000000, // SPI_INTERP_CONTROL_0
464 0x00000000, // SPI_INPUT_Z
465 0x00000000, // SPI_FOG_CNTL
466 0x00000000, // SPI_BARYC_CNTL
467 0x00000000, // SPI_PS_IN_CONTROL_2
468 0x00000000, // SPI_COMPUTE_INPUT_CNTL
469 0x00000000, // SPI_COMPUTE_NUM_THREAD_X
470 0x00000000, // SPI_COMPUTE_NUM_THREAD_Y
471 0x00000000, // SPI_COMPUTE_NUM_THREAD_Z
472 0x00000000, // SPI_GPR_MGMT
473 0x00000000, // SPI_LDS_MGMT
474 0x00000000, // SPI_STACK_MGMT
475 0x00000000, // SPI_WAVE_MGMT_1
476 0x00000000, // SPI_WAVE_MGMT_2
477 0, // HOLE
478 0, // HOLE
479 0, // HOLE
480 0, // HOLE
481 0, // HOLE
482 0x00000000, // GDS_ADDR_BASE
483 0x00003fff, // GDS_ADDR_SIZE
484 0, // HOLE
485 0, // HOLE
486 0x00000000, // GDS_ORDERED_COUNT
487 0, // HOLE
488 0, // HOLE
489 0, // HOLE
490 0x00000000, // GDS_APPEND_CONSUME_UAV0
491 0x00000000, // GDS_APPEND_CONSUME_UAV1
492 0x00000000, // GDS_APPEND_CONSUME_UAV2
493 0x00000000, // GDS_APPEND_CONSUME_UAV3
494 0x00000000, // GDS_APPEND_CONSUME_UAV4
495 0x00000000, // GDS_APPEND_CONSUME_UAV5
496 0x00000000, // GDS_APPEND_CONSUME_UAV6
497 0x00000000, // GDS_APPEND_CONSUME_UAV7
498 0x00000000, // GDS_APPEND_CONSUME_UAV8
499 0x00000000, // GDS_APPEND_CONSUME_UAV9
500 0x00000000, // GDS_APPEND_CONSUME_UAV10
501 0x00000000, // GDS_APPEND_CONSUME_UAV11
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0, // HOLE
506 0x00000000, // CB_BLEND0_CONTROL
507 0x00000000, // CB_BLEND1_CONTROL
508 0x00000000, // CB_BLEND2_CONTROL
509 0x00000000, // CB_BLEND3_CONTROL
510 0x00000000, // CB_BLEND4_CONTROL
511 0x00000000, // CB_BLEND5_CONTROL
512 0x00000000, // CB_BLEND6_CONTROL
513 0x00000000, // CB_BLEND7_CONTROL
514};
515static const u32 SECT_CONTEXT_def_2[] =
516{
517 0x00000000, // PA_CL_POINT_X_RAD
518 0x00000000, // PA_CL_POINT_Y_RAD
519 0x00000000, // PA_CL_POINT_SIZE
520 0x00000000, // PA_CL_POINT_CULL_RAD
521 0x00000000, // VGT_DMA_BASE_HI
522 0x00000000, // VGT_DMA_BASE
523};
524static const u32 SECT_CONTEXT_def_3[] =
525{
526 0x00000000, // DB_DEPTH_CONTROL
527 0x00000000, // DB_EQAA
528 0x00000000, // CB_COLOR_CONTROL
529 0x00000200, // DB_SHADER_CONTROL
530 0x00000000, // PA_CL_CLIP_CNTL
531 0x00000000, // PA_SU_SC_MODE_CNTL
532 0x00000000, // PA_CL_VTE_CNTL
533 0x00000000, // PA_CL_VS_OUT_CNTL
534 0x00000000, // PA_CL_NANINF_CNTL
535 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
536 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
537 0x00000000, // PA_SU_PRIM_FILTER_CNTL
538 0x00000000, // SQ_LSTMP_RING_ITEMSIZE
539 0x00000000, // SQ_HSTMP_RING_ITEMSIZE
540 0, // HOLE
541 0, // HOLE
542 0x00000000, // SQ_PGM_START_PS
543 0x00000000, // SQ_PGM_RESOURCES_PS
544 0x00000000, // SQ_PGM_RESOURCES_2_PS
545 0x00000000, // SQ_PGM_EXPORTS_PS
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0x00000000, // SQ_PGM_START_VS
550 0x00000000, // SQ_PGM_RESOURCES_VS
551 0x00000000, // SQ_PGM_RESOURCES_2_VS
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0x00000000, // SQ_PGM_START_GS
556 0x00000000, // SQ_PGM_RESOURCES_GS
557 0x00000000, // SQ_PGM_RESOURCES_2_GS
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0x00000000, // SQ_PGM_START_ES
562 0x00000000, // SQ_PGM_RESOURCES_ES
563 0x00000000, // SQ_PGM_RESOURCES_2_ES
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0x00000000, // SQ_PGM_START_FS
568 0x00000000, // SQ_PGM_RESOURCES_FS
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0x00000000, // SQ_PGM_START_HS
573 0x00000000, // SQ_PGM_RESOURCES_HS
574 0x00000000, // SQ_PGM_RESOURCES_2_HS
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0x00000000, // SQ_PGM_START_LS
579 0x00000000, // SQ_PGM_RESOURCES_LS
580 0x00000000, // SQ_PGM_RESOURCES_2_LS
581};
582static const u32 SECT_CONTEXT_def_4[] =
583{
584 0x00000000, // SQ_LDS_ALLOC
585 0x00000000, // SQ_LDS_ALLOC_PS
586 0x00000000, // SQ_VTX_SEMANTIC_CLEAR
587 0, // HOLE
588 0x00000000, // SQ_THREAD_TRACE_CTRL
589 0, // HOLE
590 0x00000000, // SQ_ESGS_RING_ITEMSIZE
591 0x00000000, // SQ_GSVS_RING_ITEMSIZE
592 0x00000000, // SQ_ESTMP_RING_ITEMSIZE
593 0x00000000, // SQ_GSTMP_RING_ITEMSIZE
594 0x00000000, // SQ_VSTMP_RING_ITEMSIZE
595 0x00000000, // SQ_PSTMP_RING_ITEMSIZE
596 0, // HOLE
597 0x00000000, // SQ_GS_VERT_ITEMSIZE
598 0x00000000, // SQ_GS_VERT_ITEMSIZE_1
599 0x00000000, // SQ_GS_VERT_ITEMSIZE_2
600 0x00000000, // SQ_GS_VERT_ITEMSIZE_3
601 0x00000000, // SQ_GSVS_RING_OFFSET_1
602 0x00000000, // SQ_GSVS_RING_OFFSET_2
603 0x00000000, // SQ_GSVS_RING_OFFSET_3
604 0x00000000, // SQ_GWS_RING_OFFSET
605 0, // HOLE
606 0x00000000, // SQ_ALU_CONST_CACHE_PS_0
607 0x00000000, // SQ_ALU_CONST_CACHE_PS_1
608 0x00000000, // SQ_ALU_CONST_CACHE_PS_2
609 0x00000000, // SQ_ALU_CONST_CACHE_PS_3
610 0x00000000, // SQ_ALU_CONST_CACHE_PS_4
611 0x00000000, // SQ_ALU_CONST_CACHE_PS_5
612 0x00000000, // SQ_ALU_CONST_CACHE_PS_6
613 0x00000000, // SQ_ALU_CONST_CACHE_PS_7
614 0x00000000, // SQ_ALU_CONST_CACHE_PS_8
615 0x00000000, // SQ_ALU_CONST_CACHE_PS_9
616 0x00000000, // SQ_ALU_CONST_CACHE_PS_10
617 0x00000000, // SQ_ALU_CONST_CACHE_PS_11
618 0x00000000, // SQ_ALU_CONST_CACHE_PS_12
619 0x00000000, // SQ_ALU_CONST_CACHE_PS_13
620 0x00000000, // SQ_ALU_CONST_CACHE_PS_14
621 0x00000000, // SQ_ALU_CONST_CACHE_PS_15
622 0x00000000, // SQ_ALU_CONST_CACHE_VS_0
623 0x00000000, // SQ_ALU_CONST_CACHE_VS_1
624 0x00000000, // SQ_ALU_CONST_CACHE_VS_2
625 0x00000000, // SQ_ALU_CONST_CACHE_VS_3
626 0x00000000, // SQ_ALU_CONST_CACHE_VS_4
627 0x00000000, // SQ_ALU_CONST_CACHE_VS_5
628 0x00000000, // SQ_ALU_CONST_CACHE_VS_6
629 0x00000000, // SQ_ALU_CONST_CACHE_VS_7
630 0x00000000, // SQ_ALU_CONST_CACHE_VS_8
631 0x00000000, // SQ_ALU_CONST_CACHE_VS_9
632 0x00000000, // SQ_ALU_CONST_CACHE_VS_10
633 0x00000000, // SQ_ALU_CONST_CACHE_VS_11
634 0x00000000, // SQ_ALU_CONST_CACHE_VS_12
635 0x00000000, // SQ_ALU_CONST_CACHE_VS_13
636 0x00000000, // SQ_ALU_CONST_CACHE_VS_14
637 0x00000000, // SQ_ALU_CONST_CACHE_VS_15
638 0x00000000, // SQ_ALU_CONST_CACHE_GS_0
639 0x00000000, // SQ_ALU_CONST_CACHE_GS_1
640 0x00000000, // SQ_ALU_CONST_CACHE_GS_2
641 0x00000000, // SQ_ALU_CONST_CACHE_GS_3
642 0x00000000, // SQ_ALU_CONST_CACHE_GS_4
643 0x00000000, // SQ_ALU_CONST_CACHE_GS_5
644 0x00000000, // SQ_ALU_CONST_CACHE_GS_6
645 0x00000000, // SQ_ALU_CONST_CACHE_GS_7
646 0x00000000, // SQ_ALU_CONST_CACHE_GS_8
647 0x00000000, // SQ_ALU_CONST_CACHE_GS_9
648 0x00000000, // SQ_ALU_CONST_CACHE_GS_10
649 0x00000000, // SQ_ALU_CONST_CACHE_GS_11
650 0x00000000, // SQ_ALU_CONST_CACHE_GS_12
651 0x00000000, // SQ_ALU_CONST_CACHE_GS_13
652 0x00000000, // SQ_ALU_CONST_CACHE_GS_14
653 0x00000000, // SQ_ALU_CONST_CACHE_GS_15
654 0x00000000, // PA_SU_POINT_SIZE
655 0x00000000, // PA_SU_POINT_MINMAX
656 0x00000000, // PA_SU_LINE_CNTL
657 0x00000000, // PA_SC_LINE_STIPPLE
658 0x00000000, // VGT_OUTPUT_PATH_CNTL
659 0x00000000, // VGT_HOS_CNTL
660 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
661 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
662 0x00000000, // VGT_HOS_REUSE_DEPTH
663 0x00000000, // VGT_GROUP_PRIM_TYPE
664 0x00000000, // VGT_GROUP_FIRST_DECR
665 0x00000000, // VGT_GROUP_DECR
666 0x00000000, // VGT_GROUP_VECT_0_CNTL
667 0x00000000, // VGT_GROUP_VECT_1_CNTL
668 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
669 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
670 0x00000000, // VGT_GS_MODE
671 0, // HOLE
672 0x00000000, // PA_SC_MODE_CNTL_0
673 0x00000000, // PA_SC_MODE_CNTL_1
674 0x00000000, // VGT_ENHANCE
675 0x00000100, // VGT_GS_PER_ES
676 0x00000080, // VGT_ES_PER_GS
677 0x00000002, // VGT_GS_PER_VS
678 0, // HOLE
679 0, // HOLE
680 0, // HOLE
681 0x00000000, // VGT_GS_OUT_PRIM_TYPE
682 0x00000000, // IA_ENHANCE
683};
684static const u32 SECT_CONTEXT_def_5[] =
685{
686 0x00000000, // VGT_DMA_MAX_SIZE
687 0x00000000, // VGT_DMA_INDEX_TYPE
688 0, // HOLE
689 0x00000000, // VGT_PRIMITIVEID_EN
690 0x00000000, // VGT_DMA_NUM_INSTANCES
691};
692static const u32 SECT_CONTEXT_def_6[] =
693{
694 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
695 0, // HOLE
696 0, // HOLE
697 0x00000000, // VGT_INSTANCE_STEP_RATE_0
698 0x00000000, // VGT_INSTANCE_STEP_RATE_1
699 0x000000ff, // IA_MULTI_VGT_PARAM
700 0, // HOLE
701 0, // HOLE
702 0x00000000, // VGT_REUSE_OFF
703 0x00000000, // VGT_VTX_CNT_EN
704 0x00000000, // DB_HTILE_SURFACE
705 0x00000000, // DB_SRESULTS_COMPARE_STATE0
706 0x00000000, // DB_SRESULTS_COMPARE_STATE1
707 0x00000000, // DB_PRELOAD_CONTROL
708 0, // HOLE
709 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
710 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
711 0x00000000, // VGT_STRMOUT_BUFFER_BASE_0
712 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
713 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
714 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
715 0x00000000, // VGT_STRMOUT_BUFFER_BASE_1
716 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
717 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
718 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
719 0x00000000, // VGT_STRMOUT_BUFFER_BASE_2
720 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
721 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
722 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
723 0x00000000, // VGT_STRMOUT_BUFFER_BASE_3
724 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
725 0x00000000, // VGT_STRMOUT_BASE_OFFSET_0
726 0x00000000, // VGT_STRMOUT_BASE_OFFSET_1
727 0x00000000, // VGT_STRMOUT_BASE_OFFSET_2
728 0x00000000, // VGT_STRMOUT_BASE_OFFSET_3
729 0, // HOLE
730 0, // HOLE
731 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
732 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
733 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
734 0, // HOLE
735 0x00000000, // VGT_GS_MAX_VERT_OUT
736 0, // HOLE
737 0, // HOLE
738 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_0
739 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_1
740 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_2
741 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_3
742 0x00000000, // VGT_SHADER_STAGES_EN
743 0x00000000, // VGT_LS_HS_CONFIG
744 0, // HOLE
745 0, // HOLE
746 0, // HOLE
747 0, // HOLE
748 0x00000000, // VGT_TF_PARAM
749 0x00000000, // DB_ALPHA_TO_MASK
750};
751static const u32 SECT_CONTEXT_def_7[] =
752{
753 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
754 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
755 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
756 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
757 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
758 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
759 0x00000000, // VGT_GS_INSTANCE_CNT
760 0x00000000, // VGT_STRMOUT_CONFIG
761 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
762 0x00000000, // CB_IMMED0_BASE
763 0x00000000, // CB_IMMED1_BASE
764 0x00000000, // CB_IMMED2_BASE
765 0x00000000, // CB_IMMED3_BASE
766 0x00000000, // CB_IMMED4_BASE
767 0x00000000, // CB_IMMED5_BASE
768 0x00000000, // CB_IMMED6_BASE
769 0x00000000, // CB_IMMED7_BASE
770 0x00000000, // CB_IMMED8_BASE
771 0x00000000, // CB_IMMED9_BASE
772 0x00000000, // CB_IMMED10_BASE
773 0x00000000, // CB_IMMED11_BASE
774 0, // HOLE
775 0, // HOLE
776 0x00000000, // PA_SC_CENTROID_PRIORITY_0
777 0x00000000, // PA_SC_CENTROID_PRIORITY_1
778 0x00001000, // PA_SC_LINE_CNTL
779 0x00000000, // PA_SC_AA_CONFIG
780 0x00000005, // PA_SU_VTX_CNTL
781 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
782 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
783 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
784 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
785 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
786 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
787 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
788 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
789 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
790 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
791 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
792 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
798 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
799 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
800 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
801 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
802 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
803 0x00000000, // CB_CLRCMP_CONTROL
804 0x00000000, // CB_CLRCMP_SRC
805 0x00000000, // CB_CLRCMP_DST
806 0x00000000, // CB_CLRCMP_MSK
807 0, // HOLE
808 0, // HOLE
809 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
810 0x00000010, // VGT_OUT_DEALLOC_CNTL
811 0x00000000, // CB_COLOR0_BASE
812 0x00000000, // CB_COLOR0_PITCH
813 0x00000000, // CB_COLOR0_SLICE
814 0x00000000, // CB_COLOR0_VIEW
815 0x00000000, // CB_COLOR0_INFO
816 0x00000000, // CB_COLOR0_ATTRIB
817 0x00000000, // CB_COLOR0_DIM
818 0x00000000, // CB_COLOR0_CMASK
819 0x00000000, // CB_COLOR0_CMASK_SLICE
820 0x00000000, // CB_COLOR0_FMASK
821 0x00000000, // CB_COLOR0_FMASK_SLICE
822 0x00000000, // CB_COLOR0_CLEAR_WORD0
823 0x00000000, // CB_COLOR0_CLEAR_WORD1
824 0x00000000, // CB_COLOR0_CLEAR_WORD2
825 0x00000000, // CB_COLOR0_CLEAR_WORD3
826 0x00000000, // CB_COLOR1_BASE
827 0x00000000, // CB_COLOR1_PITCH
828 0x00000000, // CB_COLOR1_SLICE
829 0x00000000, // CB_COLOR1_VIEW
830 0x00000000, // CB_COLOR1_INFO
831 0x00000000, // CB_COLOR1_ATTRIB
832 0x00000000, // CB_COLOR1_DIM
833 0x00000000, // CB_COLOR1_CMASK
834 0x00000000, // CB_COLOR1_CMASK_SLICE
835 0x00000000, // CB_COLOR1_FMASK
836 0x00000000, // CB_COLOR1_FMASK_SLICE
837 0x00000000, // CB_COLOR1_CLEAR_WORD0
838 0x00000000, // CB_COLOR1_CLEAR_WORD1
839 0x00000000, // CB_COLOR1_CLEAR_WORD2
840 0x00000000, // CB_COLOR1_CLEAR_WORD3
841 0x00000000, // CB_COLOR2_BASE
842 0x00000000, // CB_COLOR2_PITCH
843 0x00000000, // CB_COLOR2_SLICE
844 0x00000000, // CB_COLOR2_VIEW
845 0x00000000, // CB_COLOR2_INFO
846 0x00000000, // CB_COLOR2_ATTRIB
847 0x00000000, // CB_COLOR2_DIM
848 0x00000000, // CB_COLOR2_CMASK
849 0x00000000, // CB_COLOR2_CMASK_SLICE
850 0x00000000, // CB_COLOR2_FMASK
851 0x00000000, // CB_COLOR2_FMASK_SLICE
852 0x00000000, // CB_COLOR2_CLEAR_WORD0
853 0x00000000, // CB_COLOR2_CLEAR_WORD1
854 0x00000000, // CB_COLOR2_CLEAR_WORD2
855 0x00000000, // CB_COLOR2_CLEAR_WORD3
856 0x00000000, // CB_COLOR3_BASE
857 0x00000000, // CB_COLOR3_PITCH
858 0x00000000, // CB_COLOR3_SLICE
859 0x00000000, // CB_COLOR3_VIEW
860 0x00000000, // CB_COLOR3_INFO
861 0x00000000, // CB_COLOR3_ATTRIB
862 0x00000000, // CB_COLOR3_DIM
863 0x00000000, // CB_COLOR3_CMASK
864 0x00000000, // CB_COLOR3_CMASK_SLICE
865 0x00000000, // CB_COLOR3_FMASK
866 0x00000000, // CB_COLOR3_FMASK_SLICE
867 0x00000000, // CB_COLOR3_CLEAR_WORD0
868 0x00000000, // CB_COLOR3_CLEAR_WORD1
869 0x00000000, // CB_COLOR3_CLEAR_WORD2
870 0x00000000, // CB_COLOR3_CLEAR_WORD3
871 0x00000000, // CB_COLOR4_BASE
872 0x00000000, // CB_COLOR4_PITCH
873 0x00000000, // CB_COLOR4_SLICE
874 0x00000000, // CB_COLOR4_VIEW
875 0x00000000, // CB_COLOR4_INFO
876 0x00000000, // CB_COLOR4_ATTRIB
877 0x00000000, // CB_COLOR4_DIM
878 0x00000000, // CB_COLOR4_CMASK
879 0x00000000, // CB_COLOR4_CMASK_SLICE
880 0x00000000, // CB_COLOR4_FMASK
881 0x00000000, // CB_COLOR4_FMASK_SLICE
882 0x00000000, // CB_COLOR4_CLEAR_WORD0
883 0x00000000, // CB_COLOR4_CLEAR_WORD1
884 0x00000000, // CB_COLOR4_CLEAR_WORD2
885 0x00000000, // CB_COLOR4_CLEAR_WORD3
886 0x00000000, // CB_COLOR5_BASE
887 0x00000000, // CB_COLOR5_PITCH
888 0x00000000, // CB_COLOR5_SLICE
889 0x00000000, // CB_COLOR5_VIEW
890 0x00000000, // CB_COLOR5_INFO
891 0x00000000, // CB_COLOR5_ATTRIB
892 0x00000000, // CB_COLOR5_DIM
893 0x00000000, // CB_COLOR5_CMASK
894 0x00000000, // CB_COLOR5_CMASK_SLICE
895 0x00000000, // CB_COLOR5_FMASK
896 0x00000000, // CB_COLOR5_FMASK_SLICE
897 0x00000000, // CB_COLOR5_CLEAR_WORD0
898 0x00000000, // CB_COLOR5_CLEAR_WORD1
899 0x00000000, // CB_COLOR5_CLEAR_WORD2
900 0x00000000, // CB_COLOR5_CLEAR_WORD3
901 0x00000000, // CB_COLOR6_BASE
902 0x00000000, // CB_COLOR6_PITCH
903 0x00000000, // CB_COLOR6_SLICE
904 0x00000000, // CB_COLOR6_VIEW
905 0x00000000, // CB_COLOR6_INFO
906 0x00000000, // CB_COLOR6_ATTRIB
907 0x00000000, // CB_COLOR6_DIM
908 0x00000000, // CB_COLOR6_CMASK
909 0x00000000, // CB_COLOR6_CMASK_SLICE
910 0x00000000, // CB_COLOR6_FMASK
911 0x00000000, // CB_COLOR6_FMASK_SLICE
912 0x00000000, // CB_COLOR6_CLEAR_WORD0
913 0x00000000, // CB_COLOR6_CLEAR_WORD1
914 0x00000000, // CB_COLOR6_CLEAR_WORD2
915 0x00000000, // CB_COLOR6_CLEAR_WORD3
916 0x00000000, // CB_COLOR7_BASE
917 0x00000000, // CB_COLOR7_PITCH
918 0x00000000, // CB_COLOR7_SLICE
919 0x00000000, // CB_COLOR7_VIEW
920 0x00000000, // CB_COLOR7_INFO
921 0x00000000, // CB_COLOR7_ATTRIB
922 0x00000000, // CB_COLOR7_DIM
923 0x00000000, // CB_COLOR7_CMASK
924 0x00000000, // CB_COLOR7_CMASK_SLICE
925 0x00000000, // CB_COLOR7_FMASK
926 0x00000000, // CB_COLOR7_FMASK_SLICE
927 0x00000000, // CB_COLOR7_CLEAR_WORD0
928 0x00000000, // CB_COLOR7_CLEAR_WORD1
929 0x00000000, // CB_COLOR7_CLEAR_WORD2
930 0x00000000, // CB_COLOR7_CLEAR_WORD3
931 0x00000000, // CB_COLOR8_BASE
932 0x00000000, // CB_COLOR8_PITCH
933 0x00000000, // CB_COLOR8_SLICE
934 0x00000000, // CB_COLOR8_VIEW
935 0x00000000, // CB_COLOR8_INFO
936 0x00000000, // CB_COLOR8_ATTRIB
937 0x00000000, // CB_COLOR8_DIM
938 0x00000000, // CB_COLOR9_BASE
939 0x00000000, // CB_COLOR9_PITCH
940 0x00000000, // CB_COLOR9_SLICE
941 0x00000000, // CB_COLOR9_VIEW
942 0x00000000, // CB_COLOR9_INFO
943 0x00000000, // CB_COLOR9_ATTRIB
944 0x00000000, // CB_COLOR9_DIM
945 0x00000000, // CB_COLOR10_BASE
946 0x00000000, // CB_COLOR10_PITCH
947 0x00000000, // CB_COLOR10_SLICE
948 0x00000000, // CB_COLOR10_VIEW
949 0x00000000, // CB_COLOR10_INFO
950 0x00000000, // CB_COLOR10_ATTRIB
951 0x00000000, // CB_COLOR10_DIM
952 0x00000000, // CB_COLOR11_BASE
953 0x00000000, // CB_COLOR11_PITCH
954 0x00000000, // CB_COLOR11_SLICE
955 0x00000000, // CB_COLOR11_VIEW
956 0x00000000, // CB_COLOR11_INFO
957 0x00000000, // CB_COLOR11_ATTRIB
958 0x00000000, // CB_COLOR11_DIM
959 0, // HOLE
960 0, // HOLE
961 0, // HOLE
962 0, // HOLE
963 0, // HOLE
964 0, // HOLE
965 0, // HOLE
966 0, // HOLE
967 0, // HOLE
968 0, // HOLE
969 0, // HOLE
970 0, // HOLE
971 0, // HOLE
972 0, // HOLE
973 0, // HOLE
974 0, // HOLE
975 0, // HOLE
976 0, // HOLE
977 0, // HOLE
978 0, // HOLE
979 0x00000000, // SQ_ALU_CONST_CACHE_HS_0
980 0x00000000, // SQ_ALU_CONST_CACHE_HS_1
981 0x00000000, // SQ_ALU_CONST_CACHE_HS_2
982 0x00000000, // SQ_ALU_CONST_CACHE_HS_3
983 0x00000000, // SQ_ALU_CONST_CACHE_HS_4
984 0x00000000, // SQ_ALU_CONST_CACHE_HS_5
985 0x00000000, // SQ_ALU_CONST_CACHE_HS_6
986 0x00000000, // SQ_ALU_CONST_CACHE_HS_7
987 0x00000000, // SQ_ALU_CONST_CACHE_HS_8
988 0x00000000, // SQ_ALU_CONST_CACHE_HS_9
989 0x00000000, // SQ_ALU_CONST_CACHE_HS_10
990 0x00000000, // SQ_ALU_CONST_CACHE_HS_11
991 0x00000000, // SQ_ALU_CONST_CACHE_HS_12
992 0x00000000, // SQ_ALU_CONST_CACHE_HS_13
993 0x00000000, // SQ_ALU_CONST_CACHE_HS_14
994 0x00000000, // SQ_ALU_CONST_CACHE_HS_15
995 0x00000000, // SQ_ALU_CONST_CACHE_LS_0
996 0x00000000, // SQ_ALU_CONST_CACHE_LS_1
997 0x00000000, // SQ_ALU_CONST_CACHE_LS_2
998 0x00000000, // SQ_ALU_CONST_CACHE_LS_3
999 0x00000000, // SQ_ALU_CONST_CACHE_LS_4
1000 0x00000000, // SQ_ALU_CONST_CACHE_LS_5
1001 0x00000000, // SQ_ALU_CONST_CACHE_LS_6
1002 0x00000000, // SQ_ALU_CONST_CACHE_LS_7
1003 0x00000000, // SQ_ALU_CONST_CACHE_LS_8
1004 0x00000000, // SQ_ALU_CONST_CACHE_LS_9
1005 0x00000000, // SQ_ALU_CONST_CACHE_LS_10
1006 0x00000000, // SQ_ALU_CONST_CACHE_LS_11
1007 0x00000000, // SQ_ALU_CONST_CACHE_LS_12
1008 0x00000000, // SQ_ALU_CONST_CACHE_LS_13
1009 0x00000000, // SQ_ALU_CONST_CACHE_LS_14
1010 0x00000000, // SQ_ALU_CONST_CACHE_LS_15
1011 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_0
1012 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_1
1013 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_2
1014 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_3
1015 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_4
1016 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_5
1017 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_6
1018 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_7
1019 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_8
1020 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_9
1021 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_10
1022 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_11
1023 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_12
1024 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_13
1025 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_14
1026 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_15
1027 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_0
1028 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_1
1029 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_2
1030 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_3
1031 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_4
1032 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_5
1033 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_6
1034 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_7
1035 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_8
1036 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_9
1037 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_10
1038 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_11
1039 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_12
1040 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_13
1041 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_14
1042 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_15
1043};
1044static const struct cs_extent_def SECT_CONTEXT_defs[] =
1045{
1046 {SECT_CONTEXT_def_1, 0x0000a000, 488 },
1047 {SECT_CONTEXT_def_2, 0x0000a1f5, 6 },
1048 {SECT_CONTEXT_def_3, 0x0000a200, 55 },
1049 {SECT_CONTEXT_def_4, 0x0000a23a, 99 },
1050 {SECT_CONTEXT_def_5, 0x0000a29e, 5 },
1051 {SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
1052 {SECT_CONTEXT_def_7, 0x0000a2de, 290 },
1053 { 0, 0, 0 }
1054};
1055static const u32 SECT_CLEAR_def_1[] =
1056{
1057 0xffffffff, // SQ_TEX_SAMPLER_CLEAR
1058 0xffffffff, // SQ_TEX_RESOURCE_CLEAR
1059 0xffffffff, // SQ_LOOP_BOOL_CLEAR
1060};
1061static const struct cs_extent_def SECT_CLEAR_defs[] =
1062{
1063 {SECT_CLEAR_def_1, 0x0000ffc0, 3 },
1064 { 0, 0, 0 }
1065};
1066static const u32 SECT_CTRLCONST_def_1[] =
1067{
1068 0x00000000, // SQ_VTX_BASE_VTX_LOC
1069 0x00000000, // SQ_VTX_START_INST_LOC
1070};
1071static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1072{
1073 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1074 { 0, 0, 0 }
1075};
1076struct cs_section_def cayman_cs_data[] = {
1077 { SECT_CONTEXT_defs, SECT_CONTEXT },
1078 { SECT_CLEAR_defs, SECT_CLEAR },
1079 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
1080 { 0, SECT_NONE }
1081};
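The terminator conventions above (a section entry with a null table pointer and id SECT_NONE, and an extent entry of { 0, 0, 0 }) let a consumer walk the whole data set without knowing its size up front. A minimal sketch of such a walk is below; cs_data_dword_count is a hypothetical helper, not code from this series, and assumes only the struct layout introduced in clearstate_defs.h just after.

/* Sketch: total number of default values described by a
 * cs_section_def table, found by walking every section and
 * summing each extent's reg_count until the terminators hit.
 * Hypothetical helper, not part of this patch. */
static unsigned int cs_data_dword_count(const struct cs_section_def *sect)
{
	unsigned int count = 0;

	for (; sect->section != NULL; sect++) {
		const struct cs_extent_def *ext;

		for (ext = sect->section; ext->extent != NULL; ext++)
			count += ext->reg_count;
	}
	return count;
}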
diff --git a/drivers/gpu/drm/radeon/clearstate_defs.h b/drivers/gpu/drm/radeon/clearstate_defs.h
new file mode 100644
index 000000000000..3eda707d7388
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_defs.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef CLEARSTATE_DEFS_H
24#define CLEARSTATE_DEFS_H
25
26enum section_id {
27 SECT_NONE,
28 SECT_CONTEXT,
29 SECT_CLEAR,
30 SECT_CTRLCONST
31};
32
33struct cs_extent_def {
34 const unsigned int *extent;
35 const unsigned int reg_index;
36 const unsigned int reg_count;
37};
38
39struct cs_section_def {
40 const struct cs_extent_def *section;
41 const enum section_id id;
42};
43
44#endif
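Each cs_extent_def pairs a block of defaults with the dword index of the first register it covers (the byte offset divided by four: 0x0000a000, for instance, appears to correspond to offset 0x28000, the start of the context-register range on these parts) and the count of consecutive registers. A small sketch of a lookup over that layout follows; cs_default_lookup is a hypothetical illustration, not an in-tree function.

/* Sketch: find the default value of one register, identified by
 * its dword index, by scanning the extents of every section.
 * Returns 0 on a hit, -1 if no extent covers the index.
 * Hypothetical helper, not part of this patch. */
static int cs_default_lookup(const struct cs_section_def *sects,
			     unsigned int reg_index, u32 *value)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;

	for (sect = sects; sect->section != NULL; sect++) {
		for (ext = sect->section; ext->extent != NULL; ext++) {
			if (reg_index >= ext->reg_index &&
			    reg_index < ext->reg_index + ext->reg_count) {
				*value = ext->extent[reg_index - ext->reg_index];
				return 0;
			}
		}
	}
	return -1;
}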
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
new file mode 100644
index 000000000000..4791d856b7fd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -0,0 +1,1080 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const u32 SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0, // HOLE
35 0, // HOLE
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0, // HOLE
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0, // HOLE
59 0, // HOLE
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_0
107 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_1
108 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_2
109 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_3
110 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_4
111 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_5
112 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_6
113 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_7
114 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_8
115 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_9
116 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_10
117 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_11
118 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_12
119 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_13
120 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_14
121 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_PS_15
122 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_0
123 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_1
124 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_2
125 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_3
126 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_4
127 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_5
128 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_6
129 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_7
130 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_8
131 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_9
132 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_10
133 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_11
134 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_12
135 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_13
136 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_14
137 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_VS_15
138 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_0
139 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_1
140 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_2
141 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_3
142 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_4
143 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_5
144 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_6
145 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_7
146 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_8
147 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_9
148 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_10
149 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_11
150 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_12
151 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_13
152 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_14
153 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_GS_15
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238 0x00000000, // SX_MISC
239 0x00000000, // SX_SURFACE_SYNC
240 0x00000000, // CP_PERFMON_CNTX_CNTL
241 0, // HOLE
242 0, // HOLE
243 0, // HOLE
244 0, // HOLE
245 0, // HOLE
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0x00000000, // SQ_VTX_SEMANTIC_0
251 0x00000000, // SQ_VTX_SEMANTIC_1
252 0x00000000, // SQ_VTX_SEMANTIC_2
253 0x00000000, // SQ_VTX_SEMANTIC_3
254 0x00000000, // SQ_VTX_SEMANTIC_4
255 0x00000000, // SQ_VTX_SEMANTIC_5
256 0x00000000, // SQ_VTX_SEMANTIC_6
257 0x00000000, // SQ_VTX_SEMANTIC_7
258 0x00000000, // SQ_VTX_SEMANTIC_8
259 0x00000000, // SQ_VTX_SEMANTIC_9
260 0x00000000, // SQ_VTX_SEMANTIC_10
261 0x00000000, // SQ_VTX_SEMANTIC_11
262 0x00000000, // SQ_VTX_SEMANTIC_12
263 0x00000000, // SQ_VTX_SEMANTIC_13
264 0x00000000, // SQ_VTX_SEMANTIC_14
265 0x00000000, // SQ_VTX_SEMANTIC_15
266 0x00000000, // SQ_VTX_SEMANTIC_16
267 0x00000000, // SQ_VTX_SEMANTIC_17
268 0x00000000, // SQ_VTX_SEMANTIC_18
269 0x00000000, // SQ_VTX_SEMANTIC_19
270 0x00000000, // SQ_VTX_SEMANTIC_20
271 0x00000000, // SQ_VTX_SEMANTIC_21
272 0x00000000, // SQ_VTX_SEMANTIC_22
273 0x00000000, // SQ_VTX_SEMANTIC_23
274 0x00000000, // SQ_VTX_SEMANTIC_24
275 0x00000000, // SQ_VTX_SEMANTIC_25
276 0x00000000, // SQ_VTX_SEMANTIC_26
277 0x00000000, // SQ_VTX_SEMANTIC_27
278 0x00000000, // SQ_VTX_SEMANTIC_28
279 0x00000000, // SQ_VTX_SEMANTIC_29
280 0x00000000, // SQ_VTX_SEMANTIC_30
281 0x00000000, // SQ_VTX_SEMANTIC_31
282 0xffffffff, // VGT_MAX_VTX_INDX
283 0x00000000, // VGT_MIN_VTX_INDX
284 0x00000000, // VGT_INDX_OFFSET
285 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
286 0x00000000, // SX_ALPHA_TEST_CONTROL
287 0x00000000, // CB_BLEND_RED
288 0x00000000, // CB_BLEND_GREEN
289 0x00000000, // CB_BLEND_BLUE
290 0x00000000, // CB_BLEND_ALPHA
291 0, // HOLE
292 0, // HOLE
293 0, // HOLE
294 0x00000000, // DB_STENCILREFMASK
295 0x00000000, // DB_STENCILREFMASK_BF
296 0x00000000, // SX_ALPHA_REF
297 0x00000000, // PA_CL_VPORT_XSCALE
298 0x00000000, // PA_CL_VPORT_XOFFSET
299 0x00000000, // PA_CL_VPORT_YSCALE
300 0x00000000, // PA_CL_VPORT_YOFFSET
301 0x00000000, // PA_CL_VPORT_ZSCALE
302 0x00000000, // PA_CL_VPORT_ZOFFSET
303 0x00000000, // PA_CL_VPORT_XSCALE_1
304 0x00000000, // PA_CL_VPORT_XOFFSET_1
305 0x00000000, // PA_CL_VPORT_YSCALE_1
306 0x00000000, // PA_CL_VPORT_YOFFSET_1
307 0x00000000, // PA_CL_VPORT_ZSCALE_1
308 0x00000000, // PA_CL_VPORT_ZOFFSET_1
309 0x00000000, // PA_CL_VPORT_XSCALE_2
310 0x00000000, // PA_CL_VPORT_XOFFSET_2
311 0x00000000, // PA_CL_VPORT_YSCALE_2
312 0x00000000, // PA_CL_VPORT_YOFFSET_2
313 0x00000000, // PA_CL_VPORT_ZSCALE_2
314 0x00000000, // PA_CL_VPORT_ZOFFSET_2
315 0x00000000, // PA_CL_VPORT_XSCALE_3
316 0x00000000, // PA_CL_VPORT_XOFFSET_3
317 0x00000000, // PA_CL_VPORT_YSCALE_3
318 0x00000000, // PA_CL_VPORT_YOFFSET_3
319 0x00000000, // PA_CL_VPORT_ZSCALE_3
320 0x00000000, // PA_CL_VPORT_ZOFFSET_3
321 0x00000000, // PA_CL_VPORT_XSCALE_4
322 0x00000000, // PA_CL_VPORT_XOFFSET_4
323 0x00000000, // PA_CL_VPORT_YSCALE_4
324 0x00000000, // PA_CL_VPORT_YOFFSET_4
325 0x00000000, // PA_CL_VPORT_ZSCALE_4
326 0x00000000, // PA_CL_VPORT_ZOFFSET_4
327 0x00000000, // PA_CL_VPORT_XSCALE_5
328 0x00000000, // PA_CL_VPORT_XOFFSET_5
329 0x00000000, // PA_CL_VPORT_YSCALE_5
330 0x00000000, // PA_CL_VPORT_YOFFSET_5
331 0x00000000, // PA_CL_VPORT_ZSCALE_5
332 0x00000000, // PA_CL_VPORT_ZOFFSET_5
333 0x00000000, // PA_CL_VPORT_XSCALE_6
334 0x00000000, // PA_CL_VPORT_XOFFSET_6
335 0x00000000, // PA_CL_VPORT_YSCALE_6
336 0x00000000, // PA_CL_VPORT_YOFFSET_6
337 0x00000000, // PA_CL_VPORT_ZSCALE_6
338 0x00000000, // PA_CL_VPORT_ZOFFSET_6
339 0x00000000, // PA_CL_VPORT_XSCALE_7
340 0x00000000, // PA_CL_VPORT_XOFFSET_7
341 0x00000000, // PA_CL_VPORT_YSCALE_7
342 0x00000000, // PA_CL_VPORT_YOFFSET_7
343 0x00000000, // PA_CL_VPORT_ZSCALE_7
344 0x00000000, // PA_CL_VPORT_ZOFFSET_7
345 0x00000000, // PA_CL_VPORT_XSCALE_8
346 0x00000000, // PA_CL_VPORT_XOFFSET_8
347 0x00000000, // PA_CL_VPORT_YSCALE_8
348 0x00000000, // PA_CL_VPORT_YOFFSET_8
349 0x00000000, // PA_CL_VPORT_ZSCALE_8
350 0x00000000, // PA_CL_VPORT_ZOFFSET_8
351 0x00000000, // PA_CL_VPORT_XSCALE_9
352 0x00000000, // PA_CL_VPORT_XOFFSET_9
353 0x00000000, // PA_CL_VPORT_YSCALE_9
354 0x00000000, // PA_CL_VPORT_YOFFSET_9
355 0x00000000, // PA_CL_VPORT_ZSCALE_9
356 0x00000000, // PA_CL_VPORT_ZOFFSET_9
357 0x00000000, // PA_CL_VPORT_XSCALE_10
358 0x00000000, // PA_CL_VPORT_XOFFSET_10
359 0x00000000, // PA_CL_VPORT_YSCALE_10
360 0x00000000, // PA_CL_VPORT_YOFFSET_10
361 0x00000000, // PA_CL_VPORT_ZSCALE_10
362 0x00000000, // PA_CL_VPORT_ZOFFSET_10
363 0x00000000, // PA_CL_VPORT_XSCALE_11
364 0x00000000, // PA_CL_VPORT_XOFFSET_11
365 0x00000000, // PA_CL_VPORT_YSCALE_11
366 0x00000000, // PA_CL_VPORT_YOFFSET_11
367 0x00000000, // PA_CL_VPORT_ZSCALE_11
368 0x00000000, // PA_CL_VPORT_ZOFFSET_11
369 0x00000000, // PA_CL_VPORT_XSCALE_12
370 0x00000000, // PA_CL_VPORT_XOFFSET_12
371 0x00000000, // PA_CL_VPORT_YSCALE_12
372 0x00000000, // PA_CL_VPORT_YOFFSET_12
373 0x00000000, // PA_CL_VPORT_ZSCALE_12
374 0x00000000, // PA_CL_VPORT_ZOFFSET_12
375 0x00000000, // PA_CL_VPORT_XSCALE_13
376 0x00000000, // PA_CL_VPORT_XOFFSET_13
377 0x00000000, // PA_CL_VPORT_YSCALE_13
378 0x00000000, // PA_CL_VPORT_YOFFSET_13
379 0x00000000, // PA_CL_VPORT_ZSCALE_13
380 0x00000000, // PA_CL_VPORT_ZOFFSET_13
381 0x00000000, // PA_CL_VPORT_XSCALE_14
382 0x00000000, // PA_CL_VPORT_XOFFSET_14
383 0x00000000, // PA_CL_VPORT_YSCALE_14
384 0x00000000, // PA_CL_VPORT_YOFFSET_14
385 0x00000000, // PA_CL_VPORT_ZSCALE_14
386 0x00000000, // PA_CL_VPORT_ZOFFSET_14
387 0x00000000, // PA_CL_VPORT_XSCALE_15
388 0x00000000, // PA_CL_VPORT_XOFFSET_15
389 0x00000000, // PA_CL_VPORT_YSCALE_15
390 0x00000000, // PA_CL_VPORT_YOFFSET_15
391 0x00000000, // PA_CL_VPORT_ZSCALE_15
392 0x00000000, // PA_CL_VPORT_ZOFFSET_15
393 0x00000000, // PA_CL_UCP_0_X
394 0x00000000, // PA_CL_UCP_0_Y
395 0x00000000, // PA_CL_UCP_0_Z
396 0x00000000, // PA_CL_UCP_0_W
397 0x00000000, // PA_CL_UCP_1_X
398 0x00000000, // PA_CL_UCP_1_Y
399 0x00000000, // PA_CL_UCP_1_Z
400 0x00000000, // PA_CL_UCP_1_W
401 0x00000000, // PA_CL_UCP_2_X
402 0x00000000, // PA_CL_UCP_2_Y
403 0x00000000, // PA_CL_UCP_2_Z
404 0x00000000, // PA_CL_UCP_2_W
405 0x00000000, // PA_CL_UCP_3_X
406 0x00000000, // PA_CL_UCP_3_Y
407 0x00000000, // PA_CL_UCP_3_Z
408 0x00000000, // PA_CL_UCP_3_W
409 0x00000000, // PA_CL_UCP_4_X
410 0x00000000, // PA_CL_UCP_4_Y
411 0x00000000, // PA_CL_UCP_4_Z
412 0x00000000, // PA_CL_UCP_4_W
413 0x00000000, // PA_CL_UCP_5_X
414 0x00000000, // PA_CL_UCP_5_Y
415 0x00000000, // PA_CL_UCP_5_Z
416 0x00000000, // PA_CL_UCP_5_W
417 0x00000000, // SPI_VS_OUT_ID_0
418 0x00000000, // SPI_VS_OUT_ID_1
419 0x00000000, // SPI_VS_OUT_ID_2
420 0x00000000, // SPI_VS_OUT_ID_3
421 0x00000000, // SPI_VS_OUT_ID_4
422 0x00000000, // SPI_VS_OUT_ID_5
423 0x00000000, // SPI_VS_OUT_ID_6
424 0x00000000, // SPI_VS_OUT_ID_7
425 0x00000000, // SPI_VS_OUT_ID_8
426 0x00000000, // SPI_VS_OUT_ID_9
427 0x00000000, // SPI_PS_INPUT_CNTL_0
428 0x00000000, // SPI_PS_INPUT_CNTL_1
429 0x00000000, // SPI_PS_INPUT_CNTL_2
430 0x00000000, // SPI_PS_INPUT_CNTL_3
431 0x00000000, // SPI_PS_INPUT_CNTL_4
432 0x00000000, // SPI_PS_INPUT_CNTL_5
433 0x00000000, // SPI_PS_INPUT_CNTL_6
434 0x00000000, // SPI_PS_INPUT_CNTL_7
435 0x00000000, // SPI_PS_INPUT_CNTL_8
436 0x00000000, // SPI_PS_INPUT_CNTL_9
437 0x00000000, // SPI_PS_INPUT_CNTL_10
438 0x00000000, // SPI_PS_INPUT_CNTL_11
439 0x00000000, // SPI_PS_INPUT_CNTL_12
440 0x00000000, // SPI_PS_INPUT_CNTL_13
441 0x00000000, // SPI_PS_INPUT_CNTL_14
442 0x00000000, // SPI_PS_INPUT_CNTL_15
443 0x00000000, // SPI_PS_INPUT_CNTL_16
444 0x00000000, // SPI_PS_INPUT_CNTL_17
445 0x00000000, // SPI_PS_INPUT_CNTL_18
446 0x00000000, // SPI_PS_INPUT_CNTL_19
447 0x00000000, // SPI_PS_INPUT_CNTL_20
448 0x00000000, // SPI_PS_INPUT_CNTL_21
449 0x00000000, // SPI_PS_INPUT_CNTL_22
450 0x00000000, // SPI_PS_INPUT_CNTL_23
451 0x00000000, // SPI_PS_INPUT_CNTL_24
452 0x00000000, // SPI_PS_INPUT_CNTL_25
453 0x00000000, // SPI_PS_INPUT_CNTL_26
454 0x00000000, // SPI_PS_INPUT_CNTL_27
455 0x00000000, // SPI_PS_INPUT_CNTL_28
456 0x00000000, // SPI_PS_INPUT_CNTL_29
457 0x00000000, // SPI_PS_INPUT_CNTL_30
458 0x00000000, // SPI_PS_INPUT_CNTL_31
459 0x00000000, // SPI_VS_OUT_CONFIG
460 0x00000001, // SPI_THREAD_GROUPING
461 0x00000000, // SPI_PS_IN_CONTROL_0
462 0x00000000, // SPI_PS_IN_CONTROL_1
463 0x00000000, // SPI_INTERP_CONTROL_0
464 0x00000000, // SPI_INPUT_Z
465 0x00000000, // SPI_FOG_CNTL
466 0x00000000, // SPI_BARYC_CNTL
467 0x00000000, // SPI_PS_IN_CONTROL_2
468 0x00000000, // SPI_COMPUTE_INPUT_CNTL
469 0x00000000, // SPI_COMPUTE_NUM_THREAD_X
470 0x00000000, // SPI_COMPUTE_NUM_THREAD_Y
471 0x00000000, // SPI_COMPUTE_NUM_THREAD_Z
472 0, // HOLE
473 0, // HOLE
474 0, // HOLE
475 0, // HOLE
476 0, // HOLE
477 0, // HOLE
478 0, // HOLE
479 0, // HOLE
480 0, // HOLE
481 0, // HOLE
482 0x00000000, // GDS_ADDR_BASE
483 0x00003fff, // GDS_ADDR_SIZE
484 0x00000001, // GDS_ORDERED_WAVE_PER_SE
485 0x00000000, // GDS_APPEND_CONSUME_UAV0
486 0x00000000, // GDS_APPEND_CONSUME_UAV1
487 0x00000000, // GDS_APPEND_CONSUME_UAV2
488 0x00000000, // GDS_APPEND_CONSUME_UAV3
489 0x00000000, // GDS_APPEND_CONSUME_UAV4
490 0x00000000, // GDS_APPEND_CONSUME_UAV5
491 0x00000000, // GDS_APPEND_CONSUME_UAV6
492 0x00000000, // GDS_APPEND_CONSUME_UAV7
493 0x00000000, // GDS_APPEND_CONSUME_UAV8
494 0x00000000, // GDS_APPEND_CONSUME_UAV9
495 0x00000000, // GDS_APPEND_CONSUME_UAV10
496 0x00000000, // GDS_APPEND_CONSUME_UAV11
497 0, // HOLE
498 0, // HOLE
499 0, // HOLE
500 0, // HOLE
501 0, // HOLE
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0, // HOLE
506 0x00000000, // CB_BLEND0_CONTROL
507 0x00000000, // CB_BLEND1_CONTROL
508 0x00000000, // CB_BLEND2_CONTROL
509 0x00000000, // CB_BLEND3_CONTROL
510 0x00000000, // CB_BLEND4_CONTROL
511 0x00000000, // CB_BLEND5_CONTROL
512 0x00000000, // CB_BLEND6_CONTROL
513 0x00000000, // CB_BLEND7_CONTROL
514};
515static const u32 SECT_CONTEXT_def_2[] =
516{
517 0x00000000, // PA_CL_POINT_X_RAD
518 0x00000000, // PA_CL_POINT_Y_RAD
519 0x00000000, // PA_CL_POINT_SIZE
520 0x00000000, // PA_CL_POINT_CULL_RAD
521 0x00000000, // VGT_DMA_BASE_HI
522 0x00000000, // VGT_DMA_BASE
523};
524static const u32 SECT_CONTEXT_def_3[] =
525{
526 0x00000000, // DB_DEPTH_CONTROL
527 0, // HOLE
528 0x00000000, // CB_COLOR_CONTROL
529 0x00000200, // DB_SHADER_CONTROL
530 0x00000000, // PA_CL_CLIP_CNTL
531 0x00000000, // PA_SU_SC_MODE_CNTL
532 0x00000000, // PA_CL_VTE_CNTL
533 0x00000000, // PA_CL_VS_OUT_CNTL
534 0x00000000, // PA_CL_NANINF_CNTL
535 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
536 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
537 0x00000000, // PA_SU_PRIM_FILTER_CNTL
538 0x00000000, // SQ_LSTMP_RING_ITEMSIZE
539 0x00000000, // SQ_HSTMP_RING_ITEMSIZE
540 0x00000000, // SQ_DYN_GPR_RESOURCE_LIMIT_1
541 0, // HOLE
542 0x00000000, // SQ_PGM_START_PS
543 0x00000000, // SQ_PGM_RESOURCES_PS
544 0x00000000, // SQ_PGM_RESOURCES_2_PS
545 0x00000000, // SQ_PGM_EXPORTS_PS
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0x00000000, // SQ_PGM_START_VS
550 0x00000000, // SQ_PGM_RESOURCES_VS
551 0x00000000, // SQ_PGM_RESOURCES_2_VS
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0x00000000, // SQ_PGM_START_GS
556 0x00000000, // SQ_PGM_RESOURCES_GS
557 0x00000000, // SQ_PGM_RESOURCES_2_GS
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0x00000000, // SQ_PGM_START_ES
562 0x00000000, // SQ_PGM_RESOURCES_ES
563 0x00000000, // SQ_PGM_RESOURCES_2_ES
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0x00000000, // SQ_PGM_START_FS
568 0x00000000, // SQ_PGM_RESOURCES_FS
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0x00000000, // SQ_PGM_START_HS
573 0x00000000, // SQ_PGM_RESOURCES_HS
574 0x00000000, // SQ_PGM_RESOURCES_2_HS
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0x00000000, // SQ_PGM_START_LS
579 0x00000000, // SQ_PGM_RESOURCES_LS
580 0x00000000, // SQ_PGM_RESOURCES_2_LS
581};
582static const u32 SECT_CONTEXT_def_4[] =
583{
584 0x00000000, // SQ_LDS_ALLOC
585 0x00000000, // SQ_LDS_ALLOC_PS
586 0x00000000, // SQ_VTX_SEMANTIC_CLEAR
587 0, // HOLE
588 0x00000000, // SQ_THREAD_TRACE_CTRL
589 0, // HOLE
590 0x00000000, // SQ_ESGS_RING_ITEMSIZE
591 0x00000000, // SQ_GSVS_RING_ITEMSIZE
592 0x00000000, // SQ_ESTMP_RING_ITEMSIZE
593 0x00000000, // SQ_GSTMP_RING_ITEMSIZE
594 0x00000000, // SQ_VSTMP_RING_ITEMSIZE
595 0x00000000, // SQ_PSTMP_RING_ITEMSIZE
596 0, // HOLE
597 0x00000000, // SQ_GS_VERT_ITEMSIZE
598 0x00000000, // SQ_GS_VERT_ITEMSIZE_1
599 0x00000000, // SQ_GS_VERT_ITEMSIZE_2
600 0x00000000, // SQ_GS_VERT_ITEMSIZE_3
601 0x00000000, // SQ_GSVS_RING_OFFSET_1
602 0x00000000, // SQ_GSVS_RING_OFFSET_2
603 0x00000000, // SQ_GSVS_RING_OFFSET_3
604 0, // HOLE
605 0, // HOLE
606 0x00000000, // SQ_ALU_CONST_CACHE_PS_0
607 0x00000000, // SQ_ALU_CONST_CACHE_PS_1
608 0x00000000, // SQ_ALU_CONST_CACHE_PS_2
609 0x00000000, // SQ_ALU_CONST_CACHE_PS_3
610 0x00000000, // SQ_ALU_CONST_CACHE_PS_4
611 0x00000000, // SQ_ALU_CONST_CACHE_PS_5
612 0x00000000, // SQ_ALU_CONST_CACHE_PS_6
613 0x00000000, // SQ_ALU_CONST_CACHE_PS_7
614 0x00000000, // SQ_ALU_CONST_CACHE_PS_8
615 0x00000000, // SQ_ALU_CONST_CACHE_PS_9
616 0x00000000, // SQ_ALU_CONST_CACHE_PS_10
617 0x00000000, // SQ_ALU_CONST_CACHE_PS_11
618 0x00000000, // SQ_ALU_CONST_CACHE_PS_12
619 0x00000000, // SQ_ALU_CONST_CACHE_PS_13
620 0x00000000, // SQ_ALU_CONST_CACHE_PS_14
621 0x00000000, // SQ_ALU_CONST_CACHE_PS_15
622 0x00000000, // SQ_ALU_CONST_CACHE_VS_0
623 0x00000000, // SQ_ALU_CONST_CACHE_VS_1
624 0x00000000, // SQ_ALU_CONST_CACHE_VS_2
625 0x00000000, // SQ_ALU_CONST_CACHE_VS_3
626 0x00000000, // SQ_ALU_CONST_CACHE_VS_4
627 0x00000000, // SQ_ALU_CONST_CACHE_VS_5
628 0x00000000, // SQ_ALU_CONST_CACHE_VS_6
629 0x00000000, // SQ_ALU_CONST_CACHE_VS_7
630 0x00000000, // SQ_ALU_CONST_CACHE_VS_8
631 0x00000000, // SQ_ALU_CONST_CACHE_VS_9
632 0x00000000, // SQ_ALU_CONST_CACHE_VS_10
633 0x00000000, // SQ_ALU_CONST_CACHE_VS_11
634 0x00000000, // SQ_ALU_CONST_CACHE_VS_12
635 0x00000000, // SQ_ALU_CONST_CACHE_VS_13
636 0x00000000, // SQ_ALU_CONST_CACHE_VS_14
637 0x00000000, // SQ_ALU_CONST_CACHE_VS_15
638 0x00000000, // SQ_ALU_CONST_CACHE_GS_0
639 0x00000000, // SQ_ALU_CONST_CACHE_GS_1
640 0x00000000, // SQ_ALU_CONST_CACHE_GS_2
641 0x00000000, // SQ_ALU_CONST_CACHE_GS_3
642 0x00000000, // SQ_ALU_CONST_CACHE_GS_4
643 0x00000000, // SQ_ALU_CONST_CACHE_GS_5
644 0x00000000, // SQ_ALU_CONST_CACHE_GS_6
645 0x00000000, // SQ_ALU_CONST_CACHE_GS_7
646 0x00000000, // SQ_ALU_CONST_CACHE_GS_8
647 0x00000000, // SQ_ALU_CONST_CACHE_GS_9
648 0x00000000, // SQ_ALU_CONST_CACHE_GS_10
649 0x00000000, // SQ_ALU_CONST_CACHE_GS_11
650 0x00000000, // SQ_ALU_CONST_CACHE_GS_12
651 0x00000000, // SQ_ALU_CONST_CACHE_GS_13
652 0x00000000, // SQ_ALU_CONST_CACHE_GS_14
653 0x00000000, // SQ_ALU_CONST_CACHE_GS_15
654 0x00000000, // PA_SU_POINT_SIZE
655 0x00000000, // PA_SU_POINT_MINMAX
656 0x00000000, // PA_SU_LINE_CNTL
657 0x00000000, // PA_SC_LINE_STIPPLE
658 0x00000000, // VGT_OUTPUT_PATH_CNTL
659 0x00000000, // VGT_HOS_CNTL
660 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
661 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
662 0x00000000, // VGT_HOS_REUSE_DEPTH
663 0x00000000, // VGT_GROUP_PRIM_TYPE
664 0x00000000, // VGT_GROUP_FIRST_DECR
665 0x00000000, // VGT_GROUP_DECR
666 0x00000000, // VGT_GROUP_VECT_0_CNTL
667 0x00000000, // VGT_GROUP_VECT_1_CNTL
668 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
669 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
670 0x00000000, // VGT_GS_MODE
671 0, // HOLE
672 0x00000000, // PA_SC_MODE_CNTL_0
673 0x00000000, // PA_SC_MODE_CNTL_1
674 0x00000000, // VGT_ENHANCE
675 0x00000000, // VGT_GS_PER_ES
676 0x00000000, // VGT_ES_PER_GS
677 0x00000000, // VGT_GS_PER_VS
678 0, // HOLE
679 0, // HOLE
680 0, // HOLE
681 0x00000000, // VGT_GS_OUT_PRIM_TYPE
682};
683static const u32 SECT_CONTEXT_def_5[] =
684{
685 0x00000000, // VGT_DMA_MAX_SIZE
686 0x00000000, // VGT_DMA_INDEX_TYPE
687 0, // HOLE
688 0x00000000, // VGT_PRIMITIVEID_EN
689 0x00000000, // VGT_DMA_NUM_INSTANCES
690};
691static const u32 SECT_CONTEXT_def_6[] =
692{
693 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
694 0, // HOLE
695 0, // HOLE
696 0x00000000, // VGT_INSTANCE_STEP_RATE_0
697 0x00000000, // VGT_INSTANCE_STEP_RATE_1
698 0, // HOLE
699 0, // HOLE
700 0, // HOLE
701 0x00000000, // VGT_REUSE_OFF
702 0x00000000, // VGT_VTX_CNT_EN
703 0x00000000, // DB_HTILE_SURFACE
704 0x00000000, // DB_SRESULTS_COMPARE_STATE0
705 0x00000000, // DB_SRESULTS_COMPARE_STATE1
706 0x00000000, // DB_PRELOAD_CONTROL
707 0, // HOLE
708 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
709 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
710 0x00000000, // VGT_STRMOUT_BUFFER_BASE_0
711 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
712 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
713 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
714 0x00000000, // VGT_STRMOUT_BUFFER_BASE_1
715 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
716 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
717 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
718 0x00000000, // VGT_STRMOUT_BUFFER_BASE_2
719 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
720 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
721 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
722 0x00000000, // VGT_STRMOUT_BUFFER_BASE_3
723 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
724 0x00000000, // VGT_STRMOUT_BASE_OFFSET_0
725 0x00000000, // VGT_STRMOUT_BASE_OFFSET_1
726 0x00000000, // VGT_STRMOUT_BASE_OFFSET_2
727 0x00000000, // VGT_STRMOUT_BASE_OFFSET_3
728 0, // HOLE
729 0, // HOLE
730 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
731 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
732 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
733 0, // HOLE
734 0x00000000, // VGT_GS_MAX_VERT_OUT
735 0, // HOLE
736 0, // HOLE
737 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_0
738 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_1
739 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_2
740 0x00000000, // VGT_STRMOUT_BASE_OFFSET_HI_3
741 0x00000000, // VGT_SHADER_STAGES_EN
742 0x00000000, // VGT_LS_HS_CONFIG
743 0x00000000, // VGT_LS_SIZE
744 0x00000000, // VGT_HS_SIZE
745 0x00000000, // VGT_LS_HS_ALLOC
746 0x00000000, // VGT_HS_PATCH_CONST
747 0x00000000, // VGT_TF_PARAM
748 0x00000000, // DB_ALPHA_TO_MASK
749};
750static const u32 SECT_CONTEXT_def_7[] =
751{
752 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
753 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
754 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
755 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
756 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
757 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
758 0x00000000, // VGT_GS_INSTANCE_CNT
759 0x00000000, // VGT_STRMOUT_CONFIG
760 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
761 0x00000000, // CB_IMMED0_BASE
762 0x00000000, // CB_IMMED1_BASE
763 0x00000000, // CB_IMMED2_BASE
764 0x00000000, // CB_IMMED3_BASE
765 0x00000000, // CB_IMMED4_BASE
766 0x00000000, // CB_IMMED5_BASE
767 0x00000000, // CB_IMMED6_BASE
768 0x00000000, // CB_IMMED7_BASE
769 0x00000000, // CB_IMMED8_BASE
770 0x00000000, // CB_IMMED9_BASE
771 0x00000000, // CB_IMMED10_BASE
772 0x00000000, // CB_IMMED11_BASE
773 0, // HOLE
774 0, // HOLE
775 0, // HOLE
776 0, // HOLE
777 0, // HOLE
778 0, // HOLE
779 0, // HOLE
780 0, // HOLE
781 0, // HOLE
782 0, // HOLE
783 0, // HOLE
784 0, // HOLE
785 0, // HOLE
786 0x00001000, // PA_SC_LINE_CNTL
787 0x00000000, // PA_SC_AA_CONFIG
788 0x00000005, // PA_SU_VTX_CNTL
789 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
790 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
791 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
792 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_0
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_1
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_2
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_3
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_4
798 0x00000000, // PA_SC_AA_SAMPLE_LOCS_5
799 0x00000000, // PA_SC_AA_SAMPLE_LOCS_6
800 0x00000000, // PA_SC_AA_SAMPLE_LOCS_7
801 0xffffffff, // PA_SC_AA_MASK
802 0x00000000, // CB_CLRCMP_CONTROL
803 0x00000000, // CB_CLRCMP_SRC
804 0x00000000, // CB_CLRCMP_DST
805 0x00000000, // CB_CLRCMP_MSK
806 0, // HOLE
807 0, // HOLE
808 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
809 0x00000010, // VGT_OUT_DEALLOC_CNTL
810 0x00000000, // CB_COLOR0_BASE
811 0x00000000, // CB_COLOR0_PITCH
812 0x00000000, // CB_COLOR0_SLICE
813 0x00000000, // CB_COLOR0_VIEW
814 0x00000000, // CB_COLOR0_INFO
815 0x00000000, // CB_COLOR0_ATTRIB
816 0x00000000, // CB_COLOR0_DIM
817 0x00000000, // CB_COLOR0_CMASK
818 0x00000000, // CB_COLOR0_CMASK_SLICE
819 0x00000000, // CB_COLOR0_FMASK
820 0x00000000, // CB_COLOR0_FMASK_SLICE
821 0x00000000, // CB_COLOR0_CLEAR_WORD0
822 0x00000000, // CB_COLOR0_CLEAR_WORD1
823 0x00000000, // CB_COLOR0_CLEAR_WORD2
824 0x00000000, // CB_COLOR0_CLEAR_WORD3
825 0x00000000, // CB_COLOR1_BASE
826 0x00000000, // CB_COLOR1_PITCH
827 0x00000000, // CB_COLOR1_SLICE
828 0x00000000, // CB_COLOR1_VIEW
829 0x00000000, // CB_COLOR1_INFO
830 0x00000000, // CB_COLOR1_ATTRIB
831 0x00000000, // CB_COLOR1_DIM
832 0x00000000, // CB_COLOR1_CMASK
833 0x00000000, // CB_COLOR1_CMASK_SLICE
834 0x00000000, // CB_COLOR1_FMASK
835 0x00000000, // CB_COLOR1_FMASK_SLICE
836 0x00000000, // CB_COLOR1_CLEAR_WORD0
837 0x00000000, // CB_COLOR1_CLEAR_WORD1
838 0x00000000, // CB_COLOR1_CLEAR_WORD2
839 0x00000000, // CB_COLOR1_CLEAR_WORD3
840 0x00000000, // CB_COLOR2_BASE
841 0x00000000, // CB_COLOR2_PITCH
842 0x00000000, // CB_COLOR2_SLICE
843 0x00000000, // CB_COLOR2_VIEW
844 0x00000000, // CB_COLOR2_INFO
845 0x00000000, // CB_COLOR2_ATTRIB
846 0x00000000, // CB_COLOR2_DIM
847 0x00000000, // CB_COLOR2_CMASK
848 0x00000000, // CB_COLOR2_CMASK_SLICE
849 0x00000000, // CB_COLOR2_FMASK
850 0x00000000, // CB_COLOR2_FMASK_SLICE
851 0x00000000, // CB_COLOR2_CLEAR_WORD0
852 0x00000000, // CB_COLOR2_CLEAR_WORD1
853 0x00000000, // CB_COLOR2_CLEAR_WORD2
854 0x00000000, // CB_COLOR2_CLEAR_WORD3
855 0x00000000, // CB_COLOR3_BASE
856 0x00000000, // CB_COLOR3_PITCH
857 0x00000000, // CB_COLOR3_SLICE
858 0x00000000, // CB_COLOR3_VIEW
859 0x00000000, // CB_COLOR3_INFO
860 0x00000000, // CB_COLOR3_ATTRIB
861 0x00000000, // CB_COLOR3_DIM
862 0x00000000, // CB_COLOR3_CMASK
863 0x00000000, // CB_COLOR3_CMASK_SLICE
864 0x00000000, // CB_COLOR3_FMASK
865 0x00000000, // CB_COLOR3_FMASK_SLICE
866 0x00000000, // CB_COLOR3_CLEAR_WORD0
867 0x00000000, // CB_COLOR3_CLEAR_WORD1
868 0x00000000, // CB_COLOR3_CLEAR_WORD2
869 0x00000000, // CB_COLOR3_CLEAR_WORD3
870 0x00000000, // CB_COLOR4_BASE
871 0x00000000, // CB_COLOR4_PITCH
872 0x00000000, // CB_COLOR4_SLICE
873 0x00000000, // CB_COLOR4_VIEW
874 0x00000000, // CB_COLOR4_INFO
875 0x00000000, // CB_COLOR4_ATTRIB
876 0x00000000, // CB_COLOR4_DIM
877 0x00000000, // CB_COLOR4_CMASK
878 0x00000000, // CB_COLOR4_CMASK_SLICE
879 0x00000000, // CB_COLOR4_FMASK
880 0x00000000, // CB_COLOR4_FMASK_SLICE
881 0x00000000, // CB_COLOR4_CLEAR_WORD0
882 0x00000000, // CB_COLOR4_CLEAR_WORD1
883 0x00000000, // CB_COLOR4_CLEAR_WORD2
884 0x00000000, // CB_COLOR4_CLEAR_WORD3
885 0x00000000, // CB_COLOR5_BASE
886 0x00000000, // CB_COLOR5_PITCH
887 0x00000000, // CB_COLOR5_SLICE
888 0x00000000, // CB_COLOR5_VIEW
889 0x00000000, // CB_COLOR5_INFO
890 0x00000000, // CB_COLOR5_ATTRIB
891 0x00000000, // CB_COLOR5_DIM
892 0x00000000, // CB_COLOR5_CMASK
893 0x00000000, // CB_COLOR5_CMASK_SLICE
894 0x00000000, // CB_COLOR5_FMASK
895 0x00000000, // CB_COLOR5_FMASK_SLICE
896 0x00000000, // CB_COLOR5_CLEAR_WORD0
897 0x00000000, // CB_COLOR5_CLEAR_WORD1
898 0x00000000, // CB_COLOR5_CLEAR_WORD2
899 0x00000000, // CB_COLOR5_CLEAR_WORD3
900 0x00000000, // CB_COLOR6_BASE
901 0x00000000, // CB_COLOR6_PITCH
902 0x00000000, // CB_COLOR6_SLICE
903 0x00000000, // CB_COLOR6_VIEW
904 0x00000000, // CB_COLOR6_INFO
905 0x00000000, // CB_COLOR6_ATTRIB
906 0x00000000, // CB_COLOR6_DIM
907 0x00000000, // CB_COLOR6_CMASK
908 0x00000000, // CB_COLOR6_CMASK_SLICE
909 0x00000000, // CB_COLOR6_FMASK
910 0x00000000, // CB_COLOR6_FMASK_SLICE
911 0x00000000, // CB_COLOR6_CLEAR_WORD0
912 0x00000000, // CB_COLOR6_CLEAR_WORD1
913 0x00000000, // CB_COLOR6_CLEAR_WORD2
914 0x00000000, // CB_COLOR6_CLEAR_WORD3
915 0x00000000, // CB_COLOR7_BASE
916 0x00000000, // CB_COLOR7_PITCH
917 0x00000000, // CB_COLOR7_SLICE
918 0x00000000, // CB_COLOR7_VIEW
919 0x00000000, // CB_COLOR7_INFO
920 0x00000000, // CB_COLOR7_ATTRIB
921 0x00000000, // CB_COLOR7_DIM
922 0x00000000, // CB_COLOR7_CMASK
923 0x00000000, // CB_COLOR7_CMASK_SLICE
924 0x00000000, // CB_COLOR7_FMASK
925 0x00000000, // CB_COLOR7_FMASK_SLICE
926 0x00000000, // CB_COLOR7_CLEAR_WORD0
927 0x00000000, // CB_COLOR7_CLEAR_WORD1
928 0x00000000, // CB_COLOR7_CLEAR_WORD2
929 0x00000000, // CB_COLOR7_CLEAR_WORD3
930 0x00000000, // CB_COLOR8_BASE
931 0x00000000, // CB_COLOR8_PITCH
932 0x00000000, // CB_COLOR8_SLICE
933 0x00000000, // CB_COLOR8_VIEW
934 0x00000000, // CB_COLOR8_INFO
935 0x00000000, // CB_COLOR8_ATTRIB
936 0x00000000, // CB_COLOR8_DIM
937 0x00000000, // CB_COLOR9_BASE
938 0x00000000, // CB_COLOR9_PITCH
939 0x00000000, // CB_COLOR9_SLICE
940 0x00000000, // CB_COLOR9_VIEW
941 0x00000000, // CB_COLOR9_INFO
942 0x00000000, // CB_COLOR9_ATTRIB
943 0x00000000, // CB_COLOR9_DIM
944 0x00000000, // CB_COLOR10_BASE
945 0x00000000, // CB_COLOR10_PITCH
946 0x00000000, // CB_COLOR10_SLICE
947 0x00000000, // CB_COLOR10_VIEW
948 0x00000000, // CB_COLOR10_INFO
949 0x00000000, // CB_COLOR10_ATTRIB
950 0x00000000, // CB_COLOR10_DIM
951 0x00000000, // CB_COLOR11_BASE
952 0x00000000, // CB_COLOR11_PITCH
953 0x00000000, // CB_COLOR11_SLICE
954 0x00000000, // CB_COLOR11_VIEW
955 0x00000000, // CB_COLOR11_INFO
956 0x00000000, // CB_COLOR11_ATTRIB
957 0x00000000, // CB_COLOR11_DIM
958 0, // HOLE
959 0, // HOLE
960 0, // HOLE
961 0, // HOLE
962 0, // HOLE
963 0, // HOLE
964 0, // HOLE
965 0, // HOLE
966 0, // HOLE
967 0, // HOLE
968 0, // HOLE
969 0, // HOLE
970 0, // HOLE
971 0, // HOLE
972 0, // HOLE
973 0, // HOLE
974 0, // HOLE
975 0, // HOLE
976 0, // HOLE
977 0, // HOLE
978 0x00000000, // SQ_ALU_CONST_CACHE_HS_0
979 0x00000000, // SQ_ALU_CONST_CACHE_HS_1
980 0x00000000, // SQ_ALU_CONST_CACHE_HS_2
981 0x00000000, // SQ_ALU_CONST_CACHE_HS_3
982 0x00000000, // SQ_ALU_CONST_CACHE_HS_4
983 0x00000000, // SQ_ALU_CONST_CACHE_HS_5
984 0x00000000, // SQ_ALU_CONST_CACHE_HS_6
985 0x00000000, // SQ_ALU_CONST_CACHE_HS_7
986 0x00000000, // SQ_ALU_CONST_CACHE_HS_8
987 0x00000000, // SQ_ALU_CONST_CACHE_HS_9
988 0x00000000, // SQ_ALU_CONST_CACHE_HS_10
989 0x00000000, // SQ_ALU_CONST_CACHE_HS_11
990 0x00000000, // SQ_ALU_CONST_CACHE_HS_12
991 0x00000000, // SQ_ALU_CONST_CACHE_HS_13
992 0x00000000, // SQ_ALU_CONST_CACHE_HS_14
993 0x00000000, // SQ_ALU_CONST_CACHE_HS_15
994 0x00000000, // SQ_ALU_CONST_CACHE_LS_0
995 0x00000000, // SQ_ALU_CONST_CACHE_LS_1
996 0x00000000, // SQ_ALU_CONST_CACHE_LS_2
997 0x00000000, // SQ_ALU_CONST_CACHE_LS_3
998 0x00000000, // SQ_ALU_CONST_CACHE_LS_4
999 0x00000000, // SQ_ALU_CONST_CACHE_LS_5
1000 0x00000000, // SQ_ALU_CONST_CACHE_LS_6
1001 0x00000000, // SQ_ALU_CONST_CACHE_LS_7
1002 0x00000000, // SQ_ALU_CONST_CACHE_LS_8
1003 0x00000000, // SQ_ALU_CONST_CACHE_LS_9
1004 0x00000000, // SQ_ALU_CONST_CACHE_LS_10
1005 0x00000000, // SQ_ALU_CONST_CACHE_LS_11
1006 0x00000000, // SQ_ALU_CONST_CACHE_LS_12
1007 0x00000000, // SQ_ALU_CONST_CACHE_LS_13
1008 0x00000000, // SQ_ALU_CONST_CACHE_LS_14
1009 0x00000000, // SQ_ALU_CONST_CACHE_LS_15
1010 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_0
1011 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_1
1012 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_2
1013 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_3
1014 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_4
1015 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_5
1016 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_6
1017 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_7
1018 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_8
1019 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_9
1020 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_10
1021 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_11
1022 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_12
1023 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_13
1024 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_14
1025 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_HS_15
1026 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_0
1027 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_1
1028 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_2
1029 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_3
1030 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_4
1031 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_5
1032 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_6
1033 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_7
1034 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_8
1035 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_9
1036 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_10
1037 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_11
1038 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_12
1039 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_13
1040 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_14
1041 0x00000000, // SQ_ALU_CONST_BUFFER_SIZE_LS_15
1042};
1043static const struct cs_extent_def SECT_CONTEXT_defs[] =
1044{
1045 {SECT_CONTEXT_def_1, 0x0000a000, 488 },
1046 {SECT_CONTEXT_def_2, 0x0000a1f5, 6 },
1047 {SECT_CONTEXT_def_3, 0x0000a200, 55 },
1048 {SECT_CONTEXT_def_4, 0x0000a23a, 98 },
1049 {SECT_CONTEXT_def_5, 0x0000a29e, 5 },
1050 {SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
1051 {SECT_CONTEXT_def_7, 0x0000a2de, 290 },
1052 { 0, 0, 0 }
1053};
1054static const u32 SECT_CLEAR_def_1[] =
1055{
1056 0xffffffff, // SQ_TEX_SAMPLER_CLEAR
1057 0xffffffff, // SQ_TEX_RESOURCE_CLEAR
1058 0xffffffff, // SQ_LOOP_BOOL_CLEAR
1059};
1060static const struct cs_extent_def SECT_CLEAR_defs[] =
1061{
1062 {SECT_CLEAR_def_1, 0x0000ffc0, 3 },
1063 { 0, 0, 0 }
1064};
1065static const u32 SECT_CTRLCONST_def_1[] =
1066{
1067 0x00000000, // SQ_VTX_BASE_VTX_LOC
1068 0x00000000, // SQ_VTX_START_INST_LOC
1069};
1070static const struct cs_extent_def SECT_CTRLCONST_defs[] =
1071{
1072 {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
1073 { 0, 0, 0 }
1074};
1075struct cs_section_def evergreen_cs_data[] = {
1076 { SECT_CONTEXT_defs, SECT_CONTEXT },
1077 { SECT_CLEAR_defs, SECT_CLEAR },
1078 { SECT_CTRLCONST_defs, SECT_CTRLCONST },
1079 { 0, SECT_NONE }
1080};
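The clear-state tables above end in zeroed sentinel entries ({ 0, 0, 0 } and { 0, SECT_NONE }), so consumers can walk them without separate length bookkeeping. As a hedged sketch of that walk (not part of this patch; the struct layouts are inferred from the initializers above, and example_count_cs_dwords is a hypothetical helper):

struct cs_extent_def {
	const unsigned int *extent;   /* default register values; "HOLE" slots are skipped registers */
	const unsigned int reg_index; /* dword offset of the first register in the extent */
	const unsigned int reg_count; /* number of dwords in this extent */
};

struct cs_section_def {
	const struct cs_extent_def *section; /* sentinel-terminated extent list */
	const unsigned int section_id;       /* SECT_CONTEXT, SECT_CLEAR, SECT_CTRLCONST */
};

/* Sum the payload dwords across every extent of every section. */
static unsigned int example_count_cs_dwords(const struct cs_section_def *cs)
{
	unsigned int count = 0;

	for (; cs->section; cs++) {
		const struct cs_extent_def *ext;

		for (ext = cs->section; ext->extent; ext++)
			count += ext->reg_count;
	}
	return count;
}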
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
new file mode 100644
index 000000000000..b994cb2a35a0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -0,0 +1,941 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24static const u32 si_SECT_CONTEXT_def_1[] =
25{
26 0x00000000, // DB_RENDER_CONTROL
27 0x00000000, // DB_COUNT_CONTROL
28 0x00000000, // DB_DEPTH_VIEW
29 0x00000000, // DB_RENDER_OVERRIDE
30 0x00000000, // DB_RENDER_OVERRIDE2
31 0x00000000, // DB_HTILE_DATA_BASE
32 0, // HOLE
33 0, // HOLE
34 0x00000000, // DB_DEPTH_BOUNDS_MIN
35 0x00000000, // DB_DEPTH_BOUNDS_MAX
36 0x00000000, // DB_STENCIL_CLEAR
37 0x00000000, // DB_DEPTH_CLEAR
38 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
39 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
40 0, // HOLE
41 0x00000000, // DB_DEPTH_INFO
42 0x00000000, // DB_Z_INFO
43 0x00000000, // DB_STENCIL_INFO
44 0x00000000, // DB_Z_READ_BASE
45 0x00000000, // DB_STENCIL_READ_BASE
46 0x00000000, // DB_Z_WRITE_BASE
47 0x00000000, // DB_STENCIL_WRITE_BASE
48 0x00000000, // DB_DEPTH_SIZE
49 0x00000000, // DB_DEPTH_SLICE
50 0, // HOLE
51 0, // HOLE
52 0, // HOLE
53 0, // HOLE
54 0, // HOLE
55 0, // HOLE
56 0, // HOLE
57 0, // HOLE
58 0x00000000, // TA_BC_BASE_ADDR
59 0, // HOLE
60 0, // HOLE
61 0, // HOLE
62 0, // HOLE
63 0, // HOLE
64 0, // HOLE
65 0, // HOLE
66 0, // HOLE
67 0, // HOLE
68 0, // HOLE
69 0, // HOLE
70 0, // HOLE
71 0, // HOLE
72 0, // HOLE
73 0, // HOLE
74 0, // HOLE
75 0, // HOLE
76 0, // HOLE
77 0, // HOLE
78 0, // HOLE
79 0, // HOLE
80 0, // HOLE
81 0, // HOLE
82 0, // HOLE
83 0, // HOLE
84 0, // HOLE
85 0, // HOLE
86 0, // HOLE
87 0, // HOLE
88 0, // HOLE
89 0, // HOLE
90 0, // HOLE
91 0, // HOLE
92 0, // HOLE
93 0, // HOLE
94 0, // HOLE
95 0, // HOLE
96 0, // HOLE
97 0, // HOLE
98 0, // HOLE
99 0, // HOLE
100 0, // HOLE
101 0, // HOLE
102 0, // HOLE
103 0, // HOLE
104 0, // HOLE
105 0, // HOLE
106 0, // HOLE
107 0, // HOLE
108 0, // HOLE
109 0, // HOLE
110 0, // HOLE
111 0, // HOLE
112 0, // HOLE
113 0, // HOLE
114 0, // HOLE
115 0, // HOLE
116 0, // HOLE
117 0, // HOLE
118 0, // HOLE
119 0, // HOLE
120 0, // HOLE
121 0, // HOLE
122 0, // HOLE
123 0, // HOLE
124 0, // HOLE
125 0, // HOLE
126 0, // HOLE
127 0, // HOLE
128 0, // HOLE
129 0, // HOLE
130 0, // HOLE
131 0, // HOLE
132 0, // HOLE
133 0, // HOLE
134 0, // HOLE
135 0, // HOLE
136 0, // HOLE
137 0, // HOLE
138 0, // HOLE
139 0, // HOLE
140 0, // HOLE
141 0, // HOLE
142 0, // HOLE
143 0, // HOLE
144 0, // HOLE
145 0, // HOLE
146 0, // HOLE
147 0, // HOLE
148 0, // HOLE
149 0, // HOLE
150 0, // HOLE
151 0, // HOLE
152 0x00000000, // COHER_DEST_BASE_2
153 0x00000000, // COHER_DEST_BASE_3
154 0x00000000, // PA_SC_WINDOW_OFFSET
155 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
156 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
157 0x0000ffff, // PA_SC_CLIPRECT_RULE
158 0x00000000, // PA_SC_CLIPRECT_0_TL
159 0x40004000, // PA_SC_CLIPRECT_0_BR
160 0x00000000, // PA_SC_CLIPRECT_1_TL
161 0x40004000, // PA_SC_CLIPRECT_1_BR
162 0x00000000, // PA_SC_CLIPRECT_2_TL
163 0x40004000, // PA_SC_CLIPRECT_2_BR
164 0x00000000, // PA_SC_CLIPRECT_3_TL
165 0x40004000, // PA_SC_CLIPRECT_3_BR
166 0xaa99aaaa, // PA_SC_EDGERULE
167 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
168 0xffffffff, // CB_TARGET_MASK
169 0xffffffff, // CB_SHADER_MASK
170 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
171 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
172 0x00000000, // COHER_DEST_BASE_0
173 0x00000000, // COHER_DEST_BASE_1
174 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
175 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
176 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
177 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
178 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
179 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
180 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
181 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
182 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
183 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
184 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
185 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
186 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
187 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
188 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
189 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
190 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
191 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
192 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
193 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
194 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
195 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
196 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
197 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
198 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
199 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
200 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
201 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
202 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
203 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
204 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
205 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
206 0x00000000, // PA_SC_VPORT_ZMIN_0
207 0x3f800000, // PA_SC_VPORT_ZMAX_0
208 0x00000000, // PA_SC_VPORT_ZMIN_1
209 0x3f800000, // PA_SC_VPORT_ZMAX_1
210 0x00000000, // PA_SC_VPORT_ZMIN_2
211 0x3f800000, // PA_SC_VPORT_ZMAX_2
212 0x00000000, // PA_SC_VPORT_ZMIN_3
213 0x3f800000, // PA_SC_VPORT_ZMAX_3
214 0x00000000, // PA_SC_VPORT_ZMIN_4
215 0x3f800000, // PA_SC_VPORT_ZMAX_4
216 0x00000000, // PA_SC_VPORT_ZMIN_5
217 0x3f800000, // PA_SC_VPORT_ZMAX_5
218 0x00000000, // PA_SC_VPORT_ZMIN_6
219 0x3f800000, // PA_SC_VPORT_ZMAX_6
220 0x00000000, // PA_SC_VPORT_ZMIN_7
221 0x3f800000, // PA_SC_VPORT_ZMAX_7
222 0x00000000, // PA_SC_VPORT_ZMIN_8
223 0x3f800000, // PA_SC_VPORT_ZMAX_8
224 0x00000000, // PA_SC_VPORT_ZMIN_9
225 0x3f800000, // PA_SC_VPORT_ZMAX_9
226 0x00000000, // PA_SC_VPORT_ZMIN_10
227 0x3f800000, // PA_SC_VPORT_ZMAX_10
228 0x00000000, // PA_SC_VPORT_ZMIN_11
229 0x3f800000, // PA_SC_VPORT_ZMAX_11
230 0x00000000, // PA_SC_VPORT_ZMIN_12
231 0x3f800000, // PA_SC_VPORT_ZMAX_12
232 0x00000000, // PA_SC_VPORT_ZMIN_13
233 0x3f800000, // PA_SC_VPORT_ZMAX_13
234 0x00000000, // PA_SC_VPORT_ZMIN_14
235 0x3f800000, // PA_SC_VPORT_ZMAX_14
236 0x00000000, // PA_SC_VPORT_ZMIN_15
237 0x3f800000, // PA_SC_VPORT_ZMAX_15
238};
239static const u32 si_SECT_CONTEXT_def_2[] =
240{
241 0x00000000, // CP_PERFMON_CNTX_CNTL
242 0x00000000, // CP_RINGID
243 0x00000000, // CP_VMID
244 0, // HOLE
245 0, // HOLE
246 0, // HOLE
247 0, // HOLE
248 0, // HOLE
249 0, // HOLE
250 0, // HOLE
251 0, // HOLE
252 0, // HOLE
253 0, // HOLE
254 0, // HOLE
255 0, // HOLE
256 0, // HOLE
257 0, // HOLE
258 0, // HOLE
259 0, // HOLE
260 0, // HOLE
261 0, // HOLE
262 0, // HOLE
263 0, // HOLE
264 0, // HOLE
265 0, // HOLE
266 0, // HOLE
267 0, // HOLE
268 0, // HOLE
269 0, // HOLE
270 0, // HOLE
271 0, // HOLE
272 0, // HOLE
273 0, // HOLE
274 0, // HOLE
275 0, // HOLE
276 0, // HOLE
277 0, // HOLE
278 0, // HOLE
279 0, // HOLE
280 0, // HOLE
281 0xffffffff, // VGT_MAX_VTX_INDX
282 0x00000000, // VGT_MIN_VTX_INDX
283 0x00000000, // VGT_INDX_OFFSET
284 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
285 0, // HOLE
286 0x00000000, // CB_BLEND_RED
287 0x00000000, // CB_BLEND_GREEN
288 0x00000000, // CB_BLEND_BLUE
289 0x00000000, // CB_BLEND_ALPHA
290 0, // HOLE
291 0, // HOLE
292 0x00000000, // DB_STENCIL_CONTROL
293 0x00000000, // DB_STENCILREFMASK
294 0x00000000, // DB_STENCILREFMASK_BF
295 0, // HOLE
296 0x00000000, // PA_CL_VPORT_XSCALE
297 0x00000000, // PA_CL_VPORT_XOFFSET
298 0x00000000, // PA_CL_VPORT_YSCALE
299 0x00000000, // PA_CL_VPORT_YOFFSET
300 0x00000000, // PA_CL_VPORT_ZSCALE
301 0x00000000, // PA_CL_VPORT_ZOFFSET
302 0x00000000, // PA_CL_VPORT_XSCALE_1
303 0x00000000, // PA_CL_VPORT_XOFFSET_1
304 0x00000000, // PA_CL_VPORT_YSCALE_1
305 0x00000000, // PA_CL_VPORT_YOFFSET_1
306 0x00000000, // PA_CL_VPORT_ZSCALE_1
307 0x00000000, // PA_CL_VPORT_ZOFFSET_1
308 0x00000000, // PA_CL_VPORT_XSCALE_2
309 0x00000000, // PA_CL_VPORT_XOFFSET_2
310 0x00000000, // PA_CL_VPORT_YSCALE_2
311 0x00000000, // PA_CL_VPORT_YOFFSET_2
312 0x00000000, // PA_CL_VPORT_ZSCALE_2
313 0x00000000, // PA_CL_VPORT_ZOFFSET_2
314 0x00000000, // PA_CL_VPORT_XSCALE_3
315 0x00000000, // PA_CL_VPORT_XOFFSET_3
316 0x00000000, // PA_CL_VPORT_YSCALE_3
317 0x00000000, // PA_CL_VPORT_YOFFSET_3
318 0x00000000, // PA_CL_VPORT_ZSCALE_3
319 0x00000000, // PA_CL_VPORT_ZOFFSET_3
320 0x00000000, // PA_CL_VPORT_XSCALE_4
321 0x00000000, // PA_CL_VPORT_XOFFSET_4
322 0x00000000, // PA_CL_VPORT_YSCALE_4
323 0x00000000, // PA_CL_VPORT_YOFFSET_4
324 0x00000000, // PA_CL_VPORT_ZSCALE_4
325 0x00000000, // PA_CL_VPORT_ZOFFSET_4
326 0x00000000, // PA_CL_VPORT_XSCALE_5
327 0x00000000, // PA_CL_VPORT_XOFFSET_5
328 0x00000000, // PA_CL_VPORT_YSCALE_5
329 0x00000000, // PA_CL_VPORT_YOFFSET_5
330 0x00000000, // PA_CL_VPORT_ZSCALE_5
331 0x00000000, // PA_CL_VPORT_ZOFFSET_5
332 0x00000000, // PA_CL_VPORT_XSCALE_6
333 0x00000000, // PA_CL_VPORT_XOFFSET_6
334 0x00000000, // PA_CL_VPORT_YSCALE_6
335 0x00000000, // PA_CL_VPORT_YOFFSET_6
336 0x00000000, // PA_CL_VPORT_ZSCALE_6
337 0x00000000, // PA_CL_VPORT_ZOFFSET_6
338 0x00000000, // PA_CL_VPORT_XSCALE_7
339 0x00000000, // PA_CL_VPORT_XOFFSET_7
340 0x00000000, // PA_CL_VPORT_YSCALE_7
341 0x00000000, // PA_CL_VPORT_YOFFSET_7
342 0x00000000, // PA_CL_VPORT_ZSCALE_7
343 0x00000000, // PA_CL_VPORT_ZOFFSET_7
344 0x00000000, // PA_CL_VPORT_XSCALE_8
345 0x00000000, // PA_CL_VPORT_XOFFSET_8
346 0x00000000, // PA_CL_VPORT_YSCALE_8
347 0x00000000, // PA_CL_VPORT_YOFFSET_8
348 0x00000000, // PA_CL_VPORT_ZSCALE_8
349 0x00000000, // PA_CL_VPORT_ZOFFSET_8
350 0x00000000, // PA_CL_VPORT_XSCALE_9
351 0x00000000, // PA_CL_VPORT_XOFFSET_9
352 0x00000000, // PA_CL_VPORT_YSCALE_9
353 0x00000000, // PA_CL_VPORT_YOFFSET_9
354 0x00000000, // PA_CL_VPORT_ZSCALE_9
355 0x00000000, // PA_CL_VPORT_ZOFFSET_9
356 0x00000000, // PA_CL_VPORT_XSCALE_10
357 0x00000000, // PA_CL_VPORT_XOFFSET_10
358 0x00000000, // PA_CL_VPORT_YSCALE_10
359 0x00000000, // PA_CL_VPORT_YOFFSET_10
360 0x00000000, // PA_CL_VPORT_ZSCALE_10
361 0x00000000, // PA_CL_VPORT_ZOFFSET_10
362 0x00000000, // PA_CL_VPORT_XSCALE_11
363 0x00000000, // PA_CL_VPORT_XOFFSET_11
364 0x00000000, // PA_CL_VPORT_YSCALE_11
365 0x00000000, // PA_CL_VPORT_YOFFSET_11
366 0x00000000, // PA_CL_VPORT_ZSCALE_11
367 0x00000000, // PA_CL_VPORT_ZOFFSET_11
368 0x00000000, // PA_CL_VPORT_XSCALE_12
369 0x00000000, // PA_CL_VPORT_XOFFSET_12
370 0x00000000, // PA_CL_VPORT_YSCALE_12
371 0x00000000, // PA_CL_VPORT_YOFFSET_12
372 0x00000000, // PA_CL_VPORT_ZSCALE_12
373 0x00000000, // PA_CL_VPORT_ZOFFSET_12
374 0x00000000, // PA_CL_VPORT_XSCALE_13
375 0x00000000, // PA_CL_VPORT_XOFFSET_13
376 0x00000000, // PA_CL_VPORT_YSCALE_13
377 0x00000000, // PA_CL_VPORT_YOFFSET_13
378 0x00000000, // PA_CL_VPORT_ZSCALE_13
379 0x00000000, // PA_CL_VPORT_ZOFFSET_13
380 0x00000000, // PA_CL_VPORT_XSCALE_14
381 0x00000000, // PA_CL_VPORT_XOFFSET_14
382 0x00000000, // PA_CL_VPORT_YSCALE_14
383 0x00000000, // PA_CL_VPORT_YOFFSET_14
384 0x00000000, // PA_CL_VPORT_ZSCALE_14
385 0x00000000, // PA_CL_VPORT_ZOFFSET_14
386 0x00000000, // PA_CL_VPORT_XSCALE_15
387 0x00000000, // PA_CL_VPORT_XOFFSET_15
388 0x00000000, // PA_CL_VPORT_YSCALE_15
389 0x00000000, // PA_CL_VPORT_YOFFSET_15
390 0x00000000, // PA_CL_VPORT_ZSCALE_15
391 0x00000000, // PA_CL_VPORT_ZOFFSET_15
392 0x00000000, // PA_CL_UCP_0_X
393 0x00000000, // PA_CL_UCP_0_Y
394 0x00000000, // PA_CL_UCP_0_Z
395 0x00000000, // PA_CL_UCP_0_W
396 0x00000000, // PA_CL_UCP_1_X
397 0x00000000, // PA_CL_UCP_1_Y
398 0x00000000, // PA_CL_UCP_1_Z
399 0x00000000, // PA_CL_UCP_1_W
400 0x00000000, // PA_CL_UCP_2_X
401 0x00000000, // PA_CL_UCP_2_Y
402 0x00000000, // PA_CL_UCP_2_Z
403 0x00000000, // PA_CL_UCP_2_W
404 0x00000000, // PA_CL_UCP_3_X
405 0x00000000, // PA_CL_UCP_3_Y
406 0x00000000, // PA_CL_UCP_3_Z
407 0x00000000, // PA_CL_UCP_3_W
408 0x00000000, // PA_CL_UCP_4_X
409 0x00000000, // PA_CL_UCP_4_Y
410 0x00000000, // PA_CL_UCP_4_Z
411 0x00000000, // PA_CL_UCP_4_W
412 0x00000000, // PA_CL_UCP_5_X
413 0x00000000, // PA_CL_UCP_5_Y
414 0x00000000, // PA_CL_UCP_5_Z
415 0x00000000, // PA_CL_UCP_5_W
416 0, // HOLE
417 0, // HOLE
418 0, // HOLE
419 0, // HOLE
420 0, // HOLE
421 0, // HOLE
422 0, // HOLE
423 0, // HOLE
424 0, // HOLE
425 0, // HOLE
426 0x00000000, // SPI_PS_INPUT_CNTL_0
427 0x00000000, // SPI_PS_INPUT_CNTL_1
428 0x00000000, // SPI_PS_INPUT_CNTL_2
429 0x00000000, // SPI_PS_INPUT_CNTL_3
430 0x00000000, // SPI_PS_INPUT_CNTL_4
431 0x00000000, // SPI_PS_INPUT_CNTL_5
432 0x00000000, // SPI_PS_INPUT_CNTL_6
433 0x00000000, // SPI_PS_INPUT_CNTL_7
434 0x00000000, // SPI_PS_INPUT_CNTL_8
435 0x00000000, // SPI_PS_INPUT_CNTL_9
436 0x00000000, // SPI_PS_INPUT_CNTL_10
437 0x00000000, // SPI_PS_INPUT_CNTL_11
438 0x00000000, // SPI_PS_INPUT_CNTL_12
439 0x00000000, // SPI_PS_INPUT_CNTL_13
440 0x00000000, // SPI_PS_INPUT_CNTL_14
441 0x00000000, // SPI_PS_INPUT_CNTL_15
442 0x00000000, // SPI_PS_INPUT_CNTL_16
443 0x00000000, // SPI_PS_INPUT_CNTL_17
444 0x00000000, // SPI_PS_INPUT_CNTL_18
445 0x00000000, // SPI_PS_INPUT_CNTL_19
446 0x00000000, // SPI_PS_INPUT_CNTL_20
447 0x00000000, // SPI_PS_INPUT_CNTL_21
448 0x00000000, // SPI_PS_INPUT_CNTL_22
449 0x00000000, // SPI_PS_INPUT_CNTL_23
450 0x00000000, // SPI_PS_INPUT_CNTL_24
451 0x00000000, // SPI_PS_INPUT_CNTL_25
452 0x00000000, // SPI_PS_INPUT_CNTL_26
453 0x00000000, // SPI_PS_INPUT_CNTL_27
454 0x00000000, // SPI_PS_INPUT_CNTL_28
455 0x00000000, // SPI_PS_INPUT_CNTL_29
456 0x00000000, // SPI_PS_INPUT_CNTL_30
457 0x00000000, // SPI_PS_INPUT_CNTL_31
458 0x00000000, // SPI_VS_OUT_CONFIG
459 0, // HOLE
460 0x00000000, // SPI_PS_INPUT_ENA
461 0x00000000, // SPI_PS_INPUT_ADDR
462 0x00000000, // SPI_INTERP_CONTROL_0
463 0x00000002, // SPI_PS_IN_CONTROL
464 0, // HOLE
465 0x00000000, // SPI_BARYC_CNTL
466 0, // HOLE
467 0x00000000, // SPI_TMPRING_SIZE
468 0, // HOLE
469 0, // HOLE
470 0, // HOLE
471 0, // HOLE
472 0, // HOLE
473 0, // HOLE
474 0x00000000, // SPI_WAVE_MGMT_1
475 0x00000000, // SPI_WAVE_MGMT_2
476 0x00000000, // SPI_SHADER_POS_FORMAT
477 0x00000000, // SPI_SHADER_Z_FORMAT
478 0x00000000, // SPI_SHADER_COL_FORMAT
479 0, // HOLE
480 0, // HOLE
481 0, // HOLE
482 0, // HOLE
483 0, // HOLE
484 0, // HOLE
485 0, // HOLE
486 0, // HOLE
487 0, // HOLE
488 0, // HOLE
489 0, // HOLE
490 0, // HOLE
491 0, // HOLE
492 0, // HOLE
493 0, // HOLE
494 0, // HOLE
495 0, // HOLE
496 0, // HOLE
497 0, // HOLE
498 0, // HOLE
499 0, // HOLE
500 0, // HOLE
501 0, // HOLE
502 0, // HOLE
503 0, // HOLE
504 0, // HOLE
505 0x00000000, // CB_BLEND0_CONTROL
506 0x00000000, // CB_BLEND1_CONTROL
507 0x00000000, // CB_BLEND2_CONTROL
508 0x00000000, // CB_BLEND3_CONTROL
509 0x00000000, // CB_BLEND4_CONTROL
510 0x00000000, // CB_BLEND5_CONTROL
511 0x00000000, // CB_BLEND6_CONTROL
512 0x00000000, // CB_BLEND7_CONTROL
513};
514static const u32 si_SECT_CONTEXT_def_3[] =
515{
516 0x00000000, // PA_CL_POINT_X_RAD
517 0x00000000, // PA_CL_POINT_Y_RAD
518 0x00000000, // PA_CL_POINT_SIZE
519 0x00000000, // PA_CL_POINT_CULL_RAD
520 0x00000000, // VGT_DMA_BASE_HI
521 0x00000000, // VGT_DMA_BASE
522};
523static const u32 si_SECT_CONTEXT_def_4[] =
524{
525 0x00000000, // DB_DEPTH_CONTROL
526 0x00000000, // DB_EQAA
527 0x00000000, // CB_COLOR_CONTROL
528 0x00000000, // DB_SHADER_CONTROL
529 0x00090000, // PA_CL_CLIP_CNTL
530 0x00000004, // PA_SU_SC_MODE_CNTL
531 0x00000000, // PA_CL_VTE_CNTL
532 0x00000000, // PA_CL_VS_OUT_CNTL
533 0x00000000, // PA_CL_NANINF_CNTL
534 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
535 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
536 0x00000000, // PA_SU_PRIM_FILTER_CNTL
537 0, // HOLE
538 0, // HOLE
539 0, // HOLE
540 0, // HOLE
541 0, // HOLE
542 0, // HOLE
543 0, // HOLE
544 0, // HOLE
545 0, // HOLE
546 0, // HOLE
547 0, // HOLE
548 0, // HOLE
549 0, // HOLE
550 0, // HOLE
551 0, // HOLE
552 0, // HOLE
553 0, // HOLE
554 0, // HOLE
555 0, // HOLE
556 0, // HOLE
557 0, // HOLE
558 0, // HOLE
559 0, // HOLE
560 0, // HOLE
561 0, // HOLE
562 0, // HOLE
563 0, // HOLE
564 0, // HOLE
565 0, // HOLE
566 0, // HOLE
567 0, // HOLE
568 0, // HOLE
569 0, // HOLE
570 0, // HOLE
571 0, // HOLE
572 0, // HOLE
573 0, // HOLE
574 0, // HOLE
575 0, // HOLE
576 0, // HOLE
577 0, // HOLE
578 0, // HOLE
579 0, // HOLE
580 0, // HOLE
581 0, // HOLE
582 0, // HOLE
583 0, // HOLE
584 0, // HOLE
585 0, // HOLE
586 0, // HOLE
587 0, // HOLE
588 0, // HOLE
589 0, // HOLE
590 0, // HOLE
591 0, // HOLE
592 0, // HOLE
593 0, // HOLE
594 0, // HOLE
595 0, // HOLE
596 0, // HOLE
597 0, // HOLE
598 0, // HOLE
599 0, // HOLE
600 0, // HOLE
601 0, // HOLE
602 0, // HOLE
603 0, // HOLE
604 0, // HOLE
605 0, // HOLE
606 0, // HOLE
607 0, // HOLE
608 0, // HOLE
609 0, // HOLE
610 0, // HOLE
611 0, // HOLE
612 0, // HOLE
613 0, // HOLE
614 0, // HOLE
615 0, // HOLE
616 0, // HOLE
617 0, // HOLE
618 0, // HOLE
619 0, // HOLE
620 0, // HOLE
621 0, // HOLE
622 0, // HOLE
623 0, // HOLE
624 0, // HOLE
625 0, // HOLE
626 0, // HOLE
627 0, // HOLE
628 0, // HOLE
629 0, // HOLE
630 0, // HOLE
631 0, // HOLE
632 0, // HOLE
633 0, // HOLE
634 0, // HOLE
635 0, // HOLE
636 0, // HOLE
637 0, // HOLE
638 0, // HOLE
639 0, // HOLE
640 0, // HOLE
641 0, // HOLE
642 0, // HOLE
643 0, // HOLE
644 0, // HOLE
645 0, // HOLE
646 0, // HOLE
647 0, // HOLE
648 0, // HOLE
649 0, // HOLE
650 0, // HOLE
651 0, // HOLE
652 0, // HOLE
653 0x00000000, // PA_SU_POINT_SIZE
654 0x00000000, // PA_SU_POINT_MINMAX
655 0x00000000, // PA_SU_LINE_CNTL
656 0x00000000, // PA_SC_LINE_STIPPLE
657 0x00000000, // VGT_OUTPUT_PATH_CNTL
658 0x00000000, // VGT_HOS_CNTL
659 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
660 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
661 0x00000000, // VGT_HOS_REUSE_DEPTH
662 0x00000000, // VGT_GROUP_PRIM_TYPE
663 0x00000000, // VGT_GROUP_FIRST_DECR
664 0x00000000, // VGT_GROUP_DECR
665 0x00000000, // VGT_GROUP_VECT_0_CNTL
666 0x00000000, // VGT_GROUP_VECT_1_CNTL
667 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
668 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
669 0x00000000, // VGT_GS_MODE
670 0, // HOLE
671 0x00000000, // PA_SC_MODE_CNTL_0
672 0x00000000, // PA_SC_MODE_CNTL_1
673 0x00000000, // VGT_ENHANCE
674 0x00000100, // VGT_GS_PER_ES
675 0x00000080, // VGT_ES_PER_GS
676 0x00000002, // VGT_GS_PER_VS
677 0x00000000, // VGT_GSVS_RING_OFFSET_1
678 0x00000000, // VGT_GSVS_RING_OFFSET_2
679 0x00000000, // VGT_GSVS_RING_OFFSET_3
680 0x00000000, // VGT_GS_OUT_PRIM_TYPE
681 0x00000000, // IA_ENHANCE
682};
683static const u32 si_SECT_CONTEXT_def_5[] =
684{
685 0x00000000, // VGT_PRIMITIVEID_EN
686};
687static const u32 si_SECT_CONTEXT_def_6[] =
688{
689 0x00000000, // VGT_PRIMITIVEID_RESET
690};
691static const u32 si_SECT_CONTEXT_def_7[] =
692{
693 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
694 0, // HOLE
695 0, // HOLE
696 0x00000000, // VGT_INSTANCE_STEP_RATE_0
697 0x00000000, // VGT_INSTANCE_STEP_RATE_1
698 0x000000ff, // IA_MULTI_VGT_PARAM
699 0x00000000, // VGT_ESGS_RING_ITEMSIZE
700 0x00000000, // VGT_GSVS_RING_ITEMSIZE
701 0x00000000, // VGT_REUSE_OFF
702 0x00000000, // VGT_VTX_CNT_EN
703 0x00000000, // DB_HTILE_SURFACE
704 0x00000000, // DB_SRESULTS_COMPARE_STATE0
705 0x00000000, // DB_SRESULTS_COMPARE_STATE1
706 0x00000000, // DB_PRELOAD_CONTROL
707 0, // HOLE
708 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
709 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
710 0, // HOLE
711 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
712 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
713 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
714 0, // HOLE
715 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
716 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
717 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
718 0, // HOLE
719 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
720 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
721 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
722 0, // HOLE
723 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
724 0, // HOLE
725 0, // HOLE
726 0, // HOLE
727 0, // HOLE
728 0, // HOLE
729 0, // HOLE
730 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
731 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
732 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
733 0, // HOLE
734 0x00000000, // VGT_GS_MAX_VERT_OUT
735 0, // HOLE
736 0, // HOLE
737 0, // HOLE
738 0, // HOLE
739 0, // HOLE
740 0, // HOLE
741 0x00000000, // VGT_SHADER_STAGES_EN
742 0x00000000, // VGT_LS_HS_CONFIG
743 0x00000000, // VGT_GS_VERT_ITEMSIZE
744 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
745 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
746 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
747 0x00000000, // VGT_TF_PARAM
748 0x00000000, // DB_ALPHA_TO_MASK
749 0, // HOLE
750 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
751 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
752 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
753 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
754 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
755 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
756 0x00000000, // VGT_GS_INSTANCE_CNT
757 0x00000000, // VGT_STRMOUT_CONFIG
758 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
759 0, // HOLE
760 0, // HOLE
761 0, // HOLE
762 0, // HOLE
763 0, // HOLE
764 0, // HOLE
765 0, // HOLE
766 0, // HOLE
767 0, // HOLE
768 0, // HOLE
769 0, // HOLE
770 0, // HOLE
771 0, // HOLE
772 0, // HOLE
773 0x00000000, // PA_SC_CENTROID_PRIORITY_0
774 0x00000000, // PA_SC_CENTROID_PRIORITY_1
775 0x00001000, // PA_SC_LINE_CNTL
776 0x00000000, // PA_SC_AA_CONFIG
777 0x00000005, // PA_SU_VTX_CNTL
778 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
779 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
780 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
781 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
782 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
783 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
784 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
785 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
786 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
787 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
788 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
789 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
790 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
791 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
792 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
793 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
794 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
795 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
796 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
797 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
798 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
799 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
800 0, // HOLE
801 0, // HOLE
802 0, // HOLE
803 0, // HOLE
804 0, // HOLE
805 0, // HOLE
806 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
807 0x00000010, // VGT_OUT_DEALLOC_CNTL
808 0x00000000, // CB_COLOR0_BASE
809 0x00000000, // CB_COLOR0_PITCH
810 0x00000000, // CB_COLOR0_SLICE
811 0x00000000, // CB_COLOR0_VIEW
812 0x00000000, // CB_COLOR0_INFO
813 0x00000000, // CB_COLOR0_ATTRIB
814 0, // HOLE
815 0x00000000, // CB_COLOR0_CMASK
816 0x00000000, // CB_COLOR0_CMASK_SLICE
817 0x00000000, // CB_COLOR0_FMASK
818 0x00000000, // CB_COLOR0_FMASK_SLICE
819 0x00000000, // CB_COLOR0_CLEAR_WORD0
820 0x00000000, // CB_COLOR0_CLEAR_WORD1
821 0, // HOLE
822 0, // HOLE
823 0x00000000, // CB_COLOR1_BASE
824 0x00000000, // CB_COLOR1_PITCH
825 0x00000000, // CB_COLOR1_SLICE
826 0x00000000, // CB_COLOR1_VIEW
827 0x00000000, // CB_COLOR1_INFO
828 0x00000000, // CB_COLOR1_ATTRIB
829 0, // HOLE
830 0x00000000, // CB_COLOR1_CMASK
831 0x00000000, // CB_COLOR1_CMASK_SLICE
832 0x00000000, // CB_COLOR1_FMASK
833 0x00000000, // CB_COLOR1_FMASK_SLICE
834 0x00000000, // CB_COLOR1_CLEAR_WORD0
835 0x00000000, // CB_COLOR1_CLEAR_WORD1
836 0, // HOLE
837 0, // HOLE
838 0x00000000, // CB_COLOR2_BASE
839 0x00000000, // CB_COLOR2_PITCH
840 0x00000000, // CB_COLOR2_SLICE
841 0x00000000, // CB_COLOR2_VIEW
842 0x00000000, // CB_COLOR2_INFO
843 0x00000000, // CB_COLOR2_ATTRIB
844 0, // HOLE
845 0x00000000, // CB_COLOR2_CMASK
846 0x00000000, // CB_COLOR2_CMASK_SLICE
847 0x00000000, // CB_COLOR2_FMASK
848 0x00000000, // CB_COLOR2_FMASK_SLICE
849 0x00000000, // CB_COLOR2_CLEAR_WORD0
850 0x00000000, // CB_COLOR2_CLEAR_WORD1
851 0, // HOLE
852 0, // HOLE
853 0x00000000, // CB_COLOR3_BASE
854 0x00000000, // CB_COLOR3_PITCH
855 0x00000000, // CB_COLOR3_SLICE
856 0x00000000, // CB_COLOR3_VIEW
857 0x00000000, // CB_COLOR3_INFO
858 0x00000000, // CB_COLOR3_ATTRIB
859 0, // HOLE
860 0x00000000, // CB_COLOR3_CMASK
861 0x00000000, // CB_COLOR3_CMASK_SLICE
862 0x00000000, // CB_COLOR3_FMASK
863 0x00000000, // CB_COLOR3_FMASK_SLICE
864 0x00000000, // CB_COLOR3_CLEAR_WORD0
865 0x00000000, // CB_COLOR3_CLEAR_WORD1
866 0, // HOLE
867 0, // HOLE
868 0x00000000, // CB_COLOR4_BASE
869 0x00000000, // CB_COLOR4_PITCH
870 0x00000000, // CB_COLOR4_SLICE
871 0x00000000, // CB_COLOR4_VIEW
872 0x00000000, // CB_COLOR4_INFO
873 0x00000000, // CB_COLOR4_ATTRIB
874 0, // HOLE
875 0x00000000, // CB_COLOR4_CMASK
876 0x00000000, // CB_COLOR4_CMASK_SLICE
877 0x00000000, // CB_COLOR4_FMASK
878 0x00000000, // CB_COLOR4_FMASK_SLICE
879 0x00000000, // CB_COLOR4_CLEAR_WORD0
880 0x00000000, // CB_COLOR4_CLEAR_WORD1
881 0, // HOLE
882 0, // HOLE
883 0x00000000, // CB_COLOR5_BASE
884 0x00000000, // CB_COLOR5_PITCH
885 0x00000000, // CB_COLOR5_SLICE
886 0x00000000, // CB_COLOR5_VIEW
887 0x00000000, // CB_COLOR5_INFO
888 0x00000000, // CB_COLOR5_ATTRIB
889 0, // HOLE
890 0x00000000, // CB_COLOR5_CMASK
891 0x00000000, // CB_COLOR5_CMASK_SLICE
892 0x00000000, // CB_COLOR5_FMASK
893 0x00000000, // CB_COLOR5_FMASK_SLICE
894 0x00000000, // CB_COLOR5_CLEAR_WORD0
895 0x00000000, // CB_COLOR5_CLEAR_WORD1
896 0, // HOLE
897 0, // HOLE
898 0x00000000, // CB_COLOR6_BASE
899 0x00000000, // CB_COLOR6_PITCH
900 0x00000000, // CB_COLOR6_SLICE
901 0x00000000, // CB_COLOR6_VIEW
902 0x00000000, // CB_COLOR6_INFO
903 0x00000000, // CB_COLOR6_ATTRIB
904 0, // HOLE
905 0x00000000, // CB_COLOR6_CMASK
906 0x00000000, // CB_COLOR6_CMASK_SLICE
907 0x00000000, // CB_COLOR6_FMASK
908 0x00000000, // CB_COLOR6_FMASK_SLICE
909 0x00000000, // CB_COLOR6_CLEAR_WORD0
910 0x00000000, // CB_COLOR6_CLEAR_WORD1
911 0, // HOLE
912 0, // HOLE
913 0x00000000, // CB_COLOR7_BASE
914 0x00000000, // CB_COLOR7_PITCH
915 0x00000000, // CB_COLOR7_SLICE
916 0x00000000, // CB_COLOR7_VIEW
917 0x00000000, // CB_COLOR7_INFO
918 0x00000000, // CB_COLOR7_ATTRIB
919 0, // HOLE
920 0x00000000, // CB_COLOR7_CMASK
921 0x00000000, // CB_COLOR7_CMASK_SLICE
922 0x00000000, // CB_COLOR7_FMASK
923 0x00000000, // CB_COLOR7_FMASK_SLICE
924 0x00000000, // CB_COLOR7_CLEAR_WORD0
925 0x00000000, // CB_COLOR7_CLEAR_WORD1
926};
927static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
928{
929 {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
930 {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
931 {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
932 {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
933 {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
934 {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
935 {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
936 { 0, 0, 0 }
937};
938static const struct cs_section_def si_cs_data[] = {
939 { si_SECT_CONTEXT_defs, SECT_CONTEXT },
940 { 0, SECT_NONE }
941};
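A hedged note on the base addresses in these extent tables: the reg_index values (0x0000a000 and up) appear to be dword offsets, so the MMIO byte address would be reg_index << 2; 0x0000a000 << 2 = 0x00028000, which matches the usual context-register window on these ASICs. An illustrative helper (hypothetical, not from this patch):

/* Convert a table reg_index (dword units) to its MMIO byte offset,
 * assuming the usual <<2 convention: 0x0000a000 -> 0x00028000. */
static inline unsigned int example_reg_index_to_mmio(unsigned int reg_index)
{
	return reg_index << 2;
}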
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
new file mode 100644
index 000000000000..5ada922e5cec
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -0,0 +1,2176 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "evergreend.h"
28#include "r600_dpm.h"
29#include "cypress_dpm.h"
30#include "atom.h"
31
32#define SMC_RAM_END 0x8000
33
34#define MC_CG_ARB_FREQ_F0 0x0a
35#define MC_CG_ARB_FREQ_F1 0x0b
36#define MC_CG_ARB_FREQ_F2 0x0c
37#define MC_CG_ARB_FREQ_F3 0x0d
38
39#define MC_CG_SEQ_DRAMCONF_S0 0x05
40#define MC_CG_SEQ_DRAMCONF_S1 0x06
41#define MC_CG_SEQ_YCLK_SUSPEND 0x04
42#define MC_CG_SEQ_YCLK_RESUME 0x0a
43
44struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
45struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
46struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
47
48static void cypress_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
49 bool enable)
50{
51 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
52 u32 tmp, bif;
53
54 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
55 if (enable) {
56 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
57 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
58 if (!pi->boot_in_gen2) {
59 bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
60 bif |= CG_CLIENT_REQ(0xd);
61 WREG32(CG_BIF_REQ_AND_RSP, bif);
62
63 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
64 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
65 tmp |= LC_GEN2_EN_STRAP;
66
67 tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
68 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
69 udelay(10);
70 tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
71 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
72 }
73 }
74 } else {
75 if (!pi->boot_in_gen2) {
76 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
77 tmp &= ~LC_GEN2_EN_STRAP;
78 }
79 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
80 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
81 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
82 }
83}
84
85static void cypress_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
86 bool enable)
87{
88 cypress_enable_bif_dynamic_pcie_gen2(rdev, enable);
89
90 if (enable)
91 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
92 else
93 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
94}
95
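/*
 * Hedged editorial note (not in the original patch): WREG32_P(reg, val, mask)
 * is radeon's read-modify-write helper.  As defined in radeon.h of this era
 * it expands roughly to:
 *
 *	tmp = RREG32(reg);
 *	tmp &= (mask);
 *	tmp |= ((val) & ~(mask));
 *	WREG32(reg, tmp);
 *
 * so the third argument names the bits to keep, which is why callers above
 * pass the complement of the field being updated (e.g. ~ENABLE_GEN2PCIE).
 */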
96#if 0
97static int cypress_enter_ulp_state(struct radeon_device *rdev)
98{
99 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
100
101 if (pi->gfx_clock_gating) {
102 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
103 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
104 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
105
106 RREG32(GB_ADDR_CONFIG);
107 }
108
109 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
110 ~HOST_SMC_MSG_MASK);
111
112 udelay(7000);
113
114 return 0;
115}
116#endif
117
118static void cypress_gfx_clock_gating_enable(struct radeon_device *rdev,
119 bool enable)
120{
121 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
122
123 if (enable) {
124 if (eg_pi->light_sleep) {
125 WREG32(GRBM_GFX_INDEX, 0xC0000000);
126
127 WREG32_CG(CG_CGLS_TILE_0, 0xFFFFFFFF);
128 WREG32_CG(CG_CGLS_TILE_1, 0xFFFFFFFF);
129 WREG32_CG(CG_CGLS_TILE_2, 0xFFFFFFFF);
130 WREG32_CG(CG_CGLS_TILE_3, 0xFFFFFFFF);
131 WREG32_CG(CG_CGLS_TILE_4, 0xFFFFFFFF);
132 WREG32_CG(CG_CGLS_TILE_5, 0xFFFFFFFF);
133 WREG32_CG(CG_CGLS_TILE_6, 0xFFFFFFFF);
134 WREG32_CG(CG_CGLS_TILE_7, 0xFFFFFFFF);
135 WREG32_CG(CG_CGLS_TILE_8, 0xFFFFFFFF);
136 WREG32_CG(CG_CGLS_TILE_9, 0xFFFFFFFF);
137 WREG32_CG(CG_CGLS_TILE_10, 0xFFFFFFFF);
138 WREG32_CG(CG_CGLS_TILE_11, 0xFFFFFFFF);
139
140 WREG32_P(SCLK_PWRMGT_CNTL, DYN_LIGHT_SLEEP_EN, ~DYN_LIGHT_SLEEP_EN);
141 }
142 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
143 } else {
144 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
145 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
146 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
147 RREG32(GB_ADDR_CONFIG);
148
149 if (eg_pi->light_sleep) {
150 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_LIGHT_SLEEP_EN);
151
152 WREG32(GRBM_GFX_INDEX, 0xC0000000);
153
154 WREG32_CG(CG_CGLS_TILE_0, 0);
155 WREG32_CG(CG_CGLS_TILE_1, 0);
156 WREG32_CG(CG_CGLS_TILE_2, 0);
157 WREG32_CG(CG_CGLS_TILE_3, 0);
158 WREG32_CG(CG_CGLS_TILE_4, 0);
159 WREG32_CG(CG_CGLS_TILE_5, 0);
160 WREG32_CG(CG_CGLS_TILE_6, 0);
161 WREG32_CG(CG_CGLS_TILE_7, 0);
162 WREG32_CG(CG_CGLS_TILE_8, 0);
163 WREG32_CG(CG_CGLS_TILE_9, 0);
164 WREG32_CG(CG_CGLS_TILE_10, 0);
165 WREG32_CG(CG_CGLS_TILE_11, 0);
166 }
167 }
168}
169
170static void cypress_mg_clock_gating_enable(struct radeon_device *rdev,
171 bool enable)
172{
173 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
174 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
175
176 if (enable) {
177 u32 cgts_sm_ctrl_reg;
178
179 if (rdev->family == CHIP_CEDAR)
180 cgts_sm_ctrl_reg = CEDAR_MGCGCGTSSMCTRL_DFLT;
181 else if (rdev->family == CHIP_REDWOOD)
182 cgts_sm_ctrl_reg = REDWOOD_MGCGCGTSSMCTRL_DFLT;
183 else
184 cgts_sm_ctrl_reg = CYPRESS_MGCGCGTSSMCTRL_DFLT;
185
186 WREG32(GRBM_GFX_INDEX, 0xC0000000);
187
188 WREG32_CG(CG_CGTT_LOCAL_0, CYPRESS_MGCGTTLOCAL0_DFLT);
189 WREG32_CG(CG_CGTT_LOCAL_1, CYPRESS_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF);
190 WREG32_CG(CG_CGTT_LOCAL_2, CYPRESS_MGCGTTLOCAL2_DFLT);
191 WREG32_CG(CG_CGTT_LOCAL_3, CYPRESS_MGCGTTLOCAL3_DFLT);
192
193 if (pi->mgcgtssm)
194 WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
195
196 if (eg_pi->mcls) {
197 WREG32_P(MC_CITF_MISC_RD_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
198 WREG32_P(MC_CITF_MISC_WR_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
199 WREG32_P(MC_CITF_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
200 WREG32_P(MC_HUB_MISC_HUB_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
201 WREG32_P(MC_HUB_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
202 WREG32_P(MC_HUB_MISC_SIP_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
203 WREG32_P(MC_XPB_CLK_GAT, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
204 WREG32_P(VM_L2_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
205 }
206 } else {
207 WREG32(GRBM_GFX_INDEX, 0xC0000000);
208
209 WREG32_CG(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
210 WREG32_CG(CG_CGTT_LOCAL_1, 0xFFFFFFFF);
211 WREG32_CG(CG_CGTT_LOCAL_2, 0xFFFFFFFF);
212 WREG32_CG(CG_CGTT_LOCAL_3, 0xFFFFFFFF);
213
214 if (pi->mgcgtssm)
215 WREG32(CGTS_SM_CTRL_REG, 0x81f44bc0);
216 }
217}
218
219void cypress_enable_spread_spectrum(struct radeon_device *rdev,
220 bool enable)
221{
222 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
223
224 if (enable) {
225 if (pi->sclk_ss)
226 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
227
228 if (pi->mclk_ss)
229 WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
230 } else {
231 WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
232 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
233 WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
234 WREG32_P(MPLL_CNTL_MODE, 0, ~SS_DSMODE_EN);
235 }
236}
237
238void cypress_start_dpm(struct radeon_device *rdev)
239{
240 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
241}
242
243void cypress_enable_sclk_control(struct radeon_device *rdev,
244 bool enable)
245{
246 if (enable)
247 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
248 else
249 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
250}
251
252void cypress_enable_mclk_control(struct radeon_device *rdev,
253 bool enable)
254{
255 if (enable)
256 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
257 else
258 WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
259}
260
261int cypress_notify_smc_display_change(struct radeon_device *rdev,
262 bool has_display)
263{
264 PPSMC_Msg msg = has_display ?
265 (PPSMC_Msg)PPSMC_MSG_HasDisplay : (PPSMC_Msg)PPSMC_MSG_NoDisplay;
266
267 if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
268 return -EINVAL;
269
270 return 0;
271}
272
273void cypress_program_response_times(struct radeon_device *rdev)
274{
275 u32 reference_clock;
276 u32 mclk_switch_limit;
277
278 reference_clock = radeon_get_xclk(rdev);
279 mclk_switch_limit = (460 * reference_clock) / 100;
280
281 rv770_write_smc_soft_register(rdev,
282 RV770_SMC_SOFT_REGISTER_mclk_switch_lim,
283 mclk_switch_limit);
284
285 rv770_write_smc_soft_register(rdev,
286 RV770_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
287
288 rv770_write_smc_soft_register(rdev,
289 RV770_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
290
291 rv770_program_response_times(rdev);
292
293 if (ASIC_IS_LOMBOK(rdev))
294 rv770_write_smc_soft_register(rdev,
295 RV770_SMC_SOFT_REGISTER_is_asic_lombok, 1);
296
297}
298
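/*
 * Hedged editorial note: radeon_get_xclk() reports the reference clock in
 * 10 kHz units elsewhere in this driver, so (460 * reference_clock) / 100
 * above is simply 4.6x the reference frequency; the exact unit the SMC
 * expects for mclk_switch_lim is not spelled out in this patch.
 */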
299static int cypress_pcie_performance_request(struct radeon_device *rdev,
300 u8 perf_req, bool advertise)
301{
302 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
303 u32 tmp;
304
305 udelay(10);
306 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
307 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) && (tmp & LC_CURRENT_DATA_RATE))
308 return 0;
309
310#if defined(CONFIG_ACPI)
311 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
312 (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
313 eg_pi->pcie_performance_request_registered = true;
314 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
315 } else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
316 eg_pi->pcie_performance_request_registered) {
317 eg_pi->pcie_performance_request_registered = false;
318 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
319 }
320#endif
321
322 return 0;
323}
324
325void cypress_advertise_gen2_capability(struct radeon_device *rdev)
326{
327 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
328 u32 tmp;
329
330#if defined(CONFIG_ACPI)
331 radeon_acpi_pcie_notify_device_ready(rdev);
332#endif
333
334 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
335
336 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
337 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
338 pi->pcie_gen2 = true;
339 else
340 pi->pcie_gen2 = false;
341
342 if (!pi->pcie_gen2)
343 cypress_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
344
345}
346
347static enum radeon_pcie_gen cypress_get_maximum_link_speed(struct radeon_ps *radeon_state)
348{
349 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
350
351 if (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
352 return 1;
353 return 0;
354}
355
356void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
357 struct radeon_ps *radeon_new_state,
358 struct radeon_ps *radeon_current_state)
359{
360 enum radeon_pcie_gen pcie_link_speed_target =
361 cypress_get_maximum_link_speed(radeon_new_state);
362 enum radeon_pcie_gen pcie_link_speed_current =
363 cypress_get_maximum_link_speed(radeon_current_state);
364 u8 request;
365
366 if (pcie_link_speed_target < pcie_link_speed_current) {
367 if (pcie_link_speed_target == RADEON_PCIE_GEN1)
368 request = PCIE_PERF_REQ_PECI_GEN1;
369 else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
370 request = PCIE_PERF_REQ_PECI_GEN2;
371 else
372 request = PCIE_PERF_REQ_PECI_GEN3;
373
374 cypress_pcie_performance_request(rdev, request, false);
375 }
376}
377
378void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
379 struct radeon_ps *radeon_new_state,
380 struct radeon_ps *radeon_current_state)
381{
382 enum radeon_pcie_gen pcie_link_speed_target =
383 cypress_get_maximum_link_speed(radeon_new_state);
384 enum radeon_pcie_gen pcie_link_speed_current =
385 cypress_get_maximum_link_speed(radeon_current_state);
386 u8 request;
387
388 if (pcie_link_speed_target > pcie_link_speed_current) {
389 if (pcie_link_speed_target == RADEON_PCIE_GEN1)
390 request = PCIE_PERF_REQ_PECI_GEN1;
391 else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
392 request = PCIE_PERF_REQ_PECI_GEN2;
393 else
394 request = PCIE_PERF_REQ_PECI_GEN3;
395
396 cypress_pcie_performance_request(rdev, request, false);
397 }
398}
399
400static int cypress_populate_voltage_value(struct radeon_device *rdev,
401 struct atom_voltage_table *table,
402 u16 value, RV770_SMC_VOLTAGE_VALUE *voltage)
403{
404 unsigned int i;
405
406 for (i = 0; i < table->count; i++) {
407 if (value <= table->entries[i].value) {
408 voltage->index = (u8)i;
409 voltage->value = cpu_to_be16(table->entries[i].value);
410 break;
411 }
412 }
413
414 if (i == table->count)
415 return -EINVAL;
416
417 return 0;
418}
419
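/*
 * Hedged editorial note: cypress_populate_voltage_value() above picks the
 * first (lowest) table entry whose value meets or exceeds the request and
 * fails with -EINVAL only when the request is above the table maximum, so
 * the voltage table is assumed to be sorted in ascending order.
 */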
420u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
421{
422 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
423 u8 result = 0;
424 bool strobe_mode = false;
425
426 if (pi->mem_gddr5) {
427 if (mclk <= pi->mclk_strobe_mode_threshold)
428 strobe_mode = true;
429 result = cypress_get_mclk_frequency_ratio(rdev, mclk, strobe_mode);
430
431 if (strobe_mode)
432 result |= SMC_STROBE_ENABLE;
433 }
434
435 return result;
436}
437
438u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
439{
440 u32 ref_clk = rdev->clock.mpll.reference_freq;
441 u32 vco = clkf * ref_clk;
442
443	/* 100 MHz ref clk */
444 if (ref_clk == 10000) {
445 if (vco > 500000)
446 return 0xC6;
447 if (vco > 400000)
448 return 0x9D;
449 if (vco > 330000)
450 return 0x6C;
451 if (vco > 250000)
452 return 0x2B;
453 if (vco > 160000)
454 return 0x5B;
455 if (vco > 120000)
456 return 0x0A;
457 return 0x4B;
458 }
459
460	/* 27 MHz ref clk */
461 if (vco > 250000)
462 return 0x8B;
463 if (vco > 200000)
464 return 0xCC;
465 if (vco > 150000)
466 return 0x9B;
467 return 0x6B;
468}
469
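/*
 * Hedged editorial note: the ref_clk == 10000 test paired with the
 * "100 MHz ref clk" comment above implies rdev->clock.mpll.reference_freq
 * is kept in 10 kHz units, making the vco thresholds 5 GHz (500000),
 * 4 GHz (400000), and so on.
 */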
470static int cypress_populate_mclk_value(struct radeon_device *rdev,
471 u32 engine_clock, u32 memory_clock,
472 RV7XX_SMC_MCLK_VALUE *mclk,
473 bool strobe_mode, bool dll_state_on)
474{
475 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
476
477 u32 mpll_ad_func_cntl =
478 pi->clk_regs.rv770.mpll_ad_func_cntl;
479 u32 mpll_ad_func_cntl_2 =
480 pi->clk_regs.rv770.mpll_ad_func_cntl_2;
481 u32 mpll_dq_func_cntl =
482 pi->clk_regs.rv770.mpll_dq_func_cntl;
483 u32 mpll_dq_func_cntl_2 =
484 pi->clk_regs.rv770.mpll_dq_func_cntl_2;
485 u32 mclk_pwrmgt_cntl =
486 pi->clk_regs.rv770.mclk_pwrmgt_cntl;
487 u32 dll_cntl =
488 pi->clk_regs.rv770.dll_cntl;
489 u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
490 u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
491 struct atom_clock_dividers dividers;
492 u32 ibias;
493 u32 dll_speed;
494 int ret;
495 u32 mc_seq_misc7;
496
497 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
498 memory_clock, strobe_mode, &dividers);
499 if (ret)
500 return ret;
501
502 if (!strobe_mode) {
503 mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
504
505		if (mc_seq_misc7 & 0x8000000)
506 dividers.post_div = 1;
507 }
508
509 ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
510
511 mpll_ad_func_cntl &= ~(CLKR_MASK |
512 YCLK_POST_DIV_MASK |
513 CLKF_MASK |
514 CLKFRAC_MASK |
515 IBIAS_MASK);
516 mpll_ad_func_cntl |= CLKR(dividers.ref_div);
517 mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
518 mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
519 mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
520 mpll_ad_func_cntl |= IBIAS(ibias);
521
522 if (dividers.vco_mode)
523 mpll_ad_func_cntl_2 |= VCO_MODE;
524 else
525 mpll_ad_func_cntl_2 &= ~VCO_MODE;
526
527 if (pi->mem_gddr5) {
528 mpll_dq_func_cntl &= ~(CLKR_MASK |
529 YCLK_POST_DIV_MASK |
530 CLKF_MASK |
531 CLKFRAC_MASK |
532 IBIAS_MASK);
533 mpll_dq_func_cntl |= CLKR(dividers.ref_div);
534 mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
535 mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
536 mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
537 mpll_dq_func_cntl |= IBIAS(ibias);
538
539 if (strobe_mode)
540 mpll_dq_func_cntl &= ~PDNB;
541 else
542 mpll_dq_func_cntl |= PDNB;
543
544 if (dividers.vco_mode)
545 mpll_dq_func_cntl_2 |= VCO_MODE;
546 else
547 mpll_dq_func_cntl_2 &= ~VCO_MODE;
548 }
549
550 if (pi->mclk_ss) {
551 struct radeon_atom_ss ss;
552 u32 vco_freq = memory_clock * dividers.post_div;
553
554 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
555 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
556 u32 reference_clock = rdev->clock.mpll.reference_freq;
557 u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
558 u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
559 u32 clk_v = ss.percentage *
560 (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
561
562 mpll_ss1 &= ~CLKV_MASK;
563 mpll_ss1 |= CLKV(clk_v);
564
565 mpll_ss2 &= ~CLKS_MASK;
566 mpll_ss2 |= CLKS(clk_s);
567 }
568 }
569
570 dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
571 memory_clock);
572
573 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
574 mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
575 if (dll_state_on)
576 mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
577 MRDCKA1_PDNB |
578 MRDCKB0_PDNB |
579 MRDCKB1_PDNB |
580 MRDCKC0_PDNB |
581 MRDCKC1_PDNB |
582 MRDCKD0_PDNB |
583 MRDCKD1_PDNB);
584 else
585 mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
586 MRDCKA1_PDNB |
587 MRDCKB0_PDNB |
588 MRDCKB1_PDNB |
589 MRDCKC0_PDNB |
590 MRDCKC1_PDNB |
591 MRDCKD0_PDNB |
592 MRDCKD1_PDNB);
593
594 mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
595 mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
596 mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
597 mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
598 mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
599 mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
600 mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
601 mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
602 mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
603
604 return 0;
605}
606
607u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
608 u32 memory_clock, bool strobe_mode)
609{
610 u8 mc_para_index;
611
612 if (rdev->family >= CHIP_BARTS) {
613 if (strobe_mode) {
614 if (memory_clock < 10000)
615 mc_para_index = 0x00;
616 else if (memory_clock > 47500)
617 mc_para_index = 0x0f;
618 else
619 mc_para_index = (u8)((memory_clock - 10000) / 2500);
620 } else {
621 if (memory_clock < 65000)
622 mc_para_index = 0x00;
623 else if (memory_clock > 135000)
624 mc_para_index = 0x0f;
625 else
626 mc_para_index = (u8)((memory_clock - 60000) / 5000);
627 }
628 } else {
629 if (strobe_mode) {
630 if (memory_clock < 10000)
631 mc_para_index = 0x00;
632 else if (memory_clock > 47500)
633 mc_para_index = 0x0f;
634 else
635 mc_para_index = (u8)((memory_clock - 10000) / 2500);
636 } else {
637 if (memory_clock < 40000)
638 mc_para_index = 0x00;
639 else if (memory_clock > 115000)
640 mc_para_index = 0x0f;
641 else
642 mc_para_index = (u8)((memory_clock - 40000) / 5000);
643 }
644 }
645 return mc_para_index;
646}
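
/*
 * Example (memory_clock in 10 kHz units): in strobe mode a 125 MHz
 * memory clock (12500) yields (12500 - 10000) / 2500 = 1; the index is
 * clamped to 0x00 below 100 MHz and to 0x0f above 475 MHz.
 */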
647
648static int cypress_populate_mvdd_value(struct radeon_device *rdev,
649 u32 mclk,
650 RV770_SMC_VOLTAGE_VALUE *voltage)
651{
652 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
653 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
654
655 if (!pi->mvdd_control) {
656 voltage->index = eg_pi->mvdd_high_index;
657 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
658 return 0;
659 }
660
661 if (mclk <= pi->mvdd_split_frequency) {
662 voltage->index = eg_pi->mvdd_low_index;
663 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
664 } else {
665 voltage->index = eg_pi->mvdd_high_index;
666 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
667 }
668
669 return 0;
670}
671
672int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
673 struct rv7xx_pl *pl,
674 RV770_SMC_HW_PERFORMANCE_LEVEL *level,
675 u8 watermark_level)
676{
677 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
678 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
679 int ret;
680 bool dll_state_on;
681
682 level->gen2PCIE = pi->pcie_gen2 ?
683 ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
684 level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
685 level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
686 level->displayWatermark = watermark_level;
687
688 ret = rv740_populate_sclk_value(rdev, pl->sclk, &level->sclk);
689 if (ret)
690 return ret;
691
692 level->mcFlags = 0;
693 if (pi->mclk_stutter_mode_threshold &&
694 (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
695 !eg_pi->uvd_enabled) {
696 level->mcFlags |= SMC_MC_STUTTER_EN;
697 if (eg_pi->sclk_deep_sleep)
698 level->stateFlags |= PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
699 else
700 level->stateFlags &= ~PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
701 }
702
703 if (pi->mem_gddr5) {
704 if (pl->mclk > pi->mclk_edc_enable_threshold)
705 level->mcFlags |= SMC_MC_EDC_RD_FLAG;
706
707 if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
708 level->mcFlags |= SMC_MC_EDC_WR_FLAG;
709
710 level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
711
712 if (level->strobeMode & SMC_STROBE_ENABLE) {
713 if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
714 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
715 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
716 else
717 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
718 } else
719 dll_state_on = eg_pi->dll_default_on;
720
721 ret = cypress_populate_mclk_value(rdev,
722 pl->sclk,
723 pl->mclk,
724 &level->mclk,
725 (level->strobeMode & SMC_STROBE_ENABLE) != 0,
726 dll_state_on);
727 } else {
728 ret = cypress_populate_mclk_value(rdev,
729 pl->sclk,
730 pl->mclk,
731 &level->mclk,
732 true,
733 true);
734 }
735 if (ret)
736 return ret;
737
738 ret = cypress_populate_voltage_value(rdev,
739 &eg_pi->vddc_voltage_table,
740 pl->vddc,
741 &level->vddc);
742 if (ret)
743 return ret;
744
745 if (eg_pi->vddci_control) {
746 ret = cypress_populate_voltage_value(rdev,
747 &eg_pi->vddci_voltage_table,
748 pl->vddci,
749 &level->vddci);
750 if (ret)
751 return ret;
752 }
753
754 ret = cypress_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
755
756 return ret;
757}
758
759static int cypress_convert_power_state_to_smc(struct radeon_device *rdev,
760 struct radeon_ps *radeon_state,
761 RV770_SMC_SWSTATE *smc_state)
762{
763 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
764 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
765 int ret;
766
767 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
768 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
769
770 ret = cypress_convert_power_level_to_smc(rdev,
771 &state->low,
772 &smc_state->levels[0],
773 PPSMC_DISPLAY_WATERMARK_LOW);
774 if (ret)
775 return ret;
776
777 ret = cypress_convert_power_level_to_smc(rdev,
778 &state->medium,
779 &smc_state->levels[1],
780 PPSMC_DISPLAY_WATERMARK_LOW);
781 if (ret)
782 return ret;
783
784 ret = cypress_convert_power_level_to_smc(rdev,
785 &state->high,
786 &smc_state->levels[2],
787 PPSMC_DISPLAY_WATERMARK_HIGH);
788 if (ret)
789 return ret;
790
791 smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
792 smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
793 smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
794
795 if (eg_pi->dynamic_ac_timing) {
796 smc_state->levels[0].ACIndex = 2;
797 smc_state->levels[1].ACIndex = 3;
798 smc_state->levels[2].ACIndex = 4;
799 } else {
800 smc_state->levels[0].ACIndex = 0;
801 smc_state->levels[1].ACIndex = 0;
802 smc_state->levels[2].ACIndex = 0;
803 }
804
805 rv770_populate_smc_sp(rdev, radeon_state, smc_state);
806
807 return rv770_populate_smc_t(rdev, radeon_state, smc_state);
808}
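
/*
 * The low/medium/high levels of a state land in levels[0..2].  With
 * dynamic AC timing enabled, ACIndex values 2/3/4 point the SMC at the
 * matching SMC_Evergreen_MCRegisters data[] slots; slot 0 backs the
 * initial/boot state (ACIndex 0) and slot 1 the ACPI state (ACIndex 1).
 */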
809
810static void cypress_convert_mc_registers(struct evergreen_mc_reg_entry *entry,
811 SMC_Evergreen_MCRegisterSet *data,
812 u32 num_entries, u32 valid_flag)
813{
814 u32 i, j;
815
816 for (i = 0, j = 0; j < num_entries; j++) {
817 if (valid_flag & (1 << j)) {
818 data->value[i] = cpu_to_be32(entry->mc_data[j]);
819 i++;
820 }
821 }
822}
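
/*
 * Example: with num_entries = 3 and valid_flag = 0x5 (bits 0 and 2),
 * only mc_data[0] and mc_data[2] are copied, packed into value[0] and
 * value[1]; registers that never change between AC timing ranges are
 * left out of the SMC table entirely.
 */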
823
824static void cypress_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
825 struct rv7xx_pl *pl,
826 SMC_Evergreen_MCRegisterSet *mc_reg_table_data)
827{
828 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
829 u32 i = 0;
830
831 for (i = 0; i < eg_pi->mc_reg_table.num_entries; i++) {
832 if (pl->mclk <=
833 eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
834 break;
835 }
836
837 if ((i == eg_pi->mc_reg_table.num_entries) && (i > 0))
838 --i;
839
840 cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[i],
841 mc_reg_table_data,
842 eg_pi->mc_reg_table.last,
843 eg_pi->mc_reg_table.valid_flag);
844}
845
846static void cypress_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
847 struct radeon_ps *radeon_state,
848 SMC_Evergreen_MCRegisters *mc_reg_table)
849{
850 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
851
852 cypress_convert_mc_reg_table_entry_to_smc(rdev,
853 &state->low,
854 &mc_reg_table->data[2]);
855 cypress_convert_mc_reg_table_entry_to_smc(rdev,
856 &state->medium,
857 &mc_reg_table->data[3]);
858 cypress_convert_mc_reg_table_entry_to_smc(rdev,
859 &state->high,
860 &mc_reg_table->data[4]);
861}
862
863int cypress_upload_sw_state(struct radeon_device *rdev,
864 struct radeon_ps *radeon_new_state)
865{
866 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
867 u16 address = pi->state_table_start +
868 offsetof(RV770_SMC_STATETABLE, driverState);
869 RV770_SMC_SWSTATE state = { 0 };
870 int ret;
871
872 ret = cypress_convert_power_state_to_smc(rdev, radeon_new_state, &state);
873 if (ret)
874 return ret;
875
876 return rv770_copy_bytes_to_smc(rdev, address, (u8 *)&state,
877 sizeof(RV770_SMC_SWSTATE),
878 pi->sram_end);
879}
880
881int cypress_upload_mc_reg_table(struct radeon_device *rdev,
882 struct radeon_ps *radeon_new_state)
883{
884 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
885 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
886 SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
887 u16 address;
888
889 cypress_convert_mc_reg_table_to_smc(rdev, radeon_new_state, &mc_reg_table);
890
891 address = eg_pi->mc_reg_table_start +
892 (u16)offsetof(SMC_Evergreen_MCRegisters, data[2]);
893
894 return rv770_copy_bytes_to_smc(rdev, address,
895 (u8 *)&mc_reg_table.data[2],
896 sizeof(SMC_Evergreen_MCRegisterSet) * 3,
897 pi->sram_end);
898}
899
900u32 cypress_calculate_burst_time(struct radeon_device *rdev,
901 u32 engine_clock, u32 memory_clock)
902{
903 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
904 u32 multiplier = pi->mem_gddr5 ? 1 : 2;
905 u32 result = (4 * multiplier * engine_clock) / (memory_clock / 2);
906 u32 burst_time;
907
908 if (result <= 4)
909 burst_time = 0;
910 else if (result < 8)
911 burst_time = result - 4;
912 else {
913		burst_time = result / 2;
914 if (burst_time > 18)
915 burst_time = 18;
916 }
917
918 return burst_time;
919}
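
/*
 * Example: for GDDR5 (multiplier = 1), engine_clock = 60000 and
 * memory_clock = 40000 give result = (4 * 60000) / 20000 = 12, so
 * burst_time = 12 / 2 = 6; the value is clamped to at most 18.
 */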
920
921void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
922 struct radeon_ps *radeon_new_state)
923{
924 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
925 u32 mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
926
927 mc_arb_burst_time &= ~(STATE1_MASK | STATE2_MASK | STATE3_MASK);
928
929 mc_arb_burst_time |= STATE1(cypress_calculate_burst_time(rdev,
930 new_state->low.sclk,
931 new_state->low.mclk));
932 mc_arb_burst_time |= STATE2(cypress_calculate_burst_time(rdev,
933 new_state->medium.sclk,
934 new_state->medium.mclk));
935 mc_arb_burst_time |= STATE3(cypress_calculate_burst_time(rdev,
936 new_state->high.sclk,
937 new_state->high.mclk));
938
939 rv730_program_memory_timing_parameters(rdev, radeon_new_state);
940
941 WREG32(MC_ARB_BURST_TIME, mc_arb_burst_time);
942}
943
944static void cypress_populate_mc_reg_addresses(struct radeon_device *rdev,
945 SMC_Evergreen_MCRegisters *mc_reg_table)
946{
947 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
948 u32 i, j;
949
950 for (i = 0, j = 0; j < eg_pi->mc_reg_table.last; j++) {
951 if (eg_pi->mc_reg_table.valid_flag & (1 << j)) {
952 mc_reg_table->address[i].s0 =
953 cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s0);
954 mc_reg_table->address[i].s1 =
955 cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s1);
956 i++;
957 }
958 }
959
960 mc_reg_table->last = (u8)i;
961}
962
963static void cypress_set_mc_reg_address_table(struct radeon_device *rdev)
964{
965 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
966 u32 i = 0;
967
968 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RAS_TIMING_LP >> 2;
969 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RAS_TIMING >> 2;
970 i++;
971
972 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_CAS_TIMING_LP >> 2;
973 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_CAS_TIMING >> 2;
974 i++;
975
976 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING_LP >> 2;
977 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING >> 2;
978 i++;
979
980 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING2_LP >> 2;
981 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING2 >> 2;
982 i++;
983
984 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D0_LP >> 2;
985 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D0 >> 2;
986 i++;
987
988 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D1_LP >> 2;
989 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D1 >> 2;
990 i++;
991
992 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D0_LP >> 2;
993 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D0 >> 2;
994 i++;
995
996 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D1_LP >> 2;
997 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D1 >> 2;
998 i++;
999
1000 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1001 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_EMRS >> 2;
1002 i++;
1003
1004 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1005 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS >> 2;
1006 i++;
1007
1008 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1009 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS1 >> 2;
1010 i++;
1011
1012 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC1 >> 2;
1013 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC1 >> 2;
1014 i++;
1015
1016 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RESERVE_M >> 2;
1017 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RESERVE_M >> 2;
1018 i++;
1019
1020 eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC3 >> 2;
1021 eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC3 >> 2;
1022 i++;
1023
1024 eg_pi->mc_reg_table.last = (u8)i;
1025}
1026
1027static void cypress_retrieve_ac_timing_for_one_entry(struct radeon_device *rdev,
1028 struct evergreen_mc_reg_entry *entry)
1029{
1030 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1031 u32 i;
1032
1033 for (i = 0; i < eg_pi->mc_reg_table.last; i++)
1034 entry->mc_data[i] =
1035 RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1036
1037}
1038
1039static void cypress_retrieve_ac_timing_for_all_ranges(struct radeon_device *rdev,
1040 struct atom_memory_clock_range_table *range_table)
1041{
1042 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1043 u32 i, j;
1044
1045 for (i = 0; i < range_table->num_entries; i++) {
1046 eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max =
1047 range_table->mclk[i];
1048 radeon_atom_set_ac_timing(rdev, range_table->mclk[i]);
1049 cypress_retrieve_ac_timing_for_one_entry(rdev,
1050 &eg_pi->mc_reg_table.mc_reg_table_entry[i]);
1051 }
1052
1053 eg_pi->mc_reg_table.num_entries = range_table->num_entries;
1054 eg_pi->mc_reg_table.valid_flag = 0;
1055
1056 for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1057 for (j = 1; j < range_table->num_entries; j++) {
1058 if (eg_pi->mc_reg_table.mc_reg_table_entry[j-1].mc_data[i] !=
1059 eg_pi->mc_reg_table.mc_reg_table_entry[j].mc_data[i]) {
1060 eg_pi->mc_reg_table.valid_flag |= (1 << i);
1061 break;
1062 }
1063 }
1064 }
1065}
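
/*
 * valid_flag ends up with one bit set per AC timing register whose
 * value differs between at least two memory clock ranges; registers
 * that stay constant are skipped when the table is packed for the SMC
 * (see cypress_convert_mc_registers() above).
 */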
1066
1067static int cypress_initialize_mc_reg_table(struct radeon_device *rdev)
1068{
1069 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1070 u8 module_index = rv770_get_memory_module_index(rdev);
1071 struct atom_memory_clock_range_table range_table = { 0 };
1072 int ret;
1073
1074 ret = radeon_atom_get_mclk_range_table(rdev,
1075 pi->mem_gddr5,
1076 module_index, &range_table);
1077 if (ret)
1078 return ret;
1079
1080 cypress_retrieve_ac_timing_for_all_ranges(rdev, &range_table);
1081
1082 return 0;
1083}
1084
1085static void cypress_wait_for_mc_sequencer(struct radeon_device *rdev, u8 value)
1086{
1087 u32 i, j;
1088 u32 channels = 2;
1089
1090 if ((rdev->family == CHIP_CYPRESS) ||
1091 (rdev->family == CHIP_HEMLOCK))
1092 channels = 4;
1093 else if (rdev->family == CHIP_CEDAR)
1094 channels = 1;
1095
1096 for (i = 0; i < channels; i++) {
1097 if ((rdev->family == CHIP_CYPRESS) ||
1098 (rdev->family == CHIP_HEMLOCK)) {
1099 WREG32_P(MC_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
1100 WREG32_P(MC_CG_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
1101 } else {
1102 WREG32_P(MC_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
1103 WREG32_P(MC_CG_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
1104 }
1105 for (j = 0; j < rdev->usec_timeout; j++) {
1106 if (((RREG32(MC_SEQ_CG) & CG_SEQ_RESP_MASK) >> CG_SEQ_RESP_SHIFT) == value)
1107 break;
1108 udelay(1);
1109 }
1110 }
1111}
1112
1113static void cypress_force_mc_use_s1(struct radeon_device *rdev,
1114 struct radeon_ps *radeon_boot_state)
1115{
1116 struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1117 u32 strobe_mode;
1118 u32 mc_seq_cg;
1119 int i;
1120
1121 if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
1122 return;
1123
1124 radeon_atom_set_ac_timing(rdev, boot_state->low.mclk);
1125 radeon_mc_wait_for_idle(rdev);
1126
1127 if ((rdev->family == CHIP_CYPRESS) ||
1128 (rdev->family == CHIP_HEMLOCK)) {
1129 WREG32(MC_CONFIG_MCD, 0xf);
1130 WREG32(MC_CG_CONFIG_MCD, 0xf);
1131 } else {
1132 WREG32(MC_CONFIG, 0xf);
1133 WREG32(MC_CG_CONFIG, 0xf);
1134 }
1135
1136 for (i = 0; i < rdev->num_crtc; i++)
1137 radeon_wait_for_vblank(rdev, i);
1138
1139 WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
1140 cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
1141
1142 strobe_mode = cypress_get_strobe_mode_settings(rdev,
1143 boot_state->low.mclk);
1144
1145 mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S1);
1146 mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
1147 WREG32(MC_SEQ_CG, mc_seq_cg);
1148
1149 for (i = 0; i < rdev->usec_timeout; i++) {
1150 if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
1151 break;
1152 udelay(1);
1153 }
1154
1155 mc_seq_cg &= ~CG_SEQ_REQ_MASK;
1156 mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
1157 WREG32(MC_SEQ_CG, mc_seq_cg);
1158
1159 cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
1160}
1161
1162static void cypress_copy_ac_timing_from_s1_to_s0(struct radeon_device *rdev)
1163{
1164 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1165 u32 value;
1166 u32 i;
1167
1168 for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1169 value = RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1170 WREG32(eg_pi->mc_reg_table.mc_reg_address[i].s0 << 2, value);
1171 }
1172}
1173
1174static void cypress_force_mc_use_s0(struct radeon_device *rdev,
1175 struct radeon_ps *radeon_boot_state)
1176{
1177 struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1178 u32 strobe_mode;
1179 u32 mc_seq_cg;
1180 int i;
1181
1182 cypress_copy_ac_timing_from_s1_to_s0(rdev);
1183 radeon_mc_wait_for_idle(rdev);
1184
1185 if ((rdev->family == CHIP_CYPRESS) ||
1186 (rdev->family == CHIP_HEMLOCK)) {
1187 WREG32(MC_CONFIG_MCD, 0xf);
1188 WREG32(MC_CG_CONFIG_MCD, 0xf);
1189 } else {
1190 WREG32(MC_CONFIG, 0xf);
1191 WREG32(MC_CG_CONFIG, 0xf);
1192 }
1193
1194 for (i = 0; i < rdev->num_crtc; i++)
1195 radeon_wait_for_vblank(rdev, i);
1196
1197 WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
1198 cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
1199
1200 strobe_mode = cypress_get_strobe_mode_settings(rdev,
1201 boot_state->low.mclk);
1202
1203 mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S0);
1204 mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
1205 WREG32(MC_SEQ_CG, mc_seq_cg);
1206
1207 for (i = 0; i < rdev->usec_timeout; i++) {
1208 if (!(RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE))
1209 break;
1210 udelay(1);
1211 }
1212
1213 mc_seq_cg &= ~CG_SEQ_REQ_MASK;
1214 mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
1215 WREG32(MC_SEQ_CG, mc_seq_cg);
1216
1217 cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
1218}
1219
1220static int cypress_populate_initial_mvdd_value(struct radeon_device *rdev,
1221 RV770_SMC_VOLTAGE_VALUE *voltage)
1222{
1223 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1224
1225 voltage->index = eg_pi->mvdd_high_index;
1226 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1227
1228 return 0;
1229}
1230
1231int cypress_populate_smc_initial_state(struct radeon_device *rdev,
1232 struct radeon_ps *radeon_initial_state,
1233 RV770_SMC_STATETABLE *table)
1234{
1235 struct rv7xx_ps *initial_state = rv770_get_ps(radeon_initial_state);
1236 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1237 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1238 u32 a_t;
1239
1240 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1241 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
1242 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1243 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
1244 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1245 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
1246 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1247 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
1248 table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1249 cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
1250 table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
1251 cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
1252
1253 table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
1254 cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
1255 table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
1256 cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
1257
1258 table->initialState.levels[0].mclk.mclk770.mclk_value =
1259 cpu_to_be32(initial_state->low.mclk);
1260
1261 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1262 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
1263 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1264 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
1265 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1266 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
1267 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
1268 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
1269 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
1270 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
1271
1272 table->initialState.levels[0].sclk.sclk_value =
1273 cpu_to_be32(initial_state->low.sclk);
1274
1275 table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
1276
1277 table->initialState.levels[0].ACIndex = 0;
1278
1279 cypress_populate_voltage_value(rdev,
1280 &eg_pi->vddc_voltage_table,
1281 initial_state->low.vddc,
1282 &table->initialState.levels[0].vddc);
1283
1284 if (eg_pi->vddci_control)
1285 cypress_populate_voltage_value(rdev,
1286 &eg_pi->vddci_voltage_table,
1287 initial_state->low.vddci,
1288 &table->initialState.levels[0].vddci);
1289
1290 cypress_populate_initial_mvdd_value(rdev,
1291 &table->initialState.levels[0].mvdd);
1292
1293 a_t = CG_R(0xffff) | CG_L(0);
1294 table->initialState.levels[0].aT = cpu_to_be32(a_t);
1295
1296 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
1297
1298
1299 if (pi->boot_in_gen2)
1300 table->initialState.levels[0].gen2PCIE = 1;
1301 else
1302 table->initialState.levels[0].gen2PCIE = 0;
1303 if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
1304 table->initialState.levels[0].gen2XSP = 1;
1305 else
1306 table->initialState.levels[0].gen2XSP = 0;
1307
1308 if (pi->mem_gddr5) {
1309 table->initialState.levels[0].strobeMode =
1310 cypress_get_strobe_mode_settings(rdev,
1311 initial_state->low.mclk);
1312
1313 if (initial_state->low.mclk > pi->mclk_edc_enable_threshold)
1314 table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
1315 else
1316 table->initialState.levels[0].mcFlags = 0;
1317 }
1318
1319 table->initialState.levels[1] = table->initialState.levels[0];
1320 table->initialState.levels[2] = table->initialState.levels[0];
1321
1322 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
1323
1324 return 0;
1325}
1326
1327int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
1328 RV770_SMC_STATETABLE *table)
1329{
1330 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1331 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1332 u32 mpll_ad_func_cntl =
1333 pi->clk_regs.rv770.mpll_ad_func_cntl;
1334 u32 mpll_ad_func_cntl_2 =
1335 pi->clk_regs.rv770.mpll_ad_func_cntl_2;
1336 u32 mpll_dq_func_cntl =
1337 pi->clk_regs.rv770.mpll_dq_func_cntl;
1338 u32 mpll_dq_func_cntl_2 =
1339 pi->clk_regs.rv770.mpll_dq_func_cntl_2;
1340 u32 spll_func_cntl =
1341 pi->clk_regs.rv770.cg_spll_func_cntl;
1342 u32 spll_func_cntl_2 =
1343 pi->clk_regs.rv770.cg_spll_func_cntl_2;
1344 u32 spll_func_cntl_3 =
1345 pi->clk_regs.rv770.cg_spll_func_cntl_3;
1346 u32 mclk_pwrmgt_cntl =
1347 pi->clk_regs.rv770.mclk_pwrmgt_cntl;
1348 u32 dll_cntl =
1349 pi->clk_regs.rv770.dll_cntl;
1350
1351 table->ACPIState = table->initialState;
1352
1353 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
1354
1355 if (pi->acpi_vddc) {
1356 cypress_populate_voltage_value(rdev,
1357 &eg_pi->vddc_voltage_table,
1358 pi->acpi_vddc,
1359 &table->ACPIState.levels[0].vddc);
1360 if (pi->pcie_gen2) {
1361 if (pi->acpi_pcie_gen2)
1362 table->ACPIState.levels[0].gen2PCIE = 1;
1363 else
1364 table->ACPIState.levels[0].gen2PCIE = 0;
1365 } else
1366 table->ACPIState.levels[0].gen2PCIE = 0;
1367 if (pi->acpi_pcie_gen2)
1368 table->ACPIState.levels[0].gen2XSP = 1;
1369 else
1370 table->ACPIState.levels[0].gen2XSP = 0;
1371 } else {
1372 cypress_populate_voltage_value(rdev,
1373 &eg_pi->vddc_voltage_table,
1374 pi->min_vddc_in_table,
1375 &table->ACPIState.levels[0].vddc);
1376 table->ACPIState.levels[0].gen2PCIE = 0;
1377 }
1378
1379 if (eg_pi->acpi_vddci) {
1380 if (eg_pi->vddci_control) {
1381 cypress_populate_voltage_value(rdev,
1382 &eg_pi->vddci_voltage_table,
1383 eg_pi->acpi_vddci,
1384 &table->ACPIState.levels[0].vddci);
1385 }
1386 }
1387
1388 mpll_ad_func_cntl &= ~PDNB;
1389
1390 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
1391
1392 if (pi->mem_gddr5)
1393 mpll_dq_func_cntl &= ~PDNB;
1394 mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
1395
1396 mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
1397 MRDCKA1_RESET |
1398 MRDCKB0_RESET |
1399 MRDCKB1_RESET |
1400 MRDCKC0_RESET |
1401 MRDCKC1_RESET |
1402 MRDCKD0_RESET |
1403 MRDCKD1_RESET);
1404
1405 mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
1406 MRDCKA1_PDNB |
1407 MRDCKB0_PDNB |
1408 MRDCKB1_PDNB |
1409 MRDCKC0_PDNB |
1410 MRDCKC1_PDNB |
1411 MRDCKD0_PDNB |
1412 MRDCKD1_PDNB);
1413
1414 dll_cntl |= (MRDCKA0_BYPASS |
1415 MRDCKA1_BYPASS |
1416 MRDCKB0_BYPASS |
1417 MRDCKB1_BYPASS |
1418 MRDCKC0_BYPASS |
1419 MRDCKC1_BYPASS |
1420 MRDCKD0_BYPASS |
1421 MRDCKD1_BYPASS);
1422
1423 /* evergreen only */
1424 if (rdev->family <= CHIP_HEMLOCK)
1425 spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
1426
1427 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
1428 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
1429
1430 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1431 cpu_to_be32(mpll_ad_func_cntl);
1432 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1433 cpu_to_be32(mpll_ad_func_cntl_2);
1434 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1435 cpu_to_be32(mpll_dq_func_cntl);
1436 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1437 cpu_to_be32(mpll_dq_func_cntl_2);
1438 table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1439 cpu_to_be32(mclk_pwrmgt_cntl);
1440 table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
1441
1442 table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
1443
1444 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1445 cpu_to_be32(spll_func_cntl);
1446 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1447 cpu_to_be32(spll_func_cntl_2);
1448 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1449 cpu_to_be32(spll_func_cntl_3);
1450
1451 table->ACPIState.levels[0].sclk.sclk_value = 0;
1452
1453 cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
1454
1455 if (eg_pi->dynamic_ac_timing)
1456 table->ACPIState.levels[0].ACIndex = 1;
1457
1458 table->ACPIState.levels[1] = table->ACPIState.levels[0];
1459 table->ACPIState.levels[2] = table->ACPIState.levels[0];
1460
1461 return 0;
1462}
1463
1464static void cypress_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
1465 struct atom_voltage_table *voltage_table)
1466{
1467 unsigned int i, diff;
1468
1469 if (voltage_table->count <= MAX_NO_VREG_STEPS)
1470 return;
1471
1472 diff = voltage_table->count - MAX_NO_VREG_STEPS;
1473
1474	for (i = 0; i < MAX_NO_VREG_STEPS; i++)
1475 voltage_table->entries[i] = voltage_table->entries[i + diff];
1476
1477 voltage_table->count = MAX_NO_VREG_STEPS;
1478}
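
/*
 * When the BIOS table has more than MAX_NO_VREG_STEPS entries, the
 * lowest-voltage entries are dropped: the table is sorted ascending
 * and the copy starts at entries[diff], so the highest
 * MAX_NO_VREG_STEPS voltages survive.
 */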
1479
1480int cypress_construct_voltage_tables(struct radeon_device *rdev)
1481{
1482 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1483 int ret;
1484
1485 ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0,
1486 &eg_pi->vddc_voltage_table);
1487 if (ret)
1488 return ret;
1489
1490 if (eg_pi->vddc_voltage_table.count > MAX_NO_VREG_STEPS)
1491 cypress_trim_voltage_table_to_fit_state_table(rdev,
1492 &eg_pi->vddc_voltage_table);
1493
1494 if (eg_pi->vddci_control) {
1495 ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0,
1496 &eg_pi->vddci_voltage_table);
1497 if (ret)
1498 return ret;
1499
1500 if (eg_pi->vddci_voltage_table.count > MAX_NO_VREG_STEPS)
1501 cypress_trim_voltage_table_to_fit_state_table(rdev,
1502 &eg_pi->vddci_voltage_table);
1503 }
1504
1505 return 0;
1506}
1507
1508static void cypress_populate_smc_voltage_table(struct radeon_device *rdev,
1509 struct atom_voltage_table *voltage_table,
1510 RV770_SMC_STATETABLE *table)
1511{
1512 unsigned int i;
1513
1514 for (i = 0; i < voltage_table->count; i++) {
1515 table->highSMIO[i] = 0;
1516 table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
1517 }
1518}
1519
1520int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
1521 RV770_SMC_STATETABLE *table)
1522{
1523 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1524 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1525 unsigned char i;
1526
1527 if (eg_pi->vddc_voltage_table.count) {
1528 cypress_populate_smc_voltage_table(rdev,
1529 &eg_pi->vddc_voltage_table,
1530 table);
1531
1532 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
1533 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
1534 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1535
1536 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
1537 if (pi->max_vddc_in_table <=
1538 eg_pi->vddc_voltage_table.entries[i].value) {
1539 table->maxVDDCIndexInPPTable = i;
1540 break;
1541 }
1542 }
1543 }
1544
1545 if (eg_pi->vddci_voltage_table.count) {
1546 cypress_populate_smc_voltage_table(rdev,
1547 &eg_pi->vddci_voltage_table,
1548 table);
1549
1550 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
1551 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
1552			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
1553 }
1554
1555 return 0;
1556}
1557
1558static u32 cypress_get_mclk_split_point(struct atom_memory_info *memory_info)
1559{
1560 if ((memory_info->mem_type == MEM_TYPE_GDDR3) ||
1561 (memory_info->mem_type == MEM_TYPE_DDR3))
1562 return 30000;
1563
1564 return 0;
1565}
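
/*
 * The MVDD low/high split point is 30000 (300 MHz in 10 kHz units) for
 * DDR3/GDDR3; returning 0 makes the caller disable MVDD control
 * entirely (see cypress_get_mvdd_configuration() below).
 */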
1566
1567int cypress_get_mvdd_configuration(struct radeon_device *rdev)
1568{
1569 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1570 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1571 u8 module_index;
1572 struct atom_memory_info memory_info;
1573 u32 tmp = RREG32(GENERAL_PWRMGT);
1574
1575 if (!(tmp & BACKBIAS_PAD_EN)) {
1576 eg_pi->mvdd_high_index = 0;
1577 eg_pi->mvdd_low_index = 1;
1578 pi->mvdd_control = false;
1579 return 0;
1580 }
1581
1582 if (tmp & BACKBIAS_VALUE)
1583 eg_pi->mvdd_high_index = 1;
1584 else
1585 eg_pi->mvdd_high_index = 0;
1586
1587 eg_pi->mvdd_low_index =
1588 (eg_pi->mvdd_high_index == 0) ? 1 : 0;
1589
1590 module_index = rv770_get_memory_module_index(rdev);
1591
1592 if (radeon_atom_get_memory_info(rdev, module_index, &memory_info)) {
1593 pi->mvdd_control = false;
1594 return 0;
1595 }
1596
1597 pi->mvdd_split_frequency =
1598 cypress_get_mclk_split_point(&memory_info);
1599
1600 if (pi->mvdd_split_frequency == 0) {
1601 pi->mvdd_control = false;
1602 return 0;
1603 }
1604
1605 return 0;
1606}
1607
1608static int cypress_init_smc_table(struct radeon_device *rdev,
1609 struct radeon_ps *radeon_boot_state)
1610{
1611 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1612 RV770_SMC_STATETABLE *table = &pi->smc_statetable;
1613 int ret;
1614
1615 memset(table, 0, sizeof(RV770_SMC_STATETABLE));
1616
1617 cypress_populate_smc_voltage_tables(rdev, table);
1618
1619 switch (rdev->pm.int_thermal_type) {
1620 case THERMAL_TYPE_EVERGREEN:
1621 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
1622 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1623 break;
1624 case THERMAL_TYPE_NONE:
1625 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1626 break;
1627 default:
1628 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1629 break;
1630 }
1631
1632 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
1633 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1634
1635 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1636 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
1637
1638 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1639 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1640
1641 if (pi->mem_gddr5)
1642 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1643
1644 ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
1645 if (ret)
1646 return ret;
1647
1648 ret = cypress_populate_smc_acpi_state(rdev, table);
1649 if (ret)
1650 return ret;
1651
1652 table->driverState = table->initialState;
1653
1654 return rv770_copy_bytes_to_smc(rdev,
1655 pi->state_table_start,
1656 (u8 *)table, sizeof(RV770_SMC_STATETABLE),
1657 pi->sram_end);
1658}
1659
1660int cypress_populate_mc_reg_table(struct radeon_device *rdev,
1661 struct radeon_ps *radeon_boot_state)
1662{
1663 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1664 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1665 struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1666 SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
1667
1668 rv770_write_smc_soft_register(rdev,
1669 RV770_SMC_SOFT_REGISTER_seq_index, 1);
1670
1671 cypress_populate_mc_reg_addresses(rdev, &mc_reg_table);
1672
1673 cypress_convert_mc_reg_table_entry_to_smc(rdev,
1674 &boot_state->low,
1675 &mc_reg_table.data[0]);
1676
1677 cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[0],
1678 &mc_reg_table.data[1], eg_pi->mc_reg_table.last,
1679 eg_pi->mc_reg_table.valid_flag);
1680
1681 cypress_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, &mc_reg_table);
1682
1683 return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
1684 (u8 *)&mc_reg_table, sizeof(SMC_Evergreen_MCRegisters),
1685 pi->sram_end);
1686}
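
/*
 * Layout of the uploaded SMC_Evergreen_MCRegisters table: data[0] is
 * filled from the boot state's low level (ACIndex 0), data[1] from the
 * first AC timing entry (used by the ACPI state, ACIndex 1), and
 * data[2..4] from the driver state's low/medium/high levels
 * (ACIndex 2..4), re-uploaded on each state change by
 * cypress_upload_mc_reg_table().
 */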
1687
1688int cypress_get_table_locations(struct radeon_device *rdev)
1689{
1690 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1691 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1692 u32 tmp;
1693 int ret;
1694
1695 ret = rv770_read_smc_sram_dword(rdev,
1696 EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1697 EVERGREEN_SMC_FIRMWARE_HEADER_stateTable,
1698 &tmp, pi->sram_end);
1699 if (ret)
1700 return ret;
1701
1702 pi->state_table_start = (u16)tmp;
1703
1704 ret = rv770_read_smc_sram_dword(rdev,
1705 EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1706 EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters,
1707 &tmp, pi->sram_end);
1708 if (ret)
1709 return ret;
1710
1711 pi->soft_regs_start = (u16)tmp;
1712
1713 ret = rv770_read_smc_sram_dword(rdev,
1714 EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1715 EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable,
1716 &tmp, pi->sram_end);
1717 if (ret)
1718 return ret;
1719
1720 eg_pi->mc_reg_table_start = (u16)tmp;
1721
1722 return 0;
1723}
1724
1725void cypress_enable_display_gap(struct radeon_device *rdev)
1726{
1727 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1728
1729 tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
1730 tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1731 DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
1732
1733 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1734 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
1735 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
1736 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1737}
1738
1739static void cypress_program_display_gap(struct radeon_device *rdev)
1740{
1741 u32 tmp, pipe;
1742 int i;
1743
1744 tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
1745 if (rdev->pm.dpm.new_active_crtc_count > 0)
1746 tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1747 else
1748 tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1749
1750 if (rdev->pm.dpm.new_active_crtc_count > 1)
1751 tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1752 else
1753 tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1754
1755 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1756
1757 tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
1758 pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
1759
1760 if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
1761 (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
1762 /* find the first active crtc */
1763 for (i = 0; i < rdev->num_crtc; i++) {
1764 if (rdev->pm.dpm.new_active_crtcs & (1 << i))
1765 break;
1766 }
1767 if (i == rdev->num_crtc)
1768 pipe = 0;
1769 else
1770 pipe = i;
1771
1772 tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
1773 tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
1774 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
1775 }
1776
1777 cypress_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
1778}
1779
1780void cypress_dpm_setup_asic(struct radeon_device *rdev)
1781{
1782 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1783
1784 rv740_read_clock_registers(rdev);
1785 rv770_read_voltage_smio_registers(rdev);
1786 rv770_get_max_vddc(rdev);
1787 rv770_get_memory_type(rdev);
1788
1789	if (eg_pi->pcie_performance_request) {
1790		eg_pi->pcie_performance_request_registered = false;
1791		cypress_advertise_gen2_capability(rdev);
1792	}
1793
1794
1795 rv770_get_pcie_gen2_status(rdev);
1796
1797 rv770_enable_acpi_pm(rdev);
1798}
1799
1800int cypress_dpm_enable(struct radeon_device *rdev)
1801{
1802 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1803 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1804 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1805 int ret;
1806
1807 if (pi->gfx_clock_gating)
1808 rv770_restore_cgcg(rdev);
1809
1810 if (rv770_dpm_enabled(rdev))
1811 return -EINVAL;
1812
1813 if (pi->voltage_control) {
1814 rv770_enable_voltage_control(rdev, true);
1815 ret = cypress_construct_voltage_tables(rdev);
1816 if (ret) {
1817 DRM_ERROR("cypress_construct_voltage_tables failed\n");
1818 return ret;
1819 }
1820 }
1821
1822 if (pi->mvdd_control) {
1823 ret = cypress_get_mvdd_configuration(rdev);
1824 if (ret) {
1825 DRM_ERROR("cypress_get_mvdd_configuration failed\n");
1826 return ret;
1827 }
1828 }
1829
1830 if (eg_pi->dynamic_ac_timing) {
1831 cypress_set_mc_reg_address_table(rdev);
1832 cypress_force_mc_use_s0(rdev, boot_ps);
1833 ret = cypress_initialize_mc_reg_table(rdev);
1834 if (ret)
1835 eg_pi->dynamic_ac_timing = false;
1836 cypress_force_mc_use_s1(rdev, boot_ps);
1837 }
1838
1839 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1840 rv770_enable_backbias(rdev, true);
1841
1842 if (pi->dynamic_ss)
1843 cypress_enable_spread_spectrum(rdev, true);
1844
1845 if (pi->thermal_protection)
1846 rv770_enable_thermal_protection(rdev, true);
1847
1848 rv770_setup_bsp(rdev);
1849 rv770_program_git(rdev);
1850 rv770_program_tp(rdev);
1851 rv770_program_tpp(rdev);
1852 rv770_program_sstp(rdev);
1853 rv770_program_engine_speed_parameters(rdev);
1854 cypress_enable_display_gap(rdev);
1855 rv770_program_vc(rdev);
1856
1857 if (pi->dynamic_pcie_gen2)
1858 cypress_enable_dynamic_pcie_gen2(rdev, true);
1859
1860 ret = rv770_upload_firmware(rdev);
1861 if (ret) {
1862 DRM_ERROR("rv770_upload_firmware failed\n");
1863 return ret;
1864 }
1865
1866 ret = cypress_get_table_locations(rdev);
1867 if (ret) {
1868 DRM_ERROR("cypress_get_table_locations failed\n");
1869 return ret;
1870 }
1871 ret = cypress_init_smc_table(rdev, boot_ps);
1872 if (ret) {
1873 DRM_ERROR("cypress_init_smc_table failed\n");
1874 return ret;
1875 }
1876 if (eg_pi->dynamic_ac_timing) {
1877 ret = cypress_populate_mc_reg_table(rdev, boot_ps);
1878 if (ret) {
1879 DRM_ERROR("cypress_populate_mc_reg_table failed\n");
1880 return ret;
1881 }
1882 }
1883
1884 cypress_program_response_times(rdev);
1885
1886 r7xx_start_smc(rdev);
1887
1888 ret = cypress_notify_smc_display_change(rdev, false);
1889 if (ret) {
1890 DRM_ERROR("cypress_notify_smc_display_change failed\n");
1891 return ret;
1892 }
1893 cypress_enable_sclk_control(rdev, true);
1894
1895 if (eg_pi->memory_transition)
1896 cypress_enable_mclk_control(rdev, true);
1897
1898 cypress_start_dpm(rdev);
1899
1900 if (pi->gfx_clock_gating)
1901 cypress_gfx_clock_gating_enable(rdev, true);
1902
1903 if (pi->mg_clock_gating)
1904 cypress_mg_clock_gating_enable(rdev, true);
1905
1906 if (rdev->irq.installed &&
1907 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1908 PPSMC_Result result;
1909
1910 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1911 if (ret)
1912 return ret;
1913 rdev->irq.dpm_thermal = true;
1914 radeon_irq_set(rdev);
1915 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
1916
1917 if (result != PPSMC_Result_OK)
1918 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1919 }
1920
1921 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
1922
1923 return 0;
1924}
1925
1926void cypress_dpm_disable(struct radeon_device *rdev)
1927{
1928 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1929 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1930 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1931
1932 if (!rv770_dpm_enabled(rdev))
1933 return;
1934
1935 rv770_clear_vc(rdev);
1936
1937 if (pi->thermal_protection)
1938 rv770_enable_thermal_protection(rdev, false);
1939
1940 if (pi->dynamic_pcie_gen2)
1941 cypress_enable_dynamic_pcie_gen2(rdev, false);
1942
1943 if (rdev->irq.installed &&
1944 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1945 rdev->irq.dpm_thermal = false;
1946 radeon_irq_set(rdev);
1947 }
1948
1949 if (pi->gfx_clock_gating)
1950 cypress_gfx_clock_gating_enable(rdev, false);
1951
1952 if (pi->mg_clock_gating)
1953 cypress_mg_clock_gating_enable(rdev, false);
1954
1955 rv770_stop_dpm(rdev);
1956 r7xx_stop_smc(rdev);
1957
1958 cypress_enable_spread_spectrum(rdev, false);
1959
1960 if (eg_pi->dynamic_ac_timing)
1961 cypress_force_mc_use_s1(rdev, boot_ps);
1962
1963 rv770_reset_smio_status(rdev);
1964}
1965
1966int cypress_dpm_set_power_state(struct radeon_device *rdev)
1967{
1968 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1969 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
1970 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
1971 int ret;
1972
1973 ret = rv770_restrict_performance_levels_before_switch(rdev);
1974 if (ret) {
1975 DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
1976 return ret;
1977 }
1978 if (eg_pi->pcie_performance_request)
1979 cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);
1980
1981 rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1982 ret = rv770_halt_smc(rdev);
1983 if (ret) {
1984 DRM_ERROR("rv770_halt_smc failed\n");
1985 return ret;
1986 }
1987 ret = cypress_upload_sw_state(rdev, new_ps);
1988 if (ret) {
1989 DRM_ERROR("cypress_upload_sw_state failed\n");
1990 return ret;
1991 }
1992 if (eg_pi->dynamic_ac_timing) {
1993 ret = cypress_upload_mc_reg_table(rdev, new_ps);
1994 if (ret) {
1995 DRM_ERROR("cypress_upload_mc_reg_table failed\n");
1996 return ret;
1997 }
1998 }
1999
2000 cypress_program_memory_timing_parameters(rdev, new_ps);
2001
2002 ret = rv770_resume_smc(rdev);
2003 if (ret) {
2004 DRM_ERROR("rv770_resume_smc failed\n");
2005 return ret;
2006 }
2007 ret = rv770_set_sw_state(rdev);
2008 if (ret) {
2009 DRM_ERROR("rv770_set_sw_state failed\n");
2010 return ret;
2011 }
2012 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2013
2014 if (eg_pi->pcie_performance_request)
2015 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
2016
2017 ret = rv770_unrestrict_performance_levels_after_switch(rdev);
2018 if (ret) {
2019 DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
2020 return ret;
2021 }
2022
2023 return 0;
2024}
2025
2026void cypress_dpm_reset_asic(struct radeon_device *rdev)
2027{
2028 rv770_restrict_performance_levels_before_switch(rdev);
2029 rv770_set_boot_state(rdev);
2030}
2031
2032void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
2033{
2034 cypress_program_display_gap(rdev);
2035}
2036
2037int cypress_dpm_init(struct radeon_device *rdev)
2038{
2039 struct rv7xx_power_info *pi;
2040 struct evergreen_power_info *eg_pi;
2041 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2042 uint16_t data_offset, size;
2043 uint8_t frev, crev;
2044 struct atom_clock_dividers dividers;
2045 int ret;
2046
2047 eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
2048 if (eg_pi == NULL)
2049 return -ENOMEM;
2050 rdev->pm.dpm.priv = eg_pi;
2051 pi = &eg_pi->rv7xx;
2052
2053 rv770_get_max_vddc(rdev);
2054
2055 eg_pi->ulv.supported = false;
2056 pi->acpi_vddc = 0;
2057 eg_pi->acpi_vddci = 0;
2058 pi->min_vddc_in_table = 0;
2059 pi->max_vddc_in_table = 0;
2060
2061 ret = rv7xx_parse_power_table(rdev);
2062 if (ret)
2063 return ret;
2064
2065 if (rdev->pm.dpm.voltage_response_time == 0)
2066 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2067 if (rdev->pm.dpm.backbias_response_time == 0)
2068 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2069
2070 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2071 0, false, &dividers);
2072	if (ret)
2073		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2074	else
2075		pi->ref_div = dividers.ref_div + 1;
2076
2077 pi->mclk_strobe_mode_threshold = 40000;
2078 pi->mclk_edc_enable_threshold = 40000;
2079 eg_pi->mclk_edc_wr_enable_threshold = 40000;
2080
2081 pi->rlp = RV770_RLP_DFLT;
2082 pi->rmp = RV770_RMP_DFLT;
2083 pi->lhp = RV770_LHP_DFLT;
2084 pi->lmp = RV770_LMP_DFLT;
2085
2086 pi->voltage_control =
2087 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2088
2089 pi->mvdd_control =
2090 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2091
2092 eg_pi->vddci_control =
2093 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2094
2095 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
2096 &frev, &crev, &data_offset)) {
2097 pi->sclk_ss = true;
2098 pi->mclk_ss = true;
2099 pi->dynamic_ss = true;
2100 } else {
2101 pi->sclk_ss = false;
2102 pi->mclk_ss = false;
2103 pi->dynamic_ss = true;
2104 }
2105
2106 pi->asi = RV770_ASI_DFLT;
2107 pi->pasi = CYPRESS_HASI_DFLT;
2108 pi->vrc = CYPRESS_VRC_DFLT;
2109
2110 pi->power_gating = false;
2111
2112 if ((rdev->family == CHIP_CYPRESS) ||
2113 (rdev->family == CHIP_HEMLOCK))
2114 pi->gfx_clock_gating = false;
2115 else
2116 pi->gfx_clock_gating = true;
2117
2118 pi->mg_clock_gating = true;
2119 pi->mgcgtssm = true;
2120 eg_pi->ls_clock_gating = false;
2121 eg_pi->sclk_deep_sleep = false;
2122
2123 pi->dynamic_pcie_gen2 = true;
2124
2125 if (pi->gfx_clock_gating &&
2126 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2127 pi->thermal_protection = true;
2128 else
2129 pi->thermal_protection = false;
2130
2131 pi->display_gap = true;
2132
2133 if (rdev->flags & RADEON_IS_MOBILITY)
2134 pi->dcodt = true;
2135 else
2136 pi->dcodt = false;
2137
2138 pi->ulps = true;
2139
2140 eg_pi->dynamic_ac_timing = true;
2141 eg_pi->abm = true;
2142 eg_pi->mcls = true;
2143 eg_pi->light_sleep = true;
2144 eg_pi->memory_transition = true;
2145#if defined(CONFIG_ACPI)
2146 eg_pi->pcie_performance_request =
2147 radeon_acpi_is_pcie_performance_request_supported(rdev);
2148#else
2149 eg_pi->pcie_performance_request = false;
2150#endif
2151
2152 if ((rdev->family == CHIP_CYPRESS) ||
2153 (rdev->family == CHIP_HEMLOCK) ||
2154 (rdev->family == CHIP_JUNIPER))
2155 eg_pi->dll_default_on = true;
2156 else
2157 eg_pi->dll_default_on = false;
2158
2159 eg_pi->sclk_deep_sleep = false;
2160 pi->mclk_stutter_mode_threshold = 0;
2161
2162 pi->sram_end = SMC_RAM_END;
2163
2164 return 0;
2165}
2166
2167void cypress_dpm_fini(struct radeon_device *rdev)
2168{
2169 int i;
2170
2171 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2172 kfree(rdev->pm.dpm.ps[i].ps_priv);
2173 }
2174 kfree(rdev->pm.dpm.ps);
2175 kfree(rdev->pm.dpm.priv);
2176}
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.h b/drivers/gpu/drm/radeon/cypress_dpm.h
new file mode 100644
index 000000000000..4c3f18c69f4f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cypress_dpm.h
@@ -0,0 +1,160 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __CYPRESS_DPM_H__
24#define __CYPRESS_DPM_H__
25
26#include "rv770_dpm.h"
27#include "evergreen_smc.h"
28
29struct evergreen_mc_reg_entry {
30 u32 mclk_max;
31 u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
32};
33
34struct evergreen_mc_reg_table {
35 u8 last;
36 u8 num_entries;
37 u16 valid_flag;
38 struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
39 SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
40};
41
42struct evergreen_ulv_param {
43 bool supported;
44 struct rv7xx_pl *pl;
45};
46
47struct evergreen_arb_registers {
48 u32 mc_arb_dram_timing;
49 u32 mc_arb_dram_timing2;
50 u32 mc_arb_rfsh_rate;
51 u32 mc_arb_burst_time;
52};
53
54struct at {
55 u32 rlp;
56 u32 rmp;
57 u32 lhp;
58 u32 lmp;
59};
60
61struct evergreen_power_info {
62 /* must be first! */
63 struct rv7xx_power_info rv7xx;
64 /* flags */
65 bool vddci_control;
66 bool dynamic_ac_timing;
67 bool abm;
68 bool mcls;
69 bool light_sleep;
70 bool memory_transition;
71 bool pcie_performance_request;
72 bool pcie_performance_request_registered;
73 bool sclk_deep_sleep;
74 bool dll_default_on;
75 bool ls_clock_gating;
76 bool smu_uvd_hs;
77 bool uvd_enabled;
78 /* stored values */
79 u16 acpi_vddci;
80 u8 mvdd_high_index;
81 u8 mvdd_low_index;
82 u32 mclk_edc_wr_enable_threshold;
83 struct evergreen_mc_reg_table mc_reg_table;
84 struct atom_voltage_table vddc_voltage_table;
85 struct atom_voltage_table vddci_voltage_table;
86 struct evergreen_arb_registers bootup_arb_registers;
87 struct evergreen_ulv_param ulv;
88 struct at ats[2];
89 /* smc offsets */
90 u16 mc_reg_table_start;
91 struct radeon_ps current_rps;
92 struct rv7xx_ps current_ps;
93 struct radeon_ps requested_rps;
94 struct rv7xx_ps requested_ps;
95};
96
97#define CYPRESS_HASI_DFLT 400000
98#define CYPRESS_MGCGTTLOCAL0_DFLT 0x00000000
99#define CYPRESS_MGCGTTLOCAL1_DFLT 0x00000000
100#define CYPRESS_MGCGTTLOCAL2_DFLT 0x00000000
101#define CYPRESS_MGCGTTLOCAL3_DFLT 0x00000000
102#define CYPRESS_MGCGCGTSSMCTRL_DFLT 0x81944bc0
103#define REDWOOD_MGCGCGTSSMCTRL_DFLT 0x6e944040
104#define CEDAR_MGCGCGTSSMCTRL_DFLT 0x46944040
105#define CYPRESS_VRC_DFLT 0xC00033
106
107#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
108#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
109#define PCIE_PERF_REQ_PECI_GEN1 2
110#define PCIE_PERF_REQ_PECI_GEN2 3
111#define PCIE_PERF_REQ_PECI_GEN3 4
112
113int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
114 struct rv7xx_pl *pl,
115 RV770_SMC_HW_PERFORMANCE_LEVEL *level,
116 u8 watermark_level);
117int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
118 RV770_SMC_STATETABLE *table);
119int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
120 RV770_SMC_STATETABLE *table);
121int cypress_populate_smc_initial_state(struct radeon_device *rdev,
122 struct radeon_ps *radeon_initial_state,
123 RV770_SMC_STATETABLE *table);
124u32 cypress_calculate_burst_time(struct radeon_device *rdev,
125 u32 engine_clock, u32 memory_clock);
126void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
127 struct radeon_ps *radeon_new_state,
128 struct radeon_ps *radeon_current_state);
129int cypress_upload_sw_state(struct radeon_device *rdev,
130 struct radeon_ps *radeon_new_state);
131int cypress_upload_mc_reg_table(struct radeon_device *rdev,
132 struct radeon_ps *radeon_new_state);
133void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
134 struct radeon_ps *radeon_new_state);
135void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
136 struct radeon_ps *radeon_new_state,
137 struct radeon_ps *radeon_current_state);
138int cypress_construct_voltage_tables(struct radeon_device *rdev);
139int cypress_get_mvdd_configuration(struct radeon_device *rdev);
140void cypress_enable_spread_spectrum(struct radeon_device *rdev,
141 bool enable);
142void cypress_enable_display_gap(struct radeon_device *rdev);
143int cypress_get_table_locations(struct radeon_device *rdev);
144int cypress_populate_mc_reg_table(struct radeon_device *rdev,
145 struct radeon_ps *radeon_boot_state);
146void cypress_program_response_times(struct radeon_device *rdev);
147int cypress_notify_smc_display_change(struct radeon_device *rdev,
148 bool has_display);
149void cypress_enable_sclk_control(struct radeon_device *rdev,
150 bool enable);
151void cypress_enable_mclk_control(struct radeon_device *rdev,
152 bool enable);
153void cypress_start_dpm(struct radeon_device *rdev);
154void cypress_advertise_gen2_capability(struct radeon_device *rdev);
155u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf);
156u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
157 u32 memory_clock, bool strobe_mode);
158u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk);
159
160#endif
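
The /* must be first! */ annotation works because C guarantees that a pointer to a struct also points at its first member, so evergreen private data can be handed to shared rv7xx code unchanged. A minimal standalone sketch of that embedding idiom (the names and fields below are illustrative, not the driver's):

#include <stdio.h>

struct rv7xx_power_info_sketch { int thermal_type; };

struct evergreen_power_info_sketch {
	struct rv7xx_power_info_sketch rv7xx;	/* must be first! */
	int vddci_control;			/* evergreen-only field */
};

int main(void)
{
	struct evergreen_power_info_sketch eg = { { 7 }, 1 };
	/* shared rv7xx-level helpers can treat &eg as their own state */
	struct rv7xx_power_info_sketch *pi =
		(struct rv7xx_power_info_sketch *)&eg;

	printf("thermal_type via base pointer: %d\n", pi->thermal_type);
	return 0;
}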
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0f89ce3d02b9..2e1de4fd2975 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -33,9 +33,7 @@
 #include "avivod.h"
 #include "evergreen_reg.h"
 #include "evergreen_blit_shaders.h"
-
-#define EVERGREEN_PFP_UCODE_SIZE 1120
-#define EVERGREEN_PM4_UCODE_SIZE 1376
+#include "radeon_ucode.h"
 
 static const u32 crtc_offsets[6] =
 {
@@ -47,9 +45,98 @@ static const u32 crtc_offsets[6] =
 	EVERGREEN_CRTC5_REGISTER_OFFSET
 };
 
48#include "clearstate_evergreen.h"
49
50static u32 sumo_rlc_save_restore_register_list[] =
51{
52 0x98fc,
53 0x9830,
54 0x9834,
55 0x9838,
56 0x9870,
57 0x9874,
58 0x8a14,
59 0x8b24,
60 0x8bcc,
61 0x8b10,
62 0x8d00,
63 0x8d04,
64 0x8c00,
65 0x8c04,
66 0x8c08,
67 0x8c0c,
68 0x8d8c,
69 0x8c20,
70 0x8c24,
71 0x8c28,
72 0x8c18,
73 0x8c1c,
74 0x8cf0,
75 0x8e2c,
76 0x8e38,
77 0x8c30,
78 0x9508,
79 0x9688,
80 0x9608,
81 0x960c,
82 0x9610,
83 0x9614,
84 0x88c4,
85 0x88d4,
86 0xa008,
87 0x900c,
88 0x9100,
89 0x913c,
90 0x98f8,
91 0x98f4,
92 0x9b7c,
93 0x3f8c,
94 0x8950,
95 0x8954,
96 0x8a18,
97 0x8b28,
98 0x9144,
99 0x9148,
100 0x914c,
101 0x3f90,
102 0x3f94,
103 0x915c,
104 0x9160,
105 0x9178,
106 0x917c,
107 0x9180,
108 0x918c,
109 0x9190,
110 0x9194,
111 0x9198,
112 0x919c,
113 0x91a8,
114 0x91ac,
115 0x91b0,
116 0x91b4,
117 0x91b8,
118 0x91c4,
119 0x91c8,
120 0x91cc,
121 0x91d0,
122 0x91d4,
123 0x91e0,
124 0x91e4,
125 0x91ec,
126 0x91f0,
127 0x91f4,
128 0x9200,
129 0x9204,
130 0x929c,
131 0x9150,
132 0x802c,
133};
+static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+void evergreen_program_aspm(struct radeon_device *rdev);
 extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 				     int ring, u32 cp_int_cntl);
 
@@ -2036,7 +2123,8 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 					       u32 lb_size, u32 num_heads)
 {
 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
-	struct evergreen_wm_params wm;
+	struct evergreen_wm_params wm_low, wm_high;
+	u32 dram_channels;
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -2052,39 +2140,81 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
 	priority_a_cnt = 0;
 	priority_b_cnt = 0;
+	dram_channels = evergreen_get_number_of_dram_channels(rdev);
+
+	/* watermark for high clocks */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		wm_high.yclk =
+			radeon_dpm_get_mclk(rdev, false) * 10;
+		wm_high.sclk =
+			radeon_dpm_get_sclk(rdev, false) * 10;
+	} else {
+		wm_high.yclk = rdev->pm.current_mclk * 10;
+		wm_high.sclk = rdev->pm.current_sclk * 10;
+	}
 
-	wm.yclk = rdev->pm.current_mclk * 10;
-	wm.sclk = rdev->pm.current_sclk * 10;
-	wm.disp_clk = mode->clock;
-	wm.src_width = mode->crtc_hdisplay;
-	wm.active_time = mode->crtc_hdisplay * pixel_period;
-	wm.blank_time = line_time - wm.active_time;
-	wm.interlaced = false;
+	wm_high.disp_clk = mode->clock;
+	wm_high.src_width = mode->crtc_hdisplay;
+	wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+	wm_high.blank_time = line_time - wm_high.active_time;
+	wm_high.interlaced = false;
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		wm.interlaced = true;
-	wm.vsc = radeon_crtc->vsc;
-	wm.vtaps = 1;
+		wm_high.interlaced = true;
+	wm_high.vsc = radeon_crtc->vsc;
+	wm_high.vtaps = 1;
 	if (radeon_crtc->rmx_type != RMX_OFF)
-		wm.vtaps = 2;
-	wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
-	wm.lb_size = lb_size;
-	wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
-	wm.num_heads = num_heads;
+		wm_high.vtaps = 2;
+	wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+	wm_high.lb_size = lb_size;
+	wm_high.dram_channels = dram_channels;
+	wm_high.num_heads = num_heads;
+
+	/* watermark for low clocks */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		wm_low.yclk =
+			radeon_dpm_get_mclk(rdev, true) * 10;
+		wm_low.sclk =
+			radeon_dpm_get_sclk(rdev, true) * 10;
+	} else {
+		wm_low.yclk = rdev->pm.current_mclk * 10;
+		wm_low.sclk = rdev->pm.current_sclk * 10;
+	}
+
+	wm_low.disp_clk = mode->clock;
+	wm_low.src_width = mode->crtc_hdisplay;
+	wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+	wm_low.blank_time = line_time - wm_low.active_time;
+	wm_low.interlaced = false;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		wm_low.interlaced = true;
+	wm_low.vsc = radeon_crtc->vsc;
+	wm_low.vtaps = 1;
+	if (radeon_crtc->rmx_type != RMX_OFF)
+		wm_low.vtaps = 2;
+	wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+	wm_low.lb_size = lb_size;
+	wm_low.dram_channels = dram_channels;
+	wm_low.num_heads = num_heads;
 
 	/* set for high clocks */
-	latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+	latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
 	/* set for low clocks */
-	/* wm.yclk = low clk; wm.sclk = low clk */
-	latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+	latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
 
 	/* possibly force display priority to high */
 	/* should really do this at mode validation time... */
-	if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
-	    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
-	    !evergreen_check_latency_hiding(&wm) ||
+	if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+	    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+	    !evergreen_check_latency_hiding(&wm_high) ||
 	    (rdev->disp_priority == 2)) {
-		DRM_DEBUG_KMS("force priority to high\n");
+		DRM_DEBUG_KMS("force priority a to high\n");
 		priority_a_cnt |= PRIORITY_ALWAYS_ON;
+	}
+	if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+	    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+	    !evergreen_check_latency_hiding(&wm_low) ||
+	    (rdev->disp_priority == 2)) {
+		DRM_DEBUG_KMS("force priority b to high\n");
 		priority_b_cnt |= PRIORITY_ALWAYS_ON;
 	}
 
@@ -2137,6 +2267,10 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
 
+	/* save values for DPM */
+	radeon_crtc->line_time = line_time;
+	radeon_crtc->wm_high = latency_watermark_a;
+	radeon_crtc->wm_low = latency_watermark_b;
 }
 
 /**
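
For reference, a standalone sketch of the timing arithmetic feeding the wm_high/wm_low structs above; the mode numbers are illustrative, and the units follow from mode->clock being in kHz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock_khz = 148500;	/* e.g. a 1080p60 pixel clock */
	uint32_t crtc_hdisplay = 1920;
	uint32_t crtc_htotal = 2200;
	/* 10^6 / kHz gives nanoseconds per pixel (truncated) */
	uint32_t pixel_period = 1000000 / clock_khz;
	uint32_t line_time = crtc_htotal * pixel_period;
	uint32_t active_time = crtc_hdisplay * pixel_period;

	if (line_time > 65535)		/* driver clamps to a u16 field */
		line_time = 65535;
	printf("line %u ns, active %u ns, blank %u ns\n",
	       line_time, active_time, line_time - active_time);
	return 0;
}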
@@ -3120,10 +3254,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		u32 efuse_straps_4;
 		u32 efuse_straps_3;
 
-		WREG32(RCU_IND_INDEX, 0x204);
-		efuse_straps_4 = RREG32(RCU_IND_DATA);
-		WREG32(RCU_IND_INDEX, 0x203);
-		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		efuse_straps_4 = RREG32_RCU(0x204);
+		efuse_straps_3 = RREG32_RCU(0x203);
 		tmp = (((efuse_straps_4 & 0xf) << 4) |
 		       ((efuse_straps_3 & 0xf0000000) >> 28));
 	} else {
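
The RREG32_RCU() helper introduced here wraps the index/data sequence that the removed lines spelled out by hand. A standalone sketch of that pattern against a simulated register file (the 0x100 INDEX offset mirrors RCU_IND_INDEX; everything else is invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t rcu_regs[0x400];	/* pretend RCU register space */
static uint32_t rcu_ind_index;		/* latched RCU_IND_INDEX value */

static void wreg32(uint32_t reg, uint32_t v)
{
	if (reg == 0x100)		/* RCU_IND_INDEX */
		rcu_ind_index = v;
}

static uint32_t rreg32_rcu(uint32_t reg)
{
	wreg32(0x100, reg);		/* select the indirect register */
	return rcu_regs[rcu_ind_index];	/* then read RCU_IND_DATA */
}

int main(void)
{
	rcu_regs[0x204] = 0xab;		/* efuse_straps_4 stand-in */
	printf("0x204 -> 0x%x\n", rreg32_rcu(0x204));
	return 0;
}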
@@ -3727,6 +3859,262 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
3862/*
3863 * RLC
3864 */
3865#define RLC_SAVE_RESTORE_LIST_END_MARKER 0x00000000
3866#define RLC_CLEAR_STATE_END_MARKER 0x00000001
3867
3868void sumo_rlc_fini(struct radeon_device *rdev)
3869{
3870 int r;
3871
3872 /* save restore block */
3873 if (rdev->rlc.save_restore_obj) {
3874 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
3875 if (unlikely(r != 0))
3876 dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
3877 radeon_bo_unpin(rdev->rlc.save_restore_obj);
3878 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3879
3880 radeon_bo_unref(&rdev->rlc.save_restore_obj);
3881 rdev->rlc.save_restore_obj = NULL;
3882 }
3883
3884 /* clear state block */
3885 if (rdev->rlc.clear_state_obj) {
3886 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
3887 if (unlikely(r != 0))
3888 dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
3889 radeon_bo_unpin(rdev->rlc.clear_state_obj);
3890 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3891
3892 radeon_bo_unref(&rdev->rlc.clear_state_obj);
3893 rdev->rlc.clear_state_obj = NULL;
3894 }
3895}
3896
3897int sumo_rlc_init(struct radeon_device *rdev)
3898{
3899 u32 *src_ptr;
3900 volatile u32 *dst_ptr;
3901 u32 dws, data, i, j, k, reg_num;
3902 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
3903 u64 reg_list_mc_addr;
3904 struct cs_section_def *cs_data;
3905 int r;
3906
3907 src_ptr = rdev->rlc.reg_list;
3908 dws = rdev->rlc.reg_list_size;
3909 cs_data = rdev->rlc.cs_data;
3910
3911 /* save restore block */
3912 if (rdev->rlc.save_restore_obj == NULL) {
3913 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
3914 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
3915 if (r) {
3916 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
3917 return r;
3918 }
3919 }
3920
3921 r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
3922 if (unlikely(r != 0)) {
3923 sumo_rlc_fini(rdev);
3924 return r;
3925 }
3926 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
3927 &rdev->rlc.save_restore_gpu_addr);
3928 if (r) {
3929 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3930 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
3931 sumo_rlc_fini(rdev);
3932 return r;
3933 }
3934 r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
3935 if (r) {
3936 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
3937 sumo_rlc_fini(rdev);
3938 return r;
3939 }
3940 /* write the sr buffer */
3941 dst_ptr = rdev->rlc.sr_ptr;
3942 /* format:
3943 * dw0: (reg2 << 16) | reg1
3944 * dw1: reg1 save space
3945 * dw2: reg2 save space
3946 */
3947 for (i = 0; i < dws; i++) {
3948 data = src_ptr[i] >> 2;
3949 i++;
3950 if (i < dws)
3951 data |= (src_ptr[i] >> 2) << 16;
3952 j = (((i - 1) * 3) / 2);
3953 dst_ptr[j] = data;
3954 }
3955 j = ((i * 3) / 2);
3956 dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
3957
3958 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
3959 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
3960
3961 /* clear state block */
3962 reg_list_num = 0;
3963 dws = 0;
3964 for (i = 0; cs_data[i].section != NULL; i++) {
3965 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
3966 reg_list_num++;
3967 dws += cs_data[i].section[j].reg_count;
3968 }
3969 }
3970 reg_list_blk_index = (3 * reg_list_num + 2);
3971 dws += reg_list_blk_index;
3972
3973 if (rdev->rlc.clear_state_obj == NULL) {
3974 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
3975 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
3976 if (r) {
3977 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
3978 sumo_rlc_fini(rdev);
3979 return r;
3980 }
3981 }
3982 r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
3983 if (unlikely(r != 0)) {
3984 sumo_rlc_fini(rdev);
3985 return r;
3986 }
3987 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
3988 &rdev->rlc.clear_state_gpu_addr);
3989 if (r) {
3990
3991 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
3992 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
3993 sumo_rlc_fini(rdev);
3994 return r;
3995 }
3996 r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
3997 if (r) {
3998 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
3999 sumo_rlc_fini(rdev);
4000 return r;
4001 }
4002 /* set up the cs buffer */
4003 dst_ptr = rdev->rlc.cs_ptr;
4004 reg_list_hdr_blk_index = 0;
4005 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
4006 data = upper_32_bits(reg_list_mc_addr);
4007 dst_ptr[reg_list_hdr_blk_index] = data;
4008 reg_list_hdr_blk_index++;
4009 for (i = 0; cs_data[i].section != NULL; i++) {
4010 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4011 reg_num = cs_data[i].section[j].reg_count;
4012 data = reg_list_mc_addr & 0xffffffff;
4013 dst_ptr[reg_list_hdr_blk_index] = data;
4014 reg_list_hdr_blk_index++;
4015
4016 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
4017 dst_ptr[reg_list_hdr_blk_index] = data;
4018 reg_list_hdr_blk_index++;
4019
4020 data = 0x08000000 | (reg_num * 4);
4021 dst_ptr[reg_list_hdr_blk_index] = data;
4022 reg_list_hdr_blk_index++;
4023
4024 for (k = 0; k < reg_num; k++) {
4025 data = cs_data[i].section[j].extent[k];
4026 dst_ptr[reg_list_blk_index + k] = data;
4027 }
4028 reg_list_mc_addr += reg_num * 4;
4029 reg_list_blk_index += reg_num;
4030 }
4031 }
4032 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
4033
4034 radeon_bo_kunmap(rdev->rlc.clear_state_obj);
4035 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4036
4037 return 0;
4038}
4039
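
A standalone demo of the save/restore packing described in the format comment above, run over the first four registers of the list (buffer sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src[4] = { 0x98fc, 0x9830, 0x9834, 0x9838 };
	uint32_t dst[16] = { 0 };
	uint32_t i, j, data;

	/* dw0: (reg2 << 16) | reg1, dw1/dw2: save space, repeated */
	for (i = 0; i < 4; i++) {
		data = src[i] >> 2;
		i++;
		if (i < 4)
			data |= (src[i] >> 2) << 16;
		j = ((i - 1) * 3) / 2;
		dst[j] = data;
	}
	j = (i * 3) / 2;
	dst[j] = 0x00000000;	/* RLC_SAVE_RESTORE_LIST_END_MARKER */

	for (j = 0; j < 8; j++)
		printf("dw%u: 0x%08x\n", j, dst[j]);
	return 0;
}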
4040static void evergreen_rlc_start(struct radeon_device *rdev)
4041{
4042 u32 mask = RLC_ENABLE;
4043
4044 if (rdev->flags & RADEON_IS_IGP) {
4045 mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
4046 }
4047
4048 WREG32(RLC_CNTL, mask);
4049}
4050
4051int evergreen_rlc_resume(struct radeon_device *rdev)
4052{
4053 u32 i;
4054 const __be32 *fw_data;
4055
4056 if (!rdev->rlc_fw)
4057 return -EINVAL;
4058
4059 r600_rlc_stop(rdev);
4060
4061 WREG32(RLC_HB_CNTL, 0);
4062
4063 if (rdev->flags & RADEON_IS_IGP) {
4064 if (rdev->family == CHIP_ARUBA) {
4065 u32 always_on_bitmap =
4066 3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
4067 /* find out the number of active simds */
4068 u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
4069 tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
4070 tmp = hweight32(~tmp);
4071 if (tmp == rdev->config.cayman.max_simds_per_se) {
4072 WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
4073 WREG32(TN_RLC_LB_PARAMS, 0x00601004);
4074 WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
4075 WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
4076 WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
4077 }
4078 } else {
4079 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
4080 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
4081 }
4082 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4083 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4084 } else {
4085 WREG32(RLC_HB_BASE, 0);
4086 WREG32(RLC_HB_RPTR, 0);
4087 WREG32(RLC_HB_WPTR, 0);
4088 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
4089 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
4090 }
4091 WREG32(RLC_MC_CNTL, 0);
4092 WREG32(RLC_UCODE_CNTL, 0);
4093
4094 fw_data = (const __be32 *)rdev->rlc_fw->data;
4095 if (rdev->family >= CHIP_ARUBA) {
4096 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
4097 WREG32(RLC_UCODE_ADDR, i);
4098 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4099 }
4100 } else if (rdev->family >= CHIP_CAYMAN) {
4101 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
4102 WREG32(RLC_UCODE_ADDR, i);
4103 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4104 }
4105 } else {
4106 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
4107 WREG32(RLC_UCODE_ADDR, i);
4108 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4109 }
4110 }
4111 WREG32(RLC_UCODE_ADDR, 0);
4112
4113 evergreen_rlc_start(rdev);
4114
4115 return 0;
4116}
4117
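
For illustration, a standalone sketch of the big-endian ucode upload loop above, with the RLC_UCODE_ADDR/RLC_UCODE_DATA writes replaced by prints (the firmware bytes are invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_to_cpu_sketch(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* two big-endian firmware dwords */
	uint8_t fw[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	uint32_t i;

	for (i = 0; i < 2; i++)		/* WREG32(RLC_UCODE_ADDR, i); */
		printf("addr %u data 0x%08x\n", i,
		       be32_to_cpu_sketch(fw + 4 * i)); /* ..._DATA write */
	return 0;
}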
 /* Interrupts */
 
 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
@@ -3805,6 +4193,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
 	u32 dma_cntl, dma_cntl1 = 0;
+	u32 thermal_int = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3824,6 +4213,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	if (rdev->family == CHIP_ARUBA)
+		thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+	else
+		thermal_int = RREG32(CG_THERMAL_INT) &
+			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
 
 	afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 	afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
@@ -3869,6 +4264,11 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		}
 	}
 
+	if (rdev->irq.dpm_thermal) {
+		DRM_DEBUG("dpm thermal\n");
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+	}
+
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -3990,6 +4390,10 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	WREG32(DC_HPD4_INT_CONTROL, hpd4);
 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+	if (rdev->family == CHIP_ARUBA)
+		WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
+	else
+		WREG32(CG_THERMAL_INT, thermal_int);
 
 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
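
A hedged sketch of how the trip-point fields that pair with the masks unmasked above are typically composed; the degree values and the direct use of degrees C are illustrative, not the driver's exact conversion:

#include <stdint.h>
#include <stdio.h>

#define DIG_THERM_INTH(x)	((uint32_t)(x) << 8)
#define DIG_THERM_INTL(x)	((uint32_t)(x) << 16)
#define THERM_INT_MASK_HIGH	(1u << 24)
#define THERM_INT_MASK_LOW	(1u << 25)

int main(void)
{
	uint32_t thermal_int = 0;
	int low = 0, high = 90;		/* trip points, deg C */

	/* program both trip points and unmask both directions */
	thermal_int |= DIG_THERM_INTH(high) | DIG_THERM_INTL(low) |
		       THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	printf("CG_THERMAL_INT = 0x%08x\n", thermal_int);
	return 0;
}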
@@ -4181,6 +4585,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	u32 ring_index;
 	bool queue_hotplug = false;
 	bool queue_hdmi = false;
+	bool queue_thermal = false;
 
 	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
@@ -4502,6 +4907,16 @@ restart_ih:
 			DRM_DEBUG("IH: DMA trap\n");
 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
 			break;
+		case 230: /* thermal low to high */
+			DRM_DEBUG("IH: thermal low to high\n");
+			rdev->pm.dpm.thermal.high_to_low = false;
+			queue_thermal = true;
+			break;
+		case 231: /* thermal high to low */
+			DRM_DEBUG("IH: thermal high to low\n");
+			rdev->pm.dpm.thermal.high_to_low = true;
+			queue_thermal = true;
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
@@ -4524,6 +4939,8 @@ restart_ih:
 		schedule_work(&rdev->hotplug_work);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
+	if (queue_thermal && rdev->pm.dpm_enabled)
+		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	atomic_set(&rdev->ih.lock, 0);
@@ -4680,6 +5097,8 @@ static int evergreen_startup(struct radeon_device *rdev)
 
 	/* enable pcie gen2 link */
 	evergreen_pcie_gen2_enable(rdev);
+	/* enable aspm */
+	evergreen_program_aspm(rdev);
 
 	if (ASIC_IS_DCE5(rdev)) {
 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
@@ -4725,6 +5144,18 @@ static int evergreen_startup(struct radeon_device *rdev)
 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
 	}
 
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
+		rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
+		rdev->rlc.cs_data = evergreen_cs_data;
+		r = sumo_rlc_init(rdev);
+		if (r) {
+			DRM_ERROR("Failed to init rlc BOs!\n");
+			return r;
+		}
+	}
+
 	/* allocate wb buffer */
 	r = radeon_wb_init(rdev);
 	if (r)
@@ -4956,6 +5387,8 @@ int evergreen_init(struct radeon_device *rdev)
 		r700_cp_fini(rdev);
 		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			sumo_rlc_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
 		radeon_irq_kms_fini(rdev);
@@ -4984,6 +5417,8 @@ void evergreen_fini(struct radeon_device *rdev)
 	r700_cp_fini(rdev);
 	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		sumo_rlc_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
@@ -5061,3 +5496,150 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 	}
 }
5499
5500void evergreen_program_aspm(struct radeon_device *rdev)
5501{
5502 u32 data, orig;
5503 u32 pcie_lc_cntl, pcie_lc_cntl_old;
5504 bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
5505 /* fusion_platform = true
5506 * if the system is a fusion system
5507 * (APU or DGPU in a fusion system).
5508 * todo: check if the system is a fusion platform.
5509 */
5510 bool fusion_platform = false;
5511
5512 if (!(rdev->flags & RADEON_IS_PCIE))
5513 return;
5514
5515 switch (rdev->family) {
5516 case CHIP_CYPRESS:
5517 case CHIP_HEMLOCK:
5518 case CHIP_JUNIPER:
5519 case CHIP_REDWOOD:
5520 case CHIP_CEDAR:
5521 case CHIP_SUMO:
5522 case CHIP_SUMO2:
5523 case CHIP_PALM:
5524 case CHIP_ARUBA:
5525 disable_l0s = true;
5526 break;
5527 default:
5528 disable_l0s = false;
5529 break;
5530 }
5531
5532 if (rdev->flags & RADEON_IS_IGP)
5533 fusion_platform = true; /* XXX also dGPUs in a fusion system */
5534
5535 data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
5536 if (fusion_platform)
5537 data &= ~MULTI_PIF;
5538 else
5539 data |= MULTI_PIF;
5540 if (data != orig)
5541 WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
5542
5543 data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
5544 if (fusion_platform)
5545 data &= ~MULTI_PIF;
5546 else
5547 data |= MULTI_PIF;
5548 if (data != orig)
5549 WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
5550
5551 pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
5552 pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
5553 if (!disable_l0s) {
5554 if (rdev->family >= CHIP_BARTS)
5555 pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
5556 else
5557 pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
5558 }
5559
5560 if (!disable_l1) {
5561 if (rdev->family >= CHIP_BARTS)
5562 pcie_lc_cntl |= LC_L1_INACTIVITY(7);
5563 else
5564 pcie_lc_cntl |= LC_L1_INACTIVITY(8);
5565
5566 if (!disable_plloff_in_l1) {
5567 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
5568 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
5569 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
5570 if (data != orig)
5571 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
5572
5573 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
5574 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
5575 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
5576 if (data != orig)
5577 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
5578
5579 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
5580 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
5581 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
5582 if (data != orig)
5583 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
5584
5585 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
5586 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
5587 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
5588 if (data != orig)
5589 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
5590
5591 if (rdev->family >= CHIP_BARTS) {
5592 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
5593 data &= ~PLL_RAMP_UP_TIME_0_MASK;
5594 data |= PLL_RAMP_UP_TIME_0(4);
5595 if (data != orig)
5596 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
5597
5598 data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
5599 data &= ~PLL_RAMP_UP_TIME_1_MASK;
5600 data |= PLL_RAMP_UP_TIME_1(4);
5601 if (data != orig)
5602 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
5603
5604 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
5605 data &= ~PLL_RAMP_UP_TIME_0_MASK;
5606 data |= PLL_RAMP_UP_TIME_0(4);
5607 if (data != orig)
5608 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
5609
5610 data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
5611 data &= ~PLL_RAMP_UP_TIME_1_MASK;
5612 data |= PLL_RAMP_UP_TIME_1(4);
5613 if (data != orig)
5614 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
5615 }
5616
5617 data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
5618 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
5619 data |= LC_DYN_LANES_PWR_STATE(3);
5620 if (data != orig)
5621 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
5622
5623 if (rdev->family >= CHIP_BARTS) {
5624 data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
5625 data &= ~LS2_EXIT_TIME_MASK;
5626 data |= LS2_EXIT_TIME(1);
5627 if (data != orig)
5628 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
5629
5630 data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
5631 data &= ~LS2_EXIT_TIME_MASK;
5632 data |= LS2_EXIT_TIME(1);
5633 if (data != orig)
5634 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
5635 }
5636 }
5637 }
5638
5639 /* evergreen parts only */
5640 if (rdev->family < CHIP_BARTS)
5641 pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
5642
5643 if (pcie_lc_cntl != pcie_lc_cntl_old)
5644 WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
5645}
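
evergreen_program_aspm() repeats a read/modify/write-if-changed idiom for the indirect PIF PHY registers; a standalone sketch of that pattern with a simulated register (names and values illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t pif_phy_reg = 0x0000ffff;	/* pretend hardware */

static void rmw_if_changed(uint32_t clear, uint32_t set)
{
	uint32_t data, orig;

	data = orig = pif_phy_reg;		/* RREG32_PIF_PHY0() */
	data &= ~clear;
	data |= set;
	if (data != orig)			/* skip redundant writes */
		pif_phy_reg = data;		/* WREG32_PIF_PHY0() */
}

int main(void)
{
	/* e.g. force a 3-bit field at bit 10 to 7 */
	rmw_if_changed(0x7u << 10, 0x7u << 10);
	printf("reg = 0x%08x\n", pif_phy_reg);
	return 0;
}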
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index ed7c8a768092..b9c6f7675e59 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -128,14 +128,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
 	uint8_t *frame = buffer + 3;
-
-	/* Our header values (type, version, length) should be alright, Intel
-	 * is using the same. Checksum function also seems to be OK, it works
-	 * fine for audio infoframe. However calculated value is always lower
-	 * by 2 in comparison to fglrx. It breaks displaying anything in case
-	 * of TVs that strictly check the checksum. Hack it manually here to
-	 * workaround this issue. */
-	frame[0x0] += 2;
+	uint8_t *header = buffer;
 
 	WREG32(AFMT_AVI_INFO0 + offset,
 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -144,7 +137,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
 	WREG32(AFMT_AVI_INFO2 + offset,
 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
 	WREG32(AFMT_AVI_INFO3 + offset,
-		frame[0xC] | (frame[0xD] << 8));
+		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
 }
 
 static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
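
A standalone sketch of the packing behind the change above: with the checksum hack dropped, the infoframe version byte (header[1]) is carried in the top byte of AFMT_AVI_INFO3 instead. The header bytes are the standard AVI values; the payload is invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 3-byte header (type 0x82, version 2, length 13) + payload */
	uint8_t buffer[17] = { 0x82, 0x02, 0x0d };
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;
	uint32_t avi_info3 = frame[0xC] | (frame[0xD] << 8) |
			     ((uint32_t)header[1] << 24);

	printf("AFMT_AVI_INFO3 = 0x%08x\n", avi_info3);
	return 0;
}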
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 881aba23c477..8a4e641f0e3c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -24,7 +24,16 @@
 #ifndef __EVERGREEN_REG_H__
 #define __EVERGREEN_REG_H__
 
+/* trinity */
+#define TN_SMC_IND_INDEX_0				0x200
+#define TN_SMC_IND_DATA_0				0x204
+
 /* evergreen */
+#define EVERGREEN_PIF_PHY0_INDEX			0x8
+#define EVERGREEN_PIF_PHY0_DATA				0xc
+#define EVERGREEN_PIF_PHY1_INDEX			0x10
+#define EVERGREEN_PIF_PHY1_DATA				0x14
+
 #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS		0x310
 #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH		0x324
 #define EVERGREEN_D3VGA_CONTROL				0x3e0
@@ -40,6 +49,9 @@
 #define EVERGREEN_AUDIO_PLL1_DIV			0x5b4
 #define EVERGREEN_AUDIO_PLL1_UNK			0x5bc
 
+#define EVERGREEN_CG_IND_ADDR				0x8f8
+#define EVERGREEN_CG_IND_DATA				0x8fc
+
 #define EVERGREEN_AUDIO_ENABLE				0x5e78
 #define EVERGREEN_AUDIO_VENDOR_ID			0x5ec0
 
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
new file mode 100644
index 000000000000..76ada8cfe902
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __EVERGREEN_SMC_H__
24#define __EVERGREEN_SMC_H__
25
26#include "rv770_smc.h"
27
28#pragma pack(push, 1)
29
30#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
31
32struct SMC_Evergreen_MCRegisterAddress
33{
34 uint16_t s0;
35 uint16_t s1;
36};
37
38typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
39
40
41struct SMC_Evergreen_MCRegisterSet
42{
43 uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
44};
45
46typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
47
48struct SMC_Evergreen_MCRegisters
49{
50 uint8_t last;
51 uint8_t reserved[3];
52 SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
53 SMC_Evergreen_MCRegisterSet data[5];
54};
55
56typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
57
58#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
59
60#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
61#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
62#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
63
64
65#pragma pack(pop)
66
67#endif
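
A hedged usage sketch for the table layout above: the SMC consumes `last` address/value pairs per set, exactly as the struct lays them out. Field sizes mirror the header, but the register offsets and values are illustrative, and the kernel header additionally forces 1-byte packing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_REGS 16	/* SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE */

struct mc_reg_address { uint16_t s0, s1; };
struct mc_reg_set { uint32_t value[NUM_REGS]; };

struct mc_regs {
	uint8_t last;
	uint8_t reserved[3];
	struct mc_reg_address address[NUM_REGS];
	struct mc_reg_set data[5];
};

int main(void)
{
	struct mc_regs table;

	memset(&table, 0, sizeof(table));
	table.address[0].s0 = 0x28a0;	/* e.g. MC_SEQ_RAS_TIMING */
	table.address[0].s1 = 0x2a6c;	/* its _LP shadow */
	table.data[0].value[0] = 0x12345678;
	table.last = 1;			/* one valid entry */
	printf("entries: %u\n", table.last);
	return 0;
}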
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 75c05631146d..a7baf67aef6c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -48,6 +48,293 @@
 #define SUMO_GB_ADDR_CONFIG_GOLDEN		0x02010002
 #define SUMO2_GB_ADDR_CONFIG_GOLDEN		0x02010002
 
51/* pm registers */
52#define SMC_MSG 0x20c
53#define HOST_SMC_MSG(x) ((x) << 0)
54#define HOST_SMC_MSG_MASK (0xff << 0)
55#define HOST_SMC_MSG_SHIFT 0
56#define HOST_SMC_RESP(x) ((x) << 8)
57#define HOST_SMC_RESP_MASK (0xff << 8)
58#define HOST_SMC_RESP_SHIFT 8
59#define SMC_HOST_MSG(x) ((x) << 16)
60#define SMC_HOST_MSG_MASK (0xff << 16)
61#define SMC_HOST_MSG_SHIFT 16
62#define SMC_HOST_RESP(x) ((x) << 24)
63#define SMC_HOST_RESP_MASK (0xff << 24)
64#define SMC_HOST_RESP_SHIFT 24
65
66#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
67#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
68#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
69#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
70#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
71#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
72#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
73
74#define CG_SPLL_FUNC_CNTL 0x600
75#define SPLL_RESET (1 << 0)
76#define SPLL_SLEEP (1 << 1)
77#define SPLL_BYPASS_EN (1 << 3)
78#define SPLL_REF_DIV(x) ((x) << 4)
79#define SPLL_REF_DIV_MASK (0x3f << 4)
80#define SPLL_PDIV_A(x) ((x) << 20)
81#define SPLL_PDIV_A_MASK (0x7f << 20)
82#define CG_SPLL_FUNC_CNTL_2 0x604
83#define SCLK_MUX_SEL(x) ((x) << 0)
84#define SCLK_MUX_SEL_MASK (0x1ff << 0)
85#define CG_SPLL_FUNC_CNTL_3 0x608
86#define SPLL_FB_DIV(x) ((x) << 0)
87#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
88#define SPLL_DITHEN (1 << 28)
89
90#define MPLL_CNTL_MODE 0x61c
91# define SS_SSEN (1 << 24)
92# define SS_DSMODE_EN (1 << 25)
93
94#define MPLL_AD_FUNC_CNTL 0x624
95#define CLKF(x) ((x) << 0)
96#define CLKF_MASK (0x7f << 0)
97#define CLKR(x) ((x) << 7)
98#define CLKR_MASK (0x1f << 7)
99#define CLKFRAC(x) ((x) << 12)
100#define CLKFRAC_MASK (0x1f << 12)
101#define YCLK_POST_DIV(x) ((x) << 17)
102#define YCLK_POST_DIV_MASK (3 << 17)
103#define IBIAS(x) ((x) << 20)
104#define IBIAS_MASK (0x3ff << 20)
105#define RESET (1 << 30)
106#define PDNB (1 << 31)
107#define MPLL_AD_FUNC_CNTL_2 0x628
108#define BYPASS (1 << 19)
109#define BIAS_GEN_PDNB (1 << 24)
110#define RESET_EN (1 << 25)
111#define VCO_MODE (1 << 29)
112#define MPLL_DQ_FUNC_CNTL 0x62c
113#define MPLL_DQ_FUNC_CNTL_2 0x630
114
115#define GENERAL_PWRMGT 0x63c
116# define GLOBAL_PWRMGT_EN (1 << 0)
117# define STATIC_PM_EN (1 << 1)
118# define THERMAL_PROTECTION_DIS (1 << 2)
119# define THERMAL_PROTECTION_TYPE (1 << 3)
120# define ENABLE_GEN2PCIE (1 << 4)
121# define ENABLE_GEN2XSP (1 << 5)
122# define SW_SMIO_INDEX(x) ((x) << 6)
123# define SW_SMIO_INDEX_MASK (3 << 6)
124# define SW_SMIO_INDEX_SHIFT 6
125# define LOW_VOLT_D2_ACPI (1 << 8)
126# define LOW_VOLT_D3_ACPI (1 << 9)
127# define VOLT_PWRMGT_EN (1 << 10)
128# define BACKBIAS_PAD_EN (1 << 18)
129# define BACKBIAS_VALUE (1 << 19)
130# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
131# define AC_DC_SW (1 << 24)
132
133#define SCLK_PWRMGT_CNTL 0x644
134# define SCLK_PWRMGT_OFF (1 << 0)
135# define SCLK_LOW_D1 (1 << 1)
136# define FIR_RESET (1 << 4)
137# define FIR_FORCE_TREND_SEL (1 << 5)
138# define FIR_TREND_MODE (1 << 6)
139# define DYN_GFX_CLK_OFF_EN (1 << 7)
140# define GFX_CLK_FORCE_ON (1 << 8)
141# define GFX_CLK_REQUEST_OFF (1 << 9)
142# define GFX_CLK_FORCE_OFF (1 << 10)
143# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
144# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
145# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
146# define DYN_LIGHT_SLEEP_EN (1 << 14)
147#define MCLK_PWRMGT_CNTL 0x648
148# define DLL_SPEED(x) ((x) << 0)
149# define DLL_SPEED_MASK (0x1f << 0)
150# define MPLL_PWRMGT_OFF (1 << 5)
151# define DLL_READY (1 << 6)
152# define MC_INT_CNTL (1 << 7)
153# define MRDCKA0_PDNB (1 << 8)
154# define MRDCKA1_PDNB (1 << 9)
155# define MRDCKB0_PDNB (1 << 10)
156# define MRDCKB1_PDNB (1 << 11)
157# define MRDCKC0_PDNB (1 << 12)
158# define MRDCKC1_PDNB (1 << 13)
159# define MRDCKD0_PDNB (1 << 14)
160# define MRDCKD1_PDNB (1 << 15)
161# define MRDCKA0_RESET (1 << 16)
162# define MRDCKA1_RESET (1 << 17)
163# define MRDCKB0_RESET (1 << 18)
164# define MRDCKB1_RESET (1 << 19)
165# define MRDCKC0_RESET (1 << 20)
166# define MRDCKC1_RESET (1 << 21)
167# define MRDCKD0_RESET (1 << 22)
168# define MRDCKD1_RESET (1 << 23)
169# define DLL_READY_READ (1 << 24)
170# define USE_DISPLAY_GAP (1 << 25)
171# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
172# define MPLL_TURNOFF_D2 (1 << 28)
173#define DLL_CNTL 0x64c
174# define MRDCKA0_BYPASS (1 << 24)
175# define MRDCKA1_BYPASS (1 << 25)
176# define MRDCKB0_BYPASS (1 << 26)
177# define MRDCKB1_BYPASS (1 << 27)
178# define MRDCKC0_BYPASS (1 << 28)
179# define MRDCKC1_BYPASS (1 << 29)
180# define MRDCKD0_BYPASS (1 << 30)
181# define MRDCKD1_BYPASS (1 << 31)
182
183#define CG_AT 0x6d4
184# define CG_R(x) ((x) << 0)
185# define CG_R_MASK (0xffff << 0)
186# define CG_L(x) ((x) << 16)
187# define CG_L_MASK (0xffff << 16)
188
189#define CG_DISPLAY_GAP_CNTL 0x714
190# define DISP1_GAP(x) ((x) << 0)
191# define DISP1_GAP_MASK (3 << 0)
192# define DISP2_GAP(x) ((x) << 2)
193# define DISP2_GAP_MASK (3 << 2)
194# define VBI_TIMER_COUNT(x) ((x) << 4)
195# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
196# define VBI_TIMER_UNIT(x) ((x) << 20)
197# define VBI_TIMER_UNIT_MASK (7 << 20)
198# define DISP1_GAP_MCHG(x) ((x) << 24)
199# define DISP1_GAP_MCHG_MASK (3 << 24)
200# define DISP2_GAP_MCHG(x) ((x) << 26)
201# define DISP2_GAP_MCHG_MASK (3 << 26)
202
203#define CG_BIF_REQ_AND_RSP 0x7f4
204#define CG_CLIENT_REQ(x) ((x) << 0)
205#define CG_CLIENT_REQ_MASK (0xff << 0)
206#define CG_CLIENT_REQ_SHIFT 0
207#define CG_CLIENT_RESP(x) ((x) << 8)
208#define CG_CLIENT_RESP_MASK (0xff << 8)
209#define CG_CLIENT_RESP_SHIFT 8
210#define CLIENT_CG_REQ(x) ((x) << 16)
211#define CLIENT_CG_REQ_MASK (0xff << 16)
212#define CLIENT_CG_REQ_SHIFT 16
213#define CLIENT_CG_RESP(x) ((x) << 24)
214#define CLIENT_CG_RESP_MASK (0xff << 24)
215#define CLIENT_CG_RESP_SHIFT 24
216
217#define CG_SPLL_SPREAD_SPECTRUM 0x790
218#define SSEN (1 << 0)
219#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
220
221#define MPLL_SS1 0x85c
222#define CLKV(x) ((x) << 0)
223#define CLKV_MASK (0x3ffffff << 0)
224#define MPLL_SS2 0x860
225#define CLKS(x) ((x) << 0)
226#define CLKS_MASK (0xfff << 0)
227
228#define CG_IND_ADDR 0x8f8
229#define CG_IND_DATA 0x8fc
230/* CGIND regs */
231#define CG_CGTT_LOCAL_0 0x00
232#define CG_CGTT_LOCAL_1 0x01
233#define CG_CGTT_LOCAL_2 0x02
234#define CG_CGTT_LOCAL_3 0x03
235#define CG_CGLS_TILE_0 0x20
236#define CG_CGLS_TILE_1 0x21
237#define CG_CGLS_TILE_2 0x22
238#define CG_CGLS_TILE_3 0x23
239#define CG_CGLS_TILE_4 0x24
240#define CG_CGLS_TILE_5 0x25
241#define CG_CGLS_TILE_6 0x26
242#define CG_CGLS_TILE_7 0x27
243#define CG_CGLS_TILE_8 0x28
244#define CG_CGLS_TILE_9 0x29
245#define CG_CGLS_TILE_10 0x2a
246#define CG_CGLS_TILE_11 0x2b
247
248#define VM_L2_CG 0x15c0
249
250#define MC_CONFIG 0x2000
251
252#define MC_CONFIG_MCD 0x20a0
253#define MC_CG_CONFIG_MCD 0x20a4
254#define MC_RD_ENABLE_MCD(x) ((x) << 8)
255#define MC_RD_ENABLE_MCD_MASK (7 << 8)
256
257#define MC_HUB_MISC_HUB_CG 0x20b8
258#define MC_HUB_MISC_VM_CG 0x20bc
259#define MC_HUB_MISC_SIP_CG 0x20c0
260
261#define MC_XPB_CLK_GAT 0x2478
262
263#define MC_CG_CONFIG 0x25bc
264#define MC_RD_ENABLE(x) ((x) << 4)
265#define MC_RD_ENABLE_MASK (3 << 4)
266
267#define MC_CITF_MISC_RD_CG 0x2648
268#define MC_CITF_MISC_WR_CG 0x264c
269#define MC_CITF_MISC_VM_CG 0x2650
270# define MEM_LS_ENABLE (1 << 19)
271
272#define MC_ARB_BURST_TIME 0x2808
273#define STATE0(x) ((x) << 0)
274#define STATE0_MASK (0x1f << 0)
275#define STATE1(x) ((x) << 5)
276#define STATE1_MASK (0x1f << 5)
277#define STATE2(x) ((x) << 10)
278#define STATE2_MASK (0x1f << 10)
279#define STATE3(x) ((x) << 15)
280#define STATE3_MASK (0x1f << 15)
281
282#define MC_SEQ_RAS_TIMING 0x28a0
283#define MC_SEQ_CAS_TIMING 0x28a4
284#define MC_SEQ_MISC_TIMING 0x28a8
285#define MC_SEQ_MISC_TIMING2 0x28ac
286
287#define MC_SEQ_RD_CTL_D0 0x28b4
288#define MC_SEQ_RD_CTL_D1 0x28b8
289#define MC_SEQ_WR_CTL_D0 0x28bc
290#define MC_SEQ_WR_CTL_D1 0x28c0
291
292#define MC_SEQ_STATUS_M 0x29f4
293# define PMG_PWRSTATE (1 << 16)
294
295#define MC_SEQ_MISC1 0x2a04
296#define MC_SEQ_RESERVE_M 0x2a08
297#define MC_PMG_CMD_EMRS 0x2a0c
298
299#define MC_SEQ_MISC3 0x2a2c
300
301#define MC_SEQ_MISC5 0x2a54
302#define MC_SEQ_MISC6 0x2a58
303
304#define MC_SEQ_MISC7 0x2a64
305
306#define MC_SEQ_CG 0x2a68
307#define CG_SEQ_REQ(x) ((x) << 0)
308#define CG_SEQ_REQ_MASK (0xff << 0)
309#define CG_SEQ_REQ_SHIFT 0
310#define CG_SEQ_RESP(x) ((x) << 8)
311#define CG_SEQ_RESP_MASK (0xff << 8)
312#define CG_SEQ_RESP_SHIFT 8
313#define SEQ_CG_REQ(x) ((x) << 16)
314#define SEQ_CG_REQ_MASK (0xff << 16)
315#define SEQ_CG_REQ_SHIFT 16
316#define SEQ_CG_RESP(x) ((x) << 24)
317#define SEQ_CG_RESP_MASK (0xff << 24)
318#define SEQ_CG_RESP_SHIFT 24
319#define MC_SEQ_RAS_TIMING_LP 0x2a6c
320#define MC_SEQ_CAS_TIMING_LP 0x2a70
321#define MC_SEQ_MISC_TIMING_LP 0x2a74
322#define MC_SEQ_MISC_TIMING2_LP 0x2a78
323#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
324#define MC_SEQ_WR_CTL_D1_LP 0x2a80
325#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
326#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
327
328#define MC_PMG_CMD_MRS 0x2aac
329
330#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
331#define MC_SEQ_RD_CTL_D1_LP 0x2b20
332
333#define MC_PMG_CMD_MRS1 0x2b44
334#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
335
336#define CGTS_SM_CTRL_REG 0x9150
337
 /* Registers */
 
 #define RCU_IND_INDEX				0x100
@@ -90,6 +377,34 @@
 #define CG_VCLK_STATUS				0x61c
 #define CG_SCRATCH1				0x820
 
+#define RLC_CNTL				0x3f00
+#	define RLC_ENABLE			(1 << 0)
+#	define GFX_POWER_GATING_ENABLE		(1 << 7)
+#	define GFX_POWER_GATING_SRC		(1 << 8)
+#	define DYN_PER_SIMD_PG_ENABLE		(1 << 27)
+#	define LB_CNT_SPIM_ACTIVE		(1 << 30)
+#	define LOAD_BALANCE_ENABLE		(1 << 31)
+
+#define RLC_HB_BASE				0x3f10
+#define RLC_HB_CNTL				0x3f0c
+#define RLC_HB_RPTR				0x3f20
+#define RLC_HB_WPTR				0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR			0x3f14
+#define RLC_HB_WPTR_MSB_ADDR			0x3f18
+#define RLC_MC_CNTL				0x3f44
+#define RLC_UCODE_CNTL				0x3f48
+#define RLC_UCODE_ADDR				0x3f2c
+#define RLC_UCODE_DATA				0x3f30
+
+/* new for TN */
+#define TN_RLC_SAVE_AND_RESTORE_BASE		0x3f10
+#define TN_RLC_LB_CNTR_MAX			0x3f14
+#define TN_RLC_LB_CNTR_INIT			0x3f18
+#define TN_RLC_CLEAR_STATE_RESTORE_BASE		0x3f20
+#define TN_RLC_LB_INIT_SIMD_MASK		0x3fe4
+#define TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK	0x3fe8
+#define TN_RLC_LB_PARAMS			0x3fec
+
 #define GRBM_GFX_INDEX				0x802C
 #define		INSTANCE_INDEX(x)		((x) << 0)
 #define		SE_INDEX(x)			((x) << 16)
@@ -503,6 +818,30 @@
 #define CG_THERMAL_CTRL				0x72c
 #define		TOFFSET_MASK			0x00003FE0
 #define		TOFFSET_SHIFT			5
+#define		DIG_THERM_DPM(x)		((x) << 14)
+#define		DIG_THERM_DPM_MASK		0x003FC000
+#define		DIG_THERM_DPM_SHIFT		14
+
+#define CG_THERMAL_INT				0x734
+#define		DIG_THERM_INTH(x)		((x) << 8)
+#define		DIG_THERM_INTH_MASK		0x0000FF00
+#define		DIG_THERM_INTH_SHIFT		8
+#define		DIG_THERM_INTL(x)		((x) << 16)
+#define		DIG_THERM_INTL_MASK		0x00FF0000
+#define		DIG_THERM_INTL_SHIFT		16
+#define		THERM_INT_MASK_HIGH		(1 << 24)
+#define		THERM_INT_MASK_LOW		(1 << 25)
+
+#define TN_CG_THERMAL_INT_CTRL			0x738
+#define		TN_DIG_THERM_INTH(x)		((x) << 0)
+#define		TN_DIG_THERM_INTH_MASK		0x000000FF
+#define		TN_DIG_THERM_INTH_SHIFT		0
+#define		TN_DIG_THERM_INTL(x)		((x) << 8)
+#define		TN_DIG_THERM_INTL_MASK		0x0000FF00
+#define		TN_DIG_THERM_INTL_SHIFT		8
+#define		TN_THERM_INT_MASK_HIGH		(1 << 24)
+#define		TN_THERM_INT_MASK_LOW		(1 << 25)
+
 #define CG_MULT_THERMAL_STATUS			0x740
 #define		ASIC_T(x)			((x) << 16)
 #define		ASIC_T_MASK			0x07FF0000
@@ -510,6 +849,7 @@
 #define CG_TS0_STATUS				0x760
 #define		TS0_ADC_DOUT_MASK		0x000003FF
 #define		TS0_ADC_DOUT_SHIFT		0
+
 /* APU */
 #define CG_THERMAL_STATUS			0x678
 
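
A standalone sketch of extracting the ASIC_T field with the masks above; the driver's raw-to-degrees conversion is more involved and is not shown:

#include <stdint.h>
#include <stdio.h>

#define ASIC_T_MASK	0x07FF0000u
#define ASIC_T_SHIFT	16

int main(void)
{
	uint32_t status = 0x00450000;	/* pretend CG_MULT_THERMAL_STATUS */
	uint32_t temp = (status & ASIC_T_MASK) >> ASIC_T_SHIFT;

	printf("ASIC_T raw field = %u\n", temp);
	return 0;
}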
@@ -992,7 +1332,48 @@
 #define	DMA_PACKET_CONSTANT_FILL			0xd
 #define	DMA_PACKET_NOP					0xf
 
-/* PCIE link stuff */
+/* PIF PHY0 indirect regs */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+/* PIF PHY1 indirect regs */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+/* PCIE PORT indirect regs */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
 #define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
 #define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
 #       define LC_LINK_WIDTH_SHIFT                        0
@@ -1012,6 +1393,9 @@
 #       define LC_SHORT_RECONFIG_EN                       (1 << 11)
 #       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
 #       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
 #define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
 #       define LC_GEN2_EN_STRAP                           (1 << 0)
 #       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
@@ -1020,6 +1404,9 @@
 #       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
 #       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
 #       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_HW_VOLTAGE_IF_CONTROL(x)                ((x) << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_MASK              (3 << 12)
+#       define LC_HW_VOLTAGE_IF_CONTROL_SHIFT             12
 #       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
 #       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
 #       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 84583302b081..f30127cb30ef 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -33,6 +33,135 @@
 #include "atom.h"
 #include "ni_reg.h"
 #include "cayman_blit_shaders.h"
36#include "radeon_ucode.h"
37#include "clearstate_cayman.h"
38
39static u32 tn_rlc_save_restore_register_list[] =
40{
41 0x98fc,
42 0x98f0,
43 0x9834,
44 0x9838,
45 0x9870,
46 0x9874,
47 0x8a14,
48 0x8b24,
49 0x8bcc,
50 0x8b10,
51 0x8c30,
52 0x8d00,
53 0x8d04,
54 0x8c00,
55 0x8c04,
56 0x8c10,
57 0x8c14,
58 0x8d8c,
59 0x8cf0,
60 0x8e38,
61 0x9508,
62 0x9688,
63 0x9608,
64 0x960c,
65 0x9610,
66 0x9614,
67 0x88c4,
68 0x8978,
69 0x88d4,
70 0x900c,
71 0x9100,
72 0x913c,
73 0x90e8,
74 0x9354,
75 0xa008,
76 0x98f8,
77 0x9148,
78 0x914c,
79 0x3f94,
80 0x98f4,
81 0x9b7c,
82 0x3f8c,
83 0x8950,
84 0x8954,
85 0x8a18,
86 0x8b28,
87 0x9144,
88 0x3f90,
89 0x915c,
90 0x9160,
91 0x9178,
92 0x917c,
93 0x9180,
94 0x918c,
95 0x9190,
96 0x9194,
97 0x9198,
98 0x919c,
99 0x91a8,
100 0x91ac,
101 0x91b0,
102 0x91b4,
103 0x91b8,
104 0x91c4,
105 0x91c8,
106 0x91cc,
107 0x91d0,
108 0x91d4,
109 0x91e0,
110 0x91e4,
111 0x91ec,
112 0x91f0,
113 0x91f4,
114 0x9200,
115 0x9204,
116 0x929c,
117 0x8030,
118 0x9150,
119 0x9a60,
120 0x920c,
121 0x9210,
122 0x9228,
123 0x922c,
124 0x9244,
125 0x9248,
126 0x91e8,
127 0x9294,
128 0x9208,
129 0x9224,
130 0x9240,
131 0x9220,
132 0x923c,
133 0x9258,
134 0x9744,
135 0xa200,
136 0xa204,
137 0xa208,
138 0xa20c,
139 0x8d58,
140 0x9030,
141 0x9034,
142 0x9038,
143 0x903c,
144 0x9040,
145 0x9654,
146 0x897c,
147 0xa210,
148 0xa214,
149 0x9868,
150 0xa02c,
151 0x9664,
152 0x9698,
153 0x949c,
154 0x8e10,
155 0x8e18,
156 0x8c50,
157 0x8c58,
158 0x8c60,
159 0x8c68,
160 0x89b4,
161 0x9830,
162 0x802c,
163};
164static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
 
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -44,36 +173,29 @@ extern void evergreen_irq_suspend(struct radeon_device *rdev);
 extern int evergreen_mc_init(struct radeon_device *rdev);
 extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
-extern void si_rlc_fini(struct radeon_device *rdev);
-extern int si_rlc_init(struct radeon_device *rdev);
-
-#define EVERGREEN_PFP_UCODE_SIZE 1120
-#define EVERGREEN_PM4_UCODE_SIZE 1376
-#define EVERGREEN_RLC_UCODE_SIZE 768
-#define BTC_MC_UCODE_SIZE 6024
-
-#define CAYMAN_PFP_UCODE_SIZE 2176
-#define CAYMAN_PM4_UCODE_SIZE 2176
-#define CAYMAN_RLC_UCODE_SIZE 1024
-#define CAYMAN_MC_UCODE_SIZE 6037
-
-#define ARUBA_RLC_UCODE_SIZE 1536
+extern void evergreen_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
 MODULE_FIRMWARE("radeon/BARTS_me.bin");
 MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BARTS_smc.bin");
 MODULE_FIRMWARE("radeon/BTC_rlc.bin");
 MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
 MODULE_FIRMWARE("radeon/TURKS_me.bin");
 MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/TURKS_smc.bin");
 MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
 MODULE_FIRMWARE("radeon/CAICOS_me.bin");
 MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
 MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
 MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
 MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
 MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
 MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
 MODULE_FIRMWARE("radeon/ARUBA_me.bin");
 MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
@@ -566,6 +688,7 @@ int ni_init_microcode(struct radeon_device *rdev)
 	const char *chip_name;
 	const char *rlc_chip_name;
 	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+	size_t smc_req_size = 0;
 	char fw_name[30];
 	int err;
 
@@ -586,6 +709,7 @@ int ni_init_microcode(struct radeon_device *rdev)
 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_TURKS:
 		chip_name = "TURKS";
@@ -594,6 +718,7 @@ int ni_init_microcode(struct radeon_device *rdev)
 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_CAICOS:
 		chip_name = "CAICOS";
@@ -602,6 +727,7 @@ int ni_init_microcode(struct radeon_device *rdev)
 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_CAYMAN:
 		chip_name = "CAYMAN";
@@ -610,6 +736,7 @@ int ni_init_microcode(struct radeon_device *rdev)
 		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
 		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
 		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
+		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
 		break;
 	case CHIP_ARUBA:
 		chip_name = "ARUBA";
@@ -672,6 +799,20 @@ int ni_init_microcode(struct radeon_device *rdev)
 			err = -EINVAL;
 		}
 	}
+
+	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+		err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
+		if (err)
+			goto out;
+		if (rdev->smc_fw->size != smc_req_size) {
+			printk(KERN_ERR
+			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->smc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	}
+
 out:
 	platform_device_unregister(pdev);
 
@@ -692,6 +833,14 @@ out:
 	return err;
 }
 
+int tn_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
+	int actual_temp = (temp / 8) - 49;
+
+	return actual_temp * 1000;
+}
+
 /*
  * Core functions
  */
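A quick sanity check on the arithmetic in the new tn_get_temp() helper above: the low 11 bits of TN_CURRENT_GNB_TEMP hold the raw reading in 1/8-degree steps with a 49 C offset, and the driver reports millidegrees Celsius. A minimal standalone sketch of the same decode, with a made-up register value for illustration:

	#include <stdio.h>

	/* mirrors the tn_get_temp() math, detached from the driver */
	static int tn_decode_temp(unsigned int reg)
	{
		unsigned int temp = reg & 0x7ff;	/* low 11 bits */
		int actual_temp = (temp / 8) - 49;	/* 1/8-degree units, -49 C offset */

		return actual_temp * 1000;		/* millidegrees Celsius */
	}

	int main(void)
	{
		/* hypothetical raw reading 0x1f0 (496): 496 / 8 - 49 = 13 C */
		printf("%d\n", tn_decode_temp(0x1f0));	/* prints 13000 */
		return 0;
	}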
@@ -1027,6 +1176,16 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
 
 	udelay(50);
+
+	/* set clockgating golden values on TN */
+	if (rdev->family == CHIP_ARUBA) {
+		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
+		tmp &= ~0x00380000;
+		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
+		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
+		tmp &= ~0x0e000000;
+		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
+	}
 }
 
 /*
@@ -1928,6 +2087,8 @@ static int cayman_startup(struct radeon_device *rdev)
 
 	/* enable pcie gen2 link */
 	evergreen_pcie_gen2_enable(rdev);
+	/* enable aspm */
+	evergreen_program_aspm(rdev);
 
 	if (rdev->flags & RADEON_IS_IGP) {
 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -1972,7 +2133,10 @@ static int cayman_startup(struct radeon_device *rdev)
 
 	/* allocate rlc buffers */
 	if (rdev->flags & RADEON_IS_IGP) {
-		r = si_rlc_init(rdev);
+		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
+		rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
+		rdev->rlc.cs_data = cayman_cs_data;
+		r = sumo_rlc_init(rdev);
 		if (r) {
 			DRM_ERROR("Failed to init rlc BOs!\n");
 			return r;
@@ -2229,7 +2393,7 @@ int cayman_init(struct radeon_device *rdev)
 		cayman_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		if (rdev->flags & RADEON_IS_IGP)
-			si_rlc_fini(rdev);
+			sumo_rlc_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
 		radeon_vm_manager_fini(rdev);
@@ -2260,7 +2424,7 @@ void cayman_fini(struct radeon_device *rdev)
 	cayman_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	if (rdev->flags & RADEON_IS_IGP)
-		si_rlc_fini(rdev);
+		sumo_rlc_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_vm_manager_fini(rdev);
 	radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
new file mode 100644
index 000000000000..a4cb99c2da85
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -0,0 +1,4332 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "nid.h"
27#include "r600_dpm.h"
28#include "ni_dpm.h"
29#include "atom.h"
30#include <linux/math64.h>
31#include <linux/seq_file.h>
32
33#define MC_CG_ARB_FREQ_F0 0x0a
34#define MC_CG_ARB_FREQ_F1 0x0b
35#define MC_CG_ARB_FREQ_F2 0x0c
36#define MC_CG_ARB_FREQ_F3 0x0d
37
38#define SMC_RAM_END 0xC000
39
40static const struct ni_cac_weights cac_weights_cayman_xt =
41{
42 0x15,
43 0x2,
44 0x19,
45 0x2,
46 0x8,
47 0x14,
48 0x2,
49 0x16,
50 0xE,
51 0x17,
52 0x13,
53 0x2B,
54 0x10,
55 0x7,
56 0x5,
57 0x5,
58 0x5,
59 0x2,
60 0x3,
61 0x9,
62 0x10,
63 0x10,
64 0x2B,
65 0xA,
66 0x9,
67 0x4,
68 0xD,
69 0xD,
70 0x3E,
71 0x18,
72 0x14,
73 0,
74 0x3,
75 0x3,
76 0x5,
77 0,
78 0x2,
79 0,
80 0,
81 0,
82 0,
83 0,
84 0,
85 0,
86 0,
87 0,
88 0x1CC,
89 0,
90 0x164,
91 1,
92 1,
93 1,
94 1,
95 12,
96 12,
97 12,
98 0x12,
99 0x1F,
100 132,
101 5,
102 7,
103 0,
104 { 0, 0, 0, 0, 0, 0, 0, 0 },
105 { 0, 0, 0, 0 },
106 true
107};
108
109static const struct ni_cac_weights cac_weights_cayman_pro =
110{
111 0x16,
112 0x4,
113 0x10,
114 0x2,
115 0xA,
116 0x16,
117 0x2,
118 0x18,
119 0x10,
120 0x1A,
121 0x16,
122 0x2D,
123 0x12,
124 0xA,
125 0x6,
126 0x6,
127 0x6,
128 0x2,
129 0x4,
130 0xB,
131 0x11,
132 0x11,
133 0x2D,
134 0xC,
135 0xC,
136 0x7,
137 0x10,
138 0x10,
139 0x3F,
140 0x1A,
141 0x16,
142 0,
143 0x7,
144 0x4,
145 0x6,
146 1,
147 0x2,
148 0x1,
149 0,
150 0,
151 0,
152 0,
153 0,
154 0,
155 0x30,
156 0,
157 0x1CF,
158 0,
159 0x166,
160 1,
161 1,
162 1,
163 1,
164 12,
165 12,
166 12,
167 0x15,
168 0x1F,
169 132,
170 6,
171 6,
172 0,
173 { 0, 0, 0, 0, 0, 0, 0, 0 },
174 { 0, 0, 0, 0 },
175 true
176};
177
178static const struct ni_cac_weights cac_weights_cayman_le =
179{
180 0x7,
181 0xE,
182 0x1,
183 0xA,
184 0x1,
185 0x3F,
186 0x2,
187 0x18,
188 0x10,
189 0x1A,
190 0x1,
191 0x3F,
192 0x1,
193 0xE,
194 0x6,
195 0x6,
196 0x6,
197 0x2,
198 0x4,
199 0x9,
200 0x1A,
201 0x1A,
202 0x2C,
203 0xA,
204 0x11,
205 0x8,
206 0x19,
207 0x19,
208 0x1,
209 0x1,
210 0x1A,
211 0,
212 0x8,
213 0x5,
214 0x8,
215 0x1,
216 0x3,
217 0x1,
218 0,
219 0,
220 0,
221 0,
222 0,
223 0,
224 0x38,
225 0x38,
226 0x239,
227 0x3,
228 0x18A,
229 1,
230 1,
231 1,
232 1,
233 12,
234 12,
235 12,
236 0x15,
237 0x22,
238 132,
239 6,
240 6,
241 0,
242 { 0, 0, 0, 0, 0, 0, 0, 0 },
243 { 0, 0, 0, 0 },
244 true
245};
246
247#define NISLANDS_MGCG_SEQUENCE 300
248
249static const u32 cayman_cgcg_cgls_default[] =
250{
251 0x000008f8, 0x00000010, 0xffffffff,
252 0x000008fc, 0x00000000, 0xffffffff,
253 0x000008f8, 0x00000011, 0xffffffff,
254 0x000008fc, 0x00000000, 0xffffffff,
255 0x000008f8, 0x00000012, 0xffffffff,
256 0x000008fc, 0x00000000, 0xffffffff,
257 0x000008f8, 0x00000013, 0xffffffff,
258 0x000008fc, 0x00000000, 0xffffffff,
259 0x000008f8, 0x00000014, 0xffffffff,
260 0x000008fc, 0x00000000, 0xffffffff,
261 0x000008f8, 0x00000015, 0xffffffff,
262 0x000008fc, 0x00000000, 0xffffffff,
263 0x000008f8, 0x00000016, 0xffffffff,
264 0x000008fc, 0x00000000, 0xffffffff,
265 0x000008f8, 0x00000017, 0xffffffff,
266 0x000008fc, 0x00000000, 0xffffffff,
267 0x000008f8, 0x00000018, 0xffffffff,
268 0x000008fc, 0x00000000, 0xffffffff,
269 0x000008f8, 0x00000019, 0xffffffff,
270 0x000008fc, 0x00000000, 0xffffffff,
271 0x000008f8, 0x0000001a, 0xffffffff,
272 0x000008fc, 0x00000000, 0xffffffff,
273 0x000008f8, 0x0000001b, 0xffffffff,
274 0x000008fc, 0x00000000, 0xffffffff,
275 0x000008f8, 0x00000020, 0xffffffff,
276 0x000008fc, 0x00000000, 0xffffffff,
277 0x000008f8, 0x00000021, 0xffffffff,
278 0x000008fc, 0x00000000, 0xffffffff,
279 0x000008f8, 0x00000022, 0xffffffff,
280 0x000008fc, 0x00000000, 0xffffffff,
281 0x000008f8, 0x00000023, 0xffffffff,
282 0x000008fc, 0x00000000, 0xffffffff,
283 0x000008f8, 0x00000024, 0xffffffff,
284 0x000008fc, 0x00000000, 0xffffffff,
285 0x000008f8, 0x00000025, 0xffffffff,
286 0x000008fc, 0x00000000, 0xffffffff,
287 0x000008f8, 0x00000026, 0xffffffff,
288 0x000008fc, 0x00000000, 0xffffffff,
289 0x000008f8, 0x00000027, 0xffffffff,
290 0x000008fc, 0x00000000, 0xffffffff,
291 0x000008f8, 0x00000028, 0xffffffff,
292 0x000008fc, 0x00000000, 0xffffffff,
293 0x000008f8, 0x00000029, 0xffffffff,
294 0x000008fc, 0x00000000, 0xffffffff,
295 0x000008f8, 0x0000002a, 0xffffffff,
296 0x000008fc, 0x00000000, 0xffffffff,
297 0x000008f8, 0x0000002b, 0xffffffff,
298 0x000008fc, 0x00000000, 0xffffffff
299};
300#define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32))
301
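/* Note: each of the cayman_* sequence tables in this file is a flat array of
 * (register, value, mask) triples, as the comment on cayman_sysls_default
 * further down spells out; that is why every *_LENGTH macro divides the
 * total byte size by 3 * sizeof(u32) to get a triple count. */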
302static const u32 cayman_cgcg_cgls_disable[] =
303{
304 0x000008f8, 0x00000010, 0xffffffff,
305 0x000008fc, 0xffffffff, 0xffffffff,
306 0x000008f8, 0x00000011, 0xffffffff,
307 0x000008fc, 0xffffffff, 0xffffffff,
308 0x000008f8, 0x00000012, 0xffffffff,
309 0x000008fc, 0xffffffff, 0xffffffff,
310 0x000008f8, 0x00000013, 0xffffffff,
311 0x000008fc, 0xffffffff, 0xffffffff,
312 0x000008f8, 0x00000014, 0xffffffff,
313 0x000008fc, 0xffffffff, 0xffffffff,
314 0x000008f8, 0x00000015, 0xffffffff,
315 0x000008fc, 0xffffffff, 0xffffffff,
316 0x000008f8, 0x00000016, 0xffffffff,
317 0x000008fc, 0xffffffff, 0xffffffff,
318 0x000008f8, 0x00000017, 0xffffffff,
319 0x000008fc, 0xffffffff, 0xffffffff,
320 0x000008f8, 0x00000018, 0xffffffff,
321 0x000008fc, 0xffffffff, 0xffffffff,
322 0x000008f8, 0x00000019, 0xffffffff,
323 0x000008fc, 0xffffffff, 0xffffffff,
324 0x000008f8, 0x0000001a, 0xffffffff,
325 0x000008fc, 0xffffffff, 0xffffffff,
326 0x000008f8, 0x0000001b, 0xffffffff,
327 0x000008fc, 0xffffffff, 0xffffffff,
328 0x000008f8, 0x00000020, 0xffffffff,
329 0x000008fc, 0x00000000, 0xffffffff,
330 0x000008f8, 0x00000021, 0xffffffff,
331 0x000008fc, 0x00000000, 0xffffffff,
332 0x000008f8, 0x00000022, 0xffffffff,
333 0x000008fc, 0x00000000, 0xffffffff,
334 0x000008f8, 0x00000023, 0xffffffff,
335 0x000008fc, 0x00000000, 0xffffffff,
336 0x000008f8, 0x00000024, 0xffffffff,
337 0x000008fc, 0x00000000, 0xffffffff,
338 0x000008f8, 0x00000025, 0xffffffff,
339 0x000008fc, 0x00000000, 0xffffffff,
340 0x000008f8, 0x00000026, 0xffffffff,
341 0x000008fc, 0x00000000, 0xffffffff,
342 0x000008f8, 0x00000027, 0xffffffff,
343 0x000008fc, 0x00000000, 0xffffffff,
344 0x000008f8, 0x00000028, 0xffffffff,
345 0x000008fc, 0x00000000, 0xffffffff,
346 0x000008f8, 0x00000029, 0xffffffff,
347 0x000008fc, 0x00000000, 0xffffffff,
348 0x000008f8, 0x0000002a, 0xffffffff,
349 0x000008fc, 0x00000000, 0xffffffff,
350 0x000008f8, 0x0000002b, 0xffffffff,
351 0x000008fc, 0x00000000, 0xffffffff,
352 0x00000644, 0x000f7902, 0x001f4180,
353 0x00000644, 0x000f3802, 0x001f4180
354};
355#define CAYMAN_CGCG_CGLS_DISABLE_LENGTH sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32))
356
357static const u32 cayman_cgcg_cgls_enable[] =
358{
359 0x00000644, 0x000f7882, 0x001f4080,
360 0x000008f8, 0x00000010, 0xffffffff,
361 0x000008fc, 0x00000000, 0xffffffff,
362 0x000008f8, 0x00000011, 0xffffffff,
363 0x000008fc, 0x00000000, 0xffffffff,
364 0x000008f8, 0x00000012, 0xffffffff,
365 0x000008fc, 0x00000000, 0xffffffff,
366 0x000008f8, 0x00000013, 0xffffffff,
367 0x000008fc, 0x00000000, 0xffffffff,
368 0x000008f8, 0x00000014, 0xffffffff,
369 0x000008fc, 0x00000000, 0xffffffff,
370 0x000008f8, 0x00000015, 0xffffffff,
371 0x000008fc, 0x00000000, 0xffffffff,
372 0x000008f8, 0x00000016, 0xffffffff,
373 0x000008fc, 0x00000000, 0xffffffff,
374 0x000008f8, 0x00000017, 0xffffffff,
375 0x000008fc, 0x00000000, 0xffffffff,
376 0x000008f8, 0x00000018, 0xffffffff,
377 0x000008fc, 0x00000000, 0xffffffff,
378 0x000008f8, 0x00000019, 0xffffffff,
379 0x000008fc, 0x00000000, 0xffffffff,
380 0x000008f8, 0x0000001a, 0xffffffff,
381 0x000008fc, 0x00000000, 0xffffffff,
382 0x000008f8, 0x0000001b, 0xffffffff,
383 0x000008fc, 0x00000000, 0xffffffff,
384 0x000008f8, 0x00000020, 0xffffffff,
385 0x000008fc, 0xffffffff, 0xffffffff,
386 0x000008f8, 0x00000021, 0xffffffff,
387 0x000008fc, 0xffffffff, 0xffffffff,
388 0x000008f8, 0x00000022, 0xffffffff,
389 0x000008fc, 0xffffffff, 0xffffffff,
390 0x000008f8, 0x00000023, 0xffffffff,
391 0x000008fc, 0xffffffff, 0xffffffff,
392 0x000008f8, 0x00000024, 0xffffffff,
393 0x000008fc, 0xffffffff, 0xffffffff,
394 0x000008f8, 0x00000025, 0xffffffff,
395 0x000008fc, 0xffffffff, 0xffffffff,
396 0x000008f8, 0x00000026, 0xffffffff,
397 0x000008fc, 0xffffffff, 0xffffffff,
398 0x000008f8, 0x00000027, 0xffffffff,
399 0x000008fc, 0xffffffff, 0xffffffff,
400 0x000008f8, 0x00000028, 0xffffffff,
401 0x000008fc, 0xffffffff, 0xffffffff,
402 0x000008f8, 0x00000029, 0xffffffff,
403 0x000008fc, 0xffffffff, 0xffffffff,
404 0x000008f8, 0x0000002a, 0xffffffff,
405 0x000008fc, 0xffffffff, 0xffffffff,
406 0x000008f8, 0x0000002b, 0xffffffff,
407 0x000008fc, 0xffffffff, 0xffffffff
408};
409#define CAYMAN_CGCG_CGLS_ENABLE_LENGTH sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32))
410
411static const u32 cayman_mgcg_default[] =
412{
413 0x0000802c, 0xc0000000, 0xffffffff,
414 0x00003fc4, 0xc0000000, 0xffffffff,
415 0x00005448, 0x00000100, 0xffffffff,
416 0x000055e4, 0x00000100, 0xffffffff,
417 0x0000160c, 0x00000100, 0xffffffff,
418 0x00008984, 0x06000100, 0xffffffff,
419 0x0000c164, 0x00000100, 0xffffffff,
420 0x00008a18, 0x00000100, 0xffffffff,
421 0x0000897c, 0x06000100, 0xffffffff,
422 0x00008b28, 0x00000100, 0xffffffff,
423 0x00009144, 0x00800200, 0xffffffff,
424 0x00009a60, 0x00000100, 0xffffffff,
425 0x00009868, 0x00000100, 0xffffffff,
426 0x00008d58, 0x00000100, 0xffffffff,
427 0x00009510, 0x00000100, 0xffffffff,
428 0x0000949c, 0x00000100, 0xffffffff,
429 0x00009654, 0x00000100, 0xffffffff,
430 0x00009030, 0x00000100, 0xffffffff,
431 0x00009034, 0x00000100, 0xffffffff,
432 0x00009038, 0x00000100, 0xffffffff,
433 0x0000903c, 0x00000100, 0xffffffff,
434 0x00009040, 0x00000100, 0xffffffff,
435 0x0000a200, 0x00000100, 0xffffffff,
436 0x0000a204, 0x00000100, 0xffffffff,
437 0x0000a208, 0x00000100, 0xffffffff,
438 0x0000a20c, 0x00000100, 0xffffffff,
439 0x00009744, 0x00000100, 0xffffffff,
440 0x00003f80, 0x00000100, 0xffffffff,
441 0x0000a210, 0x00000100, 0xffffffff,
442 0x0000a214, 0x00000100, 0xffffffff,
443 0x000004d8, 0x00000100, 0xffffffff,
444 0x00009664, 0x00000100, 0xffffffff,
445 0x00009698, 0x00000100, 0xffffffff,
446 0x000004d4, 0x00000200, 0xffffffff,
447 0x000004d0, 0x00000000, 0xffffffff,
448 0x000030cc, 0x00000104, 0xffffffff,
449 0x0000d0c0, 0x00000100, 0xffffffff,
450 0x0000d8c0, 0x00000100, 0xffffffff,
451 0x0000802c, 0x40000000, 0xffffffff,
452 0x00003fc4, 0x40000000, 0xffffffff,
453 0x0000915c, 0x00010000, 0xffffffff,
454 0x00009160, 0x00030002, 0xffffffff,
455 0x00009164, 0x00050004, 0xffffffff,
456 0x00009168, 0x00070006, 0xffffffff,
457 0x00009178, 0x00070000, 0xffffffff,
458 0x0000917c, 0x00030002, 0xffffffff,
459 0x00009180, 0x00050004, 0xffffffff,
460 0x0000918c, 0x00010006, 0xffffffff,
461 0x00009190, 0x00090008, 0xffffffff,
462 0x00009194, 0x00070000, 0xffffffff,
463 0x00009198, 0x00030002, 0xffffffff,
464 0x0000919c, 0x00050004, 0xffffffff,
465 0x000091a8, 0x00010006, 0xffffffff,
466 0x000091ac, 0x00090008, 0xffffffff,
467 0x000091b0, 0x00070000, 0xffffffff,
468 0x000091b4, 0x00030002, 0xffffffff,
469 0x000091b8, 0x00050004, 0xffffffff,
470 0x000091c4, 0x00010006, 0xffffffff,
471 0x000091c8, 0x00090008, 0xffffffff,
472 0x000091cc, 0x00070000, 0xffffffff,
473 0x000091d0, 0x00030002, 0xffffffff,
474 0x000091d4, 0x00050004, 0xffffffff,
475 0x000091e0, 0x00010006, 0xffffffff,
476 0x000091e4, 0x00090008, 0xffffffff,
477 0x000091e8, 0x00000000, 0xffffffff,
478 0x000091ec, 0x00070000, 0xffffffff,
479 0x000091f0, 0x00030002, 0xffffffff,
480 0x000091f4, 0x00050004, 0xffffffff,
481 0x00009200, 0x00010006, 0xffffffff,
482 0x00009204, 0x00090008, 0xffffffff,
483 0x00009208, 0x00070000, 0xffffffff,
484 0x0000920c, 0x00030002, 0xffffffff,
485 0x00009210, 0x00050004, 0xffffffff,
486 0x0000921c, 0x00010006, 0xffffffff,
487 0x00009220, 0x00090008, 0xffffffff,
488 0x00009224, 0x00070000, 0xffffffff,
489 0x00009228, 0x00030002, 0xffffffff,
490 0x0000922c, 0x00050004, 0xffffffff,
491 0x00009238, 0x00010006, 0xffffffff,
492 0x0000923c, 0x00090008, 0xffffffff,
493 0x00009240, 0x00070000, 0xffffffff,
494 0x00009244, 0x00030002, 0xffffffff,
495 0x00009248, 0x00050004, 0xffffffff,
496 0x00009254, 0x00010006, 0xffffffff,
497 0x00009258, 0x00090008, 0xffffffff,
498 0x0000925c, 0x00070000, 0xffffffff,
499 0x00009260, 0x00030002, 0xffffffff,
500 0x00009264, 0x00050004, 0xffffffff,
501 0x00009270, 0x00010006, 0xffffffff,
502 0x00009274, 0x00090008, 0xffffffff,
503 0x00009278, 0x00070000, 0xffffffff,
504 0x0000927c, 0x00030002, 0xffffffff,
505 0x00009280, 0x00050004, 0xffffffff,
506 0x0000928c, 0x00010006, 0xffffffff,
507 0x00009290, 0x00090008, 0xffffffff,
508 0x000092a8, 0x00070000, 0xffffffff,
509 0x000092ac, 0x00030002, 0xffffffff,
510 0x000092b0, 0x00050004, 0xffffffff,
511 0x000092bc, 0x00010006, 0xffffffff,
512 0x000092c0, 0x00090008, 0xffffffff,
513 0x000092c4, 0x00070000, 0xffffffff,
514 0x000092c8, 0x00030002, 0xffffffff,
515 0x000092cc, 0x00050004, 0xffffffff,
516 0x000092d8, 0x00010006, 0xffffffff,
517 0x000092dc, 0x00090008, 0xffffffff,
518 0x00009294, 0x00000000, 0xffffffff,
519 0x0000802c, 0x40010000, 0xffffffff,
520 0x00003fc4, 0x40010000, 0xffffffff,
521 0x0000915c, 0x00010000, 0xffffffff,
522 0x00009160, 0x00030002, 0xffffffff,
523 0x00009164, 0x00050004, 0xffffffff,
524 0x00009168, 0x00070006, 0xffffffff,
525 0x00009178, 0x00070000, 0xffffffff,
526 0x0000917c, 0x00030002, 0xffffffff,
527 0x00009180, 0x00050004, 0xffffffff,
528 0x0000918c, 0x00010006, 0xffffffff,
529 0x00009190, 0x00090008, 0xffffffff,
530 0x00009194, 0x00070000, 0xffffffff,
531 0x00009198, 0x00030002, 0xffffffff,
532 0x0000919c, 0x00050004, 0xffffffff,
533 0x000091a8, 0x00010006, 0xffffffff,
534 0x000091ac, 0x00090008, 0xffffffff,
535 0x000091b0, 0x00070000, 0xffffffff,
536 0x000091b4, 0x00030002, 0xffffffff,
537 0x000091b8, 0x00050004, 0xffffffff,
538 0x000091c4, 0x00010006, 0xffffffff,
539 0x000091c8, 0x00090008, 0xffffffff,
540 0x000091cc, 0x00070000, 0xffffffff,
541 0x000091d0, 0x00030002, 0xffffffff,
542 0x000091d4, 0x00050004, 0xffffffff,
543 0x000091e0, 0x00010006, 0xffffffff,
544 0x000091e4, 0x00090008, 0xffffffff,
545 0x000091e8, 0x00000000, 0xffffffff,
546 0x000091ec, 0x00070000, 0xffffffff,
547 0x000091f0, 0x00030002, 0xffffffff,
548 0x000091f4, 0x00050004, 0xffffffff,
549 0x00009200, 0x00010006, 0xffffffff,
550 0x00009204, 0x00090008, 0xffffffff,
551 0x00009208, 0x00070000, 0xffffffff,
552 0x0000920c, 0x00030002, 0xffffffff,
553 0x00009210, 0x00050004, 0xffffffff,
554 0x0000921c, 0x00010006, 0xffffffff,
555 0x00009220, 0x00090008, 0xffffffff,
556 0x00009224, 0x00070000, 0xffffffff,
557 0x00009228, 0x00030002, 0xffffffff,
558 0x0000922c, 0x00050004, 0xffffffff,
559 0x00009238, 0x00010006, 0xffffffff,
560 0x0000923c, 0x00090008, 0xffffffff,
561 0x00009240, 0x00070000, 0xffffffff,
562 0x00009244, 0x00030002, 0xffffffff,
563 0x00009248, 0x00050004, 0xffffffff,
564 0x00009254, 0x00010006, 0xffffffff,
565 0x00009258, 0x00090008, 0xffffffff,
566 0x0000925c, 0x00070000, 0xffffffff,
567 0x00009260, 0x00030002, 0xffffffff,
568 0x00009264, 0x00050004, 0xffffffff,
569 0x00009270, 0x00010006, 0xffffffff,
570 0x00009274, 0x00090008, 0xffffffff,
571 0x00009278, 0x00070000, 0xffffffff,
572 0x0000927c, 0x00030002, 0xffffffff,
573 0x00009280, 0x00050004, 0xffffffff,
574 0x0000928c, 0x00010006, 0xffffffff,
575 0x00009290, 0x00090008, 0xffffffff,
576 0x000092a8, 0x00070000, 0xffffffff,
577 0x000092ac, 0x00030002, 0xffffffff,
578 0x000092b0, 0x00050004, 0xffffffff,
579 0x000092bc, 0x00010006, 0xffffffff,
580 0x000092c0, 0x00090008, 0xffffffff,
581 0x000092c4, 0x00070000, 0xffffffff,
582 0x000092c8, 0x00030002, 0xffffffff,
583 0x000092cc, 0x00050004, 0xffffffff,
584 0x000092d8, 0x00010006, 0xffffffff,
585 0x000092dc, 0x00090008, 0xffffffff,
586 0x00009294, 0x00000000, 0xffffffff,
587 0x0000802c, 0xc0000000, 0xffffffff,
588 0x00003fc4, 0xc0000000, 0xffffffff,
589 0x000008f8, 0x00000010, 0xffffffff,
590 0x000008fc, 0x00000000, 0xffffffff,
591 0x000008f8, 0x00000011, 0xffffffff,
592 0x000008fc, 0x00000000, 0xffffffff,
593 0x000008f8, 0x00000012, 0xffffffff,
594 0x000008fc, 0x00000000, 0xffffffff,
595 0x000008f8, 0x00000013, 0xffffffff,
596 0x000008fc, 0x00000000, 0xffffffff,
597 0x000008f8, 0x00000014, 0xffffffff,
598 0x000008fc, 0x00000000, 0xffffffff,
599 0x000008f8, 0x00000015, 0xffffffff,
600 0x000008fc, 0x00000000, 0xffffffff,
601 0x000008f8, 0x00000016, 0xffffffff,
602 0x000008fc, 0x00000000, 0xffffffff,
603 0x000008f8, 0x00000017, 0xffffffff,
604 0x000008fc, 0x00000000, 0xffffffff,
605 0x000008f8, 0x00000018, 0xffffffff,
606 0x000008fc, 0x00000000, 0xffffffff,
607 0x000008f8, 0x00000019, 0xffffffff,
608 0x000008fc, 0x00000000, 0xffffffff,
609 0x000008f8, 0x0000001a, 0xffffffff,
610 0x000008fc, 0x00000000, 0xffffffff,
611 0x000008f8, 0x0000001b, 0xffffffff,
612 0x000008fc, 0x00000000, 0xffffffff
613};
614#define CAYMAN_MGCG_DEFAULT_LENGTH sizeof(cayman_mgcg_default) / (3 * sizeof(u32))
615
616static const u32 cayman_mgcg_disable[] =
617{
618 0x0000802c, 0xc0000000, 0xffffffff,
619 0x000008f8, 0x00000000, 0xffffffff,
620 0x000008fc, 0xffffffff, 0xffffffff,
621 0x000008f8, 0x00000001, 0xffffffff,
622 0x000008fc, 0xffffffff, 0xffffffff,
623 0x000008f8, 0x00000002, 0xffffffff,
624 0x000008fc, 0xffffffff, 0xffffffff,
625 0x000008f8, 0x00000003, 0xffffffff,
626 0x000008fc, 0xffffffff, 0xffffffff,
627 0x00009150, 0x00600000, 0xffffffff
628};
629#define CAYMAN_MGCG_DISABLE_LENGTH sizeof(cayman_mgcg_disable) / (3 * sizeof(u32))
630
631static const u32 cayman_mgcg_enable[] =
632{
633 0x0000802c, 0xc0000000, 0xffffffff,
634 0x000008f8, 0x00000000, 0xffffffff,
635 0x000008fc, 0x00000000, 0xffffffff,
636 0x000008f8, 0x00000001, 0xffffffff,
637 0x000008fc, 0x00000000, 0xffffffff,
638 0x000008f8, 0x00000002, 0xffffffff,
639 0x000008fc, 0x00600000, 0xffffffff,
640 0x000008f8, 0x00000003, 0xffffffff,
641 0x000008fc, 0x00000000, 0xffffffff,
642 0x00009150, 0x96944200, 0xffffffff
643};
644
645#define CAYMAN_MGCG_ENABLE_LENGTH sizeof(cayman_mgcg_enable) / (3 * sizeof(u32))
646
647#define NISLANDS_SYSLS_SEQUENCE 100
648
649static const u32 cayman_sysls_default[] =
650{
651 /* Register, Value, Mask bits */
652 0x000055e8, 0x00000000, 0xffffffff,
653 0x0000d0bc, 0x00000000, 0xffffffff,
654 0x0000d8bc, 0x00000000, 0xffffffff,
655 0x000015c0, 0x000c1401, 0xffffffff,
656 0x0000264c, 0x000c0400, 0xffffffff,
657 0x00002648, 0x000c0400, 0xffffffff,
658 0x00002650, 0x000c0400, 0xffffffff,
659 0x000020b8, 0x000c0400, 0xffffffff,
660 0x000020bc, 0x000c0400, 0xffffffff,
661 0x000020c0, 0x000c0c80, 0xffffffff,
662 0x0000f4a0, 0x000000c0, 0xffffffff,
663 0x0000f4a4, 0x00680fff, 0xffffffff,
664 0x00002f50, 0x00000404, 0xffffffff,
665 0x000004c8, 0x00000001, 0xffffffff,
666 0x000064ec, 0x00000000, 0xffffffff,
667 0x00000c7c, 0x00000000, 0xffffffff,
668 0x00008dfc, 0x00000000, 0xffffffff
669};
670#define CAYMAN_SYSLS_DEFAULT_LENGTH sizeof(cayman_sysls_default) / (3 * sizeof(u32))
671
672static const u32 cayman_sysls_disable[] =
673{
674 /* Register, Value, Mask bits */
675 0x0000d0c0, 0x00000000, 0xffffffff,
676 0x0000d8c0, 0x00000000, 0xffffffff,
677 0x000055e8, 0x00000000, 0xffffffff,
678 0x0000d0bc, 0x00000000, 0xffffffff,
679 0x0000d8bc, 0x00000000, 0xffffffff,
680 0x000015c0, 0x00041401, 0xffffffff,
681 0x0000264c, 0x00040400, 0xffffffff,
682 0x00002648, 0x00040400, 0xffffffff,
683 0x00002650, 0x00040400, 0xffffffff,
684 0x000020b8, 0x00040400, 0xffffffff,
685 0x000020bc, 0x00040400, 0xffffffff,
686 0x000020c0, 0x00040c80, 0xffffffff,
687 0x0000f4a0, 0x000000c0, 0xffffffff,
688 0x0000f4a4, 0x00680000, 0xffffffff,
689 0x00002f50, 0x00000404, 0xffffffff,
690 0x000004c8, 0x00000001, 0xffffffff,
691 0x000064ec, 0x00007ffd, 0xffffffff,
692 0x00000c7c, 0x0000ff00, 0xffffffff,
693 0x00008dfc, 0x0000007f, 0xffffffff
694};
695#define CAYMAN_SYSLS_DISABLE_LENGTH sizeof(cayman_sysls_disable) / (3 * sizeof(u32))
696
697static const u32 cayman_sysls_enable[] =
698{
699 /* Register, Value, Mask bits */
700 0x000055e8, 0x00000001, 0xffffffff,
701 0x0000d0bc, 0x00000100, 0xffffffff,
702 0x0000d8bc, 0x00000100, 0xffffffff,
703 0x000015c0, 0x000c1401, 0xffffffff,
704 0x0000264c, 0x000c0400, 0xffffffff,
705 0x00002648, 0x000c0400, 0xffffffff,
706 0x00002650, 0x000c0400, 0xffffffff,
707 0x000020b8, 0x000c0400, 0xffffffff,
708 0x000020bc, 0x000c0400, 0xffffffff,
709 0x000020c0, 0x000c0c80, 0xffffffff,
710 0x0000f4a0, 0x000000c0, 0xffffffff,
711 0x0000f4a4, 0x00680fff, 0xffffffff,
712 0x00002f50, 0x00000903, 0xffffffff,
713 0x000004c8, 0x00000000, 0xffffffff,
714 0x000064ec, 0x00000000, 0xffffffff,
715 0x00000c7c, 0x00000000, 0xffffffff,
716 0x00008dfc, 0x00000000, 0xffffffff
717};
718#define CAYMAN_SYSLS_ENABLE_LENGTH sizeof(cayman_sysls_enable) / (3 * sizeof(u32))
719
720struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
721struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
722
723struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
724{
725 struct ni_power_info *pi = rdev->pm.dpm.priv;
726
727 return pi;
728}
729
730struct ni_ps *ni_get_ps(struct radeon_ps *rps)
731{
732 struct ni_ps *ps = rps->ps_priv;
733
734 return ps;
735}
736
737static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
738 u16 v, s32 t,
739 u32 ileakage,
740 u32 *leakage)
741{
742 s64 kt, kv, leakage_w, i_leakage, vddc, temperature;
743
744 i_leakage = div64_s64(drm_int2fixp(ileakage), 1000);
745 vddc = div64_s64(drm_int2fixp(v), 1000);
746 temperature = div64_s64(drm_int2fixp(t), 1000);
747
748 kt = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->at), 1000),
749 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bt), 1000), temperature)));
750 kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
751 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));
752
753 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
754
755 *leakage = drm_fixp2int(leakage_w * 1000);
756}
757
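Reading the drm fixed-point calls above back into plain math (a paraphrase, not an authoritative spec): with I = ileakage / 1000, V = v / 1000 and T = t / 1000, the routine computes

	k_t = (at / 1000) * exp((bt / 1000) * T)
	k_v = (av / 1000) * exp((bv / 1000) * V)
	leakage = drm_fixp2int(1000 * I * k_t * k_v * V)

so the coefficient tables and the inputs are all stored scaled by 1000, and the result comes back in the same 1/1000 power units.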
758static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
759 const struct ni_leakage_coeffients *coeff,
760 u16 v,
761 s32 t,
762 u32 i_leakage,
763 u32 *leakage)
764{
765 ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
766}
767
768static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
769 struct radeon_ps *rps)
770{
771 struct ni_ps *ps = ni_get_ps(rps);
772 struct radeon_clock_and_voltage_limits *max_limits;
773 bool disable_mclk_switching;
774 u32 mclk, sclk;
775 u16 vddc, vddci;
776 int i;
777
778 if (rdev->pm.dpm.new_active_crtc_count > 1)
779 disable_mclk_switching = true;
780 else
781 disable_mclk_switching = false;
782
783 if (rdev->pm.dpm.ac_power)
784 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
785 else
786 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
787
788 if (rdev->pm.dpm.ac_power == false) {
789 for (i = 0; i < ps->performance_level_count; i++) {
790 if (ps->performance_levels[i].mclk > max_limits->mclk)
791 ps->performance_levels[i].mclk = max_limits->mclk;
792 if (ps->performance_levels[i].sclk > max_limits->sclk)
793 ps->performance_levels[i].sclk = max_limits->sclk;
794 if (ps->performance_levels[i].vddc > max_limits->vddc)
795 ps->performance_levels[i].vddc = max_limits->vddc;
796 if (ps->performance_levels[i].vddci > max_limits->vddci)
797 ps->performance_levels[i].vddci = max_limits->vddci;
798 }
799 }
800
801 /* XXX validate the min clocks required for display */
802
803 if (disable_mclk_switching) {
804 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
805 sclk = ps->performance_levels[0].sclk;
806 vddc = ps->performance_levels[0].vddc;
807 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
808 } else {
809 sclk = ps->performance_levels[0].sclk;
810 mclk = ps->performance_levels[0].mclk;
811 vddc = ps->performance_levels[0].vddc;
812 vddci = ps->performance_levels[0].vddci;
813 }
814
815 /* adjusted low state */
816 ps->performance_levels[0].sclk = sclk;
817 ps->performance_levels[0].mclk = mclk;
818 ps->performance_levels[0].vddc = vddc;
819 ps->performance_levels[0].vddci = vddci;
820
821 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
822 &ps->performance_levels[0].sclk,
823 &ps->performance_levels[0].mclk);
824
825 for (i = 1; i < ps->performance_level_count; i++) {
826 if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
827 ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
828 if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
829 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
830 }
831
832 if (disable_mclk_switching) {
833 mclk = ps->performance_levels[0].mclk;
834 for (i = 1; i < ps->performance_level_count; i++) {
835 if (mclk < ps->performance_levels[i].mclk)
836 mclk = ps->performance_levels[i].mclk;
837 }
838 for (i = 0; i < ps->performance_level_count; i++) {
839 ps->performance_levels[i].mclk = mclk;
840 ps->performance_levels[i].vddci = vddci;
841 }
842 } else {
843 for (i = 1; i < ps->performance_level_count; i++) {
844 if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
845 ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
846 if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
847 ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
848 }
849 }
850
851 for (i = 1; i < ps->performance_level_count; i++)
852 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
853 &ps->performance_levels[i].sclk,
854 &ps->performance_levels[i].mclk);
855
856 for (i = 0; i < ps->performance_level_count; i++)
857 btc_adjust_clock_combinations(rdev, max_limits,
858 &ps->performance_levels[i]);
859
860 for (i = 0; i < ps->performance_level_count; i++) {
861 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
862 ps->performance_levels[i].sclk,
863 max_limits->vddc, &ps->performance_levels[i].vddc);
864 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
865 ps->performance_levels[i].mclk,
866 max_limits->vddci, &ps->performance_levels[i].vddci);
867 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
868 ps->performance_levels[i].mclk,
869 max_limits->vddc, &ps->performance_levels[i].vddc);
870 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
871 rdev->clock.current_dispclk,
872 max_limits->vddc, &ps->performance_levels[i].vddc);
873 }
874
875 for (i = 0; i < ps->performance_level_count; i++) {
876 btc_apply_voltage_delta_rules(rdev,
877 max_limits->vddc, max_limits->vddci,
878 &ps->performance_levels[i].vddc,
879 &ps->performance_levels[i].vddci);
880 }
881
882 ps->dc_compatible = true;
883 for (i = 0; i < ps->performance_level_count; i++) {
884 if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
885 ps->dc_compatible = false;
886
887 if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
888 ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
889 }
890}
891
892static void ni_cg_clockgating_default(struct radeon_device *rdev)
893{
894 u32 count;
895 const u32 *ps = NULL;
896
897 ps = (const u32 *)&cayman_cgcg_cgls_default;
898 count = CAYMAN_CGCG_CGLS_DEFAULT_LENGTH;
899
900 btc_program_mgcg_hw_sequence(rdev, ps, count);
901}
902
903static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
904 bool enable)
905{
906 u32 count;
907 const u32 *ps = NULL;
908
909 if (enable) {
910 ps = (const u32 *)&cayman_cgcg_cgls_enable;
911 count = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
912 } else {
913 ps = (const u32 *)&cayman_cgcg_cgls_disable;
914 count = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
915 }
916
917 btc_program_mgcg_hw_sequence(rdev, ps, count);
918}
919
920static void ni_mg_clockgating_default(struct radeon_device *rdev)
921{
922 u32 count;
923 const u32 *ps = NULL;
924
925 ps = (const u32 *)&cayman_mgcg_default;
926 count = CAYMAN_MGCG_DEFAULT_LENGTH;
927
928 btc_program_mgcg_hw_sequence(rdev, ps, count);
929}
930
931static void ni_mg_clockgating_enable(struct radeon_device *rdev,
932 bool enable)
933{
934 u32 count;
935 const u32 *ps = NULL;
936
937 if (enable) {
938 ps = (const u32 *)&cayman_mgcg_enable;
939 count = CAYMAN_MGCG_ENABLE_LENGTH;
940 } else {
941 ps = (const u32 *)&cayman_mgcg_disable;
942 count = CAYMAN_MGCG_DISABLE_LENGTH;
943 }
944
945 btc_program_mgcg_hw_sequence(rdev, ps, count);
946}
947
948static void ni_ls_clockgating_default(struct radeon_device *rdev)
949{
950 u32 count;
951 const u32 *ps = NULL;
952
953 ps = (const u32 *)&cayman_sysls_default;
954 count = CAYMAN_SYSLS_DEFAULT_LENGTH;
955
956 btc_program_mgcg_hw_sequence(rdev, ps, count);
957}
958
959static void ni_ls_clockgating_enable(struct radeon_device *rdev,
960 bool enable)
961{
962 u32 count;
963 const u32 *ps = NULL;
964
965 if (enable) {
966 ps = (const u32 *)&cayman_sysls_enable;
967 count = CAYMAN_SYSLS_ENABLE_LENGTH;
968 } else {
969 ps = (const u32 *)&cayman_sysls_disable;
970 count = CAYMAN_SYSLS_DISABLE_LENGTH;
971 }
972
973 btc_program_mgcg_hw_sequence(rdev, ps, count);
974
975}
976
977static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
978 struct radeon_clock_voltage_dependency_table *table)
979{
980 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
981 u32 i;
982
983 if (table) {
984 for (i = 0; i < table->count; i++) {
985 if (0xff01 == table->entries[i].v) {
986 if (pi->max_vddc == 0)
987 return -EINVAL;
988 table->entries[i].v = pi->max_vddc;
989 }
990 }
991 }
992 return 0;
993}
994
995static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
996{
997 int ret = 0;
998
999 ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
1000 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
1001
1002 ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
1003 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
1004 return ret;
1005}
1006
1007static void ni_stop_dpm(struct radeon_device *rdev)
1008{
1009 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
1010}
1011
1012#if 0
1013static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
1014 bool ac_power)
1015{
1016 if (ac_power)
1017 return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
1018 0 : -EINVAL;
1019
1020 return 0;
1021}
1022#endif
1023
1024static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1025 PPSMC_Msg msg, u32 parameter)
1026{
1027 WREG32(SMC_SCRATCH0, parameter);
1028 return rv770_send_msg_to_smc(rdev, msg);
1029}
1030
1031static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
1032{
1033 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
1034 return -EINVAL;
1035
1036 return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
1037 0 : -EINVAL;
1038}
1039
1040static int ni_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
1041{
1042 if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
1043 return -EINVAL;
1044
1045 return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
1046 0 : -EINVAL;
1047}
1048
1049static void ni_stop_smc(struct radeon_device *rdev)
1050{
1051 u32 tmp;
1052 int i;
1053
1054 for (i = 0; i < rdev->usec_timeout; i++) {
1055 tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
1056 if (tmp != 1)
1057 break;
1058 udelay(1);
1059 }
1060
1061 udelay(100);
1062
1063 r7xx_stop_smc(rdev);
1064}
1065
1066static int ni_process_firmware_header(struct radeon_device *rdev)
1067{
1068 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1069 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1070 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1071 u32 tmp;
1072 int ret;
1073
1074 ret = rv770_read_smc_sram_dword(rdev,
1075 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1076 NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
1077 &tmp, pi->sram_end);
1078
1079 if (ret)
1080 return ret;
1081
1082 pi->state_table_start = (u16)tmp;
1083
1084 ret = rv770_read_smc_sram_dword(rdev,
1085 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1086 NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
1087 &tmp, pi->sram_end);
1088
1089 if (ret)
1090 return ret;
1091
1092 pi->soft_regs_start = (u16)tmp;
1093
1094 ret = rv770_read_smc_sram_dword(rdev,
1095 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1096 NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
1097 &tmp, pi->sram_end);
1098
1099 if (ret)
1100 return ret;
1101
1102 eg_pi->mc_reg_table_start = (u16)tmp;
1103
1104 ret = rv770_read_smc_sram_dword(rdev,
1105 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1106 NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
1107 &tmp, pi->sram_end);
1108
1109 if (ret)
1110 return ret;
1111
1112 ni_pi->fan_table_start = (u16)tmp;
1113
1114 ret = rv770_read_smc_sram_dword(rdev,
1115 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1116 NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
1117 &tmp, pi->sram_end);
1118
1119 if (ret)
1120 return ret;
1121
1122 ni_pi->arb_table_start = (u16)tmp;
1123
1124 ret = rv770_read_smc_sram_dword(rdev,
1125 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1126 NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
1127 &tmp, pi->sram_end);
1128
1129 if (ret)
1130 return ret;
1131
1132 ni_pi->cac_table_start = (u16)tmp;
1133
1134 ret = rv770_read_smc_sram_dword(rdev,
1135 NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
1136 NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
1137 &tmp, pi->sram_end);
1138
1139 if (ret)
1140 return ret;
1141
1142 ni_pi->spll_table_start = (u16)tmp;
1143
1144
1145 return ret;
1146}
1147
1148static void ni_read_clock_registers(struct radeon_device *rdev)
1149{
1150 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1151
1152 ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
1153 ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
1154 ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
1155 ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
1156 ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
1157 ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
1158 ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1159 ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
1160 ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1161 ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
1162 ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1163 ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1164 ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1165 ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1166}
1167
1168#if 0
1169static int ni_enter_ulp_state(struct radeon_device *rdev)
1170{
1171 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1172
1173 if (pi->gfx_clock_gating) {
1174 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
1175 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
1176 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
1177 RREG32(GB_ADDR_CONFIG);
1178 }
1179
1180 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
1181 ~HOST_SMC_MSG_MASK);
1182
1183 udelay(25000);
1184
1185 return 0;
1186}
1187#endif
1188
1189static void ni_program_response_times(struct radeon_device *rdev)
1190{
1191 u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
1192 u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
1193 u32 reference_clock;
1194
1195 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
1196
1197 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
1198 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
1199
1200 if (voltage_response_time == 0)
1201 voltage_response_time = 1000;
1202
1203 if (backbias_response_time == 0)
1204 backbias_response_time = 1000;
1205
1206 acpi_delay_time = 15000;
1207 vbi_time_out = 100000;
1208
1209 reference_clock = radeon_get_xclk(rdev);
1210
1211 vddc_dly = (voltage_response_time * reference_clock) / 1600;
1212 bb_dly = (backbias_response_time * reference_clock) / 1600;
1213 acpi_dly = (acpi_delay_time * reference_clock) / 1600;
1214 vbi_dly = (vbi_time_out * reference_clock) / 1600;
1215
1216 mclk_switch_limit = (460 * reference_clock) / 100;
1217
1218 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
1219 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
1220 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
1221 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
1222 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
1223 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
1224}
1225
1226static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
1227 struct atom_voltage_table *voltage_table,
1228 NISLANDS_SMC_STATETABLE *table)
1229{
1230 unsigned int i;
1231
1232 for (i = 0; i < voltage_table->count; i++) {
1233 table->highSMIO[i] = 0;
1234 table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
1235 }
1236}
1237
1238static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
1239 NISLANDS_SMC_STATETABLE *table)
1240{
1241 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1242 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1243 unsigned char i;
1244
1245 if (eg_pi->vddc_voltage_table.count) {
1246 ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
1247 table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
1248 table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
1249 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1250
1251 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
1252 if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
1253 table->maxVDDCIndexInPPTable = i;
1254 break;
1255 }
1256 }
1257 }
1258
1259 if (eg_pi->vddci_voltage_table.count) {
1260 ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
1261
1262 table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
1263 table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
1264			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
1265 }
1266}
1267
1268static int ni_populate_voltage_value(struct radeon_device *rdev,
1269 struct atom_voltage_table *table,
1270 u16 value,
1271 NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1272{
1273 unsigned int i;
1274
1275 for (i = 0; i < table->count; i++) {
1276 if (value <= table->entries[i].value) {
1277 voltage->index = (u8)i;
1278 voltage->value = cpu_to_be16(table->entries[i].value);
1279 break;
1280 }
1281 }
1282
1283 if (i >= table->count)
1284 return -EINVAL;
1285
1286 return 0;
1287}
1288
1289static void ni_populate_mvdd_value(struct radeon_device *rdev,
1290 u32 mclk,
1291 NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1292{
1293 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1294 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1295
1296 if (!pi->mvdd_control) {
1297 voltage->index = eg_pi->mvdd_high_index;
1298 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1299 return;
1300 }
1301
1302 if (mclk <= pi->mvdd_split_frequency) {
1303 voltage->index = eg_pi->mvdd_low_index;
1304 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
1305 } else {
1306 voltage->index = eg_pi->mvdd_high_index;
1307 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1308 }
1309}
1310
1311static int ni_get_std_voltage_value(struct radeon_device *rdev,
1312 NISLANDS_SMC_VOLTAGE_VALUE *voltage,
1313 u16 *std_voltage)
1314{
1315 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
1316 ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
1317 *std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
1318 else
1319 *std_voltage = be16_to_cpu(voltage->value);
1320
1321 return 0;
1322}
1323
1324static void ni_populate_std_voltage_value(struct radeon_device *rdev,
1325 u16 value, u8 index,
1326 NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1327{
1328 voltage->index = index;
1329 voltage->value = cpu_to_be16(value);
1330}
1331
1332static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
1333{
1334 u32 xclk_period;
1335 u32 xclk = radeon_get_xclk(rdev);
1336 u32 tmp = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;
1337
1338 xclk_period = (1000000000UL / xclk);
1339 xclk_period /= 10000UL;
1340
1341 return tmp * xclk_period;
1342}
1343
1344static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
1345{
1346 return (power_in_watts * scaling_factor) << 2;
1347}
1348
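To get a feel for the two helpers above, a small worked sketch, assuming radeon_get_xclk() reports the reference clock in 10 kHz units as elsewhere in this driver (the concrete numbers are illustrative, not from real hardware):

	/* 27 MHz reference clock -> xclk = 2700 in 10 kHz units */
	u32 xclk = 2700;
	u32 xclk_period = (1000000000UL / xclk) / 10000UL;	/* 370370 / 10000 = 37 */
	u32 tid_cnt = 5;					/* hypothetical TID_CNT field */
	u32 scaling_factor = tid_cnt * xclk_period;		/* 185 */

	/* ni_scale_power_for_smc(100, 185): (100 * 185) << 2 = 74000 */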
1349static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
1350 struct radeon_ps *radeon_state,
1351 u32 near_tdp_limit)
1352{
1353 struct ni_ps *state = ni_get_ps(radeon_state);
1354 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1355 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1356 u32 power_boost_limit = 0;
1357 int ret;
1358
1359 if (ni_pi->enable_power_containment &&
1360 ni_pi->use_power_boost_limit) {
1361 NISLANDS_SMC_VOLTAGE_VALUE vddc;
1362 u16 std_vddc_med;
1363 u16 std_vddc_high;
1364 u64 tmp, n, d;
1365
1366 if (state->performance_level_count < 3)
1367 return 0;
1368
1369 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
1370 state->performance_levels[state->performance_level_count - 2].vddc,
1371 &vddc);
1372 if (ret)
1373 return 0;
1374
1375 ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
1376 if (ret)
1377 return 0;
1378
1379 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
1380 state->performance_levels[state->performance_level_count - 1].vddc,
1381 &vddc);
1382 if (ret)
1383 return 0;
1384
1385 ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
1386 if (ret)
1387 return 0;
1388
1389 n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
1390 d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
1391 tmp = div64_u64(n, d);
1392
1393 if (tmp >> 32)
1394 return 0;
1395 power_boost_limit = (u32)tmp;
1396 }
1397
1398 return power_boost_limit;
1399}
1400
1401static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
1402 bool adjust_polarity,
1403 u32 tdp_adjustment,
1404 u32 *tdp_limit,
1405 u32 *near_tdp_limit)
1406{
1407 if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
1408 return -EINVAL;
1409
1410 if (adjust_polarity) {
1411 *tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
1412 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit + (*tdp_limit - rdev->pm.dpm.tdp_limit);
1413 } else {
1414 *tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
1415 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit - (rdev->pm.dpm.tdp_limit - *tdp_limit);
1416 }
1417
1418 return 0;
1419}
1420
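For example, with rdev->pm.dpm.tdp_limit = 200, near_tdp_limit = 180 and tdp_adjustment = 10 (all in the driver's internal power units; the numbers are illustrative): positive polarity gives *tdp_limit = (110 * 200) / 100 = 220 and *near_tdp_limit = 180 + (220 - 200) = 200, while negative polarity gives 180 and 160. Either way the near-TDP limit shifts by the same absolute amount as the main limit.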
1421static int ni_populate_smc_tdp_limits(struct radeon_device *rdev,
1422 struct radeon_ps *radeon_state)
1423{
1424 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1425 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1426
1427 if (ni_pi->enable_power_containment) {
1428 NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
1429 u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
1430 u32 tdp_limit;
1431 u32 near_tdp_limit;
1432 u32 power_boost_limit;
1433 int ret;
1434
1435 if (scaling_factor == 0)
1436 return -EINVAL;
1437
1438 memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));
1439
1440 ret = ni_calculate_adjusted_tdp_limits(rdev,
1441 false, /* ??? */
1442 rdev->pm.dpm.tdp_adjustment,
1443 &tdp_limit,
1444 &near_tdp_limit);
1445 if (ret)
1446 return ret;
1447
1448 power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
1449 near_tdp_limit);
1450
1451 smc_table->dpm2Params.TDPLimit =
1452 cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
1453 smc_table->dpm2Params.NearTDPLimit =
1454 cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
1455 smc_table->dpm2Params.SafePowerLimit =
1456 cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
1457 scaling_factor));
1458 smc_table->dpm2Params.PowerBoostLimit =
1459 cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));
1460
1461 ret = rv770_copy_bytes_to_smc(rdev,
1462 (u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
1463 offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
1464 (u8 *)(&smc_table->dpm2Params.TDPLimit),
1465 sizeof(u32) * 4, pi->sram_end);
1466 if (ret)
1467 return ret;
1468 }
1469
1470 return 0;
1471}
1472
1473int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
1474 u32 arb_freq_src, u32 arb_freq_dest)
1475{
1476 u32 mc_arb_dram_timing;
1477 u32 mc_arb_dram_timing2;
1478 u32 burst_time;
1479 u32 mc_cg_config;
1480
1481 switch (arb_freq_src) {
1482 case MC_CG_ARB_FREQ_F0:
1483 mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1484 mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1485 burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
1486 break;
1487 case MC_CG_ARB_FREQ_F1:
1488 mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
1489 mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
1490 burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
1491 break;
1492 case MC_CG_ARB_FREQ_F2:
1493 mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
1494 mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
1495 burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
1496 break;
1497 case MC_CG_ARB_FREQ_F3:
1498 mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
1499 mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
1500 burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
1501 break;
1502 default:
1503 return -EINVAL;
1504 }
1505
1506 switch (arb_freq_dest) {
1507 case MC_CG_ARB_FREQ_F0:
1508 WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
1509 WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
1510 WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
1511 break;
1512 case MC_CG_ARB_FREQ_F1:
1513 WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
1514 WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
1515 WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
1516 break;
1517 case MC_CG_ARB_FREQ_F2:
1518 WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
1519 WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
1520 WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
1521 break;
1522 case MC_CG_ARB_FREQ_F3:
1523 WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
1524 WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
1525 WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
1526 break;
1527 default:
1528 return -EINVAL;
1529 }
1530
1531 mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
1532 WREG32(MC_CG_CONFIG, mc_cg_config);
1533 WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
1534
1535 return 0;
1536}
1537
1538static int ni_init_arb_table_index(struct radeon_device *rdev)
1539{
1540 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1541 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1542 u32 tmp;
1543 int ret;
1544
1545 ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
1546 &tmp, pi->sram_end);
1547 if (ret)
1548 return ret;
1549
1550 tmp &= 0x00FFFFFF;
1551 tmp |= ((u32)MC_CG_ARB_FREQ_F1) << 24;
1552
1553 return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start,
1554 tmp, pi->sram_end);
1555}
1556
1557static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1558{
1559 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1560}
1561
1562static int ni_force_switch_to_arb_f0(struct radeon_device *rdev)
1563{
1564 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1565 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1566 u32 tmp;
1567 int ret;
1568
1569 ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
1570 &tmp, pi->sram_end);
1571 if (ret)
1572 return ret;
1573
1574 tmp = (tmp >> 24) & 0xff;
1575
1576 if (tmp == MC_CG_ARB_FREQ_F0)
1577 return 0;
1578
1579 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1580}
1581
1582static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
1583 struct rv7xx_pl *pl,
1584 SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs)
1585{
1586 u32 dram_timing;
1587 u32 dram_timing2;
1588
1589 arb_regs->mc_arb_rfsh_rate =
1590 (u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
1591
1592
1593 radeon_atom_set_engine_dram_timings(rdev,
1594 pl->sclk,
1595 pl->mclk);
1596
1597 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1598 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1599
1600 arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
1601 arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
1602
1603 return 0;
1604}
1605
1606static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev,
1607 struct radeon_ps *radeon_state,
1608 unsigned int first_arb_set)
1609{
1610 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1611 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1612 struct ni_ps *state = ni_get_ps(radeon_state);
1613 SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
1614 int i, ret = 0;
1615
1616 for (i = 0; i < state->performance_level_count; i++) {
1617 ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
1618 if (ret)
1619 break;
1620
1621 ret = rv770_copy_bytes_to_smc(rdev,
1622 (u16)(ni_pi->arb_table_start +
1623 offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) +
1624 sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)),
1625 (u8 *)&arb_regs,
1626 (u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet),
1627 pi->sram_end);
1628 if (ret)
1629 break;
1630 }
1631 return ret;
1632}
1633
1634static int ni_program_memory_timing_parameters(struct radeon_device *rdev,
1635 struct radeon_ps *radeon_new_state)
1636{
1637 return ni_do_program_memory_timing_parameters(rdev, radeon_new_state,
1638 NISLANDS_DRIVER_STATE_ARB_INDEX);
1639}
1640
1641static void ni_populate_initial_mvdd_value(struct radeon_device *rdev,
1642 struct NISLANDS_SMC_VOLTAGE_VALUE *voltage)
1643{
1644 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1645
1646 voltage->index = eg_pi->mvdd_high_index;
1647 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1648}
1649
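/*
 * Build the single-level "initial" state from the boot power state:
 * the mclk/sclk PLL registers come from the values cached at driver
 * init, voltages are looked up in the vddc/vddci tables, and the DPM2
 * fields are left at their pass-through defaults (no containment).
 */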
1650static int ni_populate_smc_initial_state(struct radeon_device *rdev,
1651 struct radeon_ps *radeon_initial_state,
1652 NISLANDS_SMC_STATETABLE *table)
1653{
1654 struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
1655 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1656 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1657 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1658 u32 reg;
1659 int ret;
1660
1661 table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
1662 cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
1663 table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
1664 cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
1665 table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
1666 cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
1667 table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
1668 cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
1669 table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
1670 cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
1671 table->initialState.levels[0].mclk.vDLL_CNTL =
1672 cpu_to_be32(ni_pi->clock_registers.dll_cntl);
1673 table->initialState.levels[0].mclk.vMPLL_SS =
1674 cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
1675 table->initialState.levels[0].mclk.vMPLL_SS2 =
1676 cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
1677 table->initialState.levels[0].mclk.mclk_value =
1678 cpu_to_be32(initial_state->performance_levels[0].mclk);
1679
1680 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1681 cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
1682 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1683 cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
1684 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1685 cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
1686 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
1687 cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
1688 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
1689 cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
1690 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
1691 cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
1692 table->initialState.levels[0].sclk.sclk_value =
1693 cpu_to_be32(initial_state->performance_levels[0].sclk);
1694 table->initialState.levels[0].arbRefreshState =
1695 NISLANDS_INITIAL_STATE_ARB_INDEX;
1696
1697 table->initialState.levels[0].ACIndex = 0;
1698
1699 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
1700 initial_state->performance_levels[0].vddc,
1701 &table->initialState.levels[0].vddc);
1702 if (!ret) {
1703 u16 std_vddc;
1704
1705 ret = ni_get_std_voltage_value(rdev,
1706 &table->initialState.levels[0].vddc,
1707 &std_vddc);
1708 if (!ret)
1709 ni_populate_std_voltage_value(rdev, std_vddc,
1710 table->initialState.levels[0].vddc.index,
1711 &table->initialState.levels[0].std_vddc);
1712 }
1713
1714 if (eg_pi->vddci_control)
1715 ni_populate_voltage_value(rdev,
1716 &eg_pi->vddci_voltage_table,
1717 initial_state->performance_levels[0].vddci,
1718 &table->initialState.levels[0].vddci);
1719
1720 ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
1721
1722 reg = CG_R(0xffff) | CG_L(0);
1723 table->initialState.levels[0].aT = cpu_to_be32(reg);
1724
1725 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
1726
1727 if (pi->boot_in_gen2)
1728 table->initialState.levels[0].gen2PCIE = 1;
1729 else
1730 table->initialState.levels[0].gen2PCIE = 0;
1731
1732 if (pi->mem_gddr5) {
1733 table->initialState.levels[0].strobeMode =
1734 cypress_get_strobe_mode_settings(rdev,
1735 initial_state->performance_levels[0].mclk);
1736
1737 if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
1738 table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
1739 else
1740 table->initialState.levels[0].mcFlags = 0;
1741 }
1742
1743 table->initialState.levelCount = 1;
1744
1745 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
1746
1747 table->initialState.levels[0].dpm2.MaxPS = 0;
1748 table->initialState.levels[0].dpm2.NearTDPDec = 0;
1749 table->initialState.levels[0].dpm2.AboveSafeInc = 0;
1750 table->initialState.levels[0].dpm2.BelowSafeInc = 0;
1751
1752 reg = MIN_POWER_MASK | MAX_POWER_MASK;
1753 table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
1754
1755 reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
1756 table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
1757
1758 return 0;
1759}
1760
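/*
 * Derive the ACPI (lowest power) state from the initial state: drop
 * to acpi_vddc (or the table minimum), zero the mclk/sclk values so
 * the PLLs are bypassed, hold every MRDCK pad in reset with its DLL
 * bypassed, and select the bypass clock source via SCLK_MUX_SEL(4).
 */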
1761static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
1762 NISLANDS_SMC_STATETABLE *table)
1763{
1764 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1765 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1766 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1767 u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
1768 u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
1769 u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
1770 u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
1771 u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
1772 u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
1773 u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
1774 u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
1775 u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
1776 u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
1777 u32 reg;
1778 int ret;
1779
1780 table->ACPIState = table->initialState;
1781
1782 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
1783
1784 if (pi->acpi_vddc) {
1785 ret = ni_populate_voltage_value(rdev,
1786 &eg_pi->vddc_voltage_table,
1787 pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
1788 if (!ret) {
1789 u16 std_vddc;
1790
1791 ret = ni_get_std_voltage_value(rdev,
1792 &table->ACPIState.levels[0].vddc, &std_vddc);
1793 if (!ret)
1794 ni_populate_std_voltage_value(rdev, std_vddc,
1795 table->ACPIState.levels[0].vddc.index,
1796 &table->ACPIState.levels[0].std_vddc);
1797 }
1798
1799 if (pi->pcie_gen2) {
1800 if (pi->acpi_pcie_gen2)
1801 table->ACPIState.levels[0].gen2PCIE = 1;
1802 else
1803 table->ACPIState.levels[0].gen2PCIE = 0;
1804 } else {
1805 table->ACPIState.levels[0].gen2PCIE = 0;
1806 }
1807 } else {
1808 ret = ni_populate_voltage_value(rdev,
1809 &eg_pi->vddc_voltage_table,
1810 pi->min_vddc_in_table,
1811 &table->ACPIState.levels[0].vddc);
1812 if (!ret) {
1813 u16 std_vddc;
1814
1815 ret = ni_get_std_voltage_value(rdev,
1816 &table->ACPIState.levels[0].vddc,
1817 &std_vddc);
1818 if (!ret)
1819 ni_populate_std_voltage_value(rdev, std_vddc,
1820 table->ACPIState.levels[0].vddc.index,
1821 &table->ACPIState.levels[0].std_vddc);
1822 }
1823 table->ACPIState.levels[0].gen2PCIE = 0;
1824 }
1825
1826 if (eg_pi->acpi_vddci) {
1827 if (eg_pi->vddci_control)
1828 ni_populate_voltage_value(rdev,
1829 &eg_pi->vddci_voltage_table,
1830 eg_pi->acpi_vddci,
1831 &table->ACPIState.levels[0].vddci);
1832 }
1833
1835 mpll_ad_func_cntl &= ~PDNB;
1836
1837 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
1838
1839 if (pi->mem_gddr5)
1840 mpll_dq_func_cntl &= ~PDNB;
1841 mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
1842
1844 mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
1845 MRDCKA1_RESET |
1846 MRDCKB0_RESET |
1847 MRDCKB1_RESET |
1848 MRDCKC0_RESET |
1849 MRDCKC1_RESET |
1850 MRDCKD0_RESET |
1851 MRDCKD1_RESET);
1852
1853 mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
1854 MRDCKA1_PDNB |
1855 MRDCKB0_PDNB |
1856 MRDCKB1_PDNB |
1857 MRDCKC0_PDNB |
1858 MRDCKC1_PDNB |
1859 MRDCKD0_PDNB |
1860 MRDCKD1_PDNB);
1861
1862 dll_cntl |= (MRDCKA0_BYPASS |
1863 MRDCKA1_BYPASS |
1864 MRDCKB0_BYPASS |
1865 MRDCKB1_BYPASS |
1866 MRDCKC0_BYPASS |
1867 MRDCKC1_BYPASS |
1868 MRDCKD0_BYPASS |
1869 MRDCKD1_BYPASS);
1870
1871 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
1872 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
1873
1874 table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
1875 table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
1876 table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
1877 table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
1878 table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
1879 table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
1880
1881 table->ACPIState.levels[0].mclk.mclk_value = 0;
1882
1883 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
1884 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
1885 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
1886 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
1887
1888 table->ACPIState.levels[0].sclk.sclk_value = 0;
1889
1890 ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
1891
1892 if (eg_pi->dynamic_ac_timing)
1893 table->ACPIState.levels[0].ACIndex = 1;
1894
1895 table->ACPIState.levels[0].dpm2.MaxPS = 0;
1896 table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
1897 table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
1898 table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
1899
1900 reg = MIN_POWER_MASK | MAX_POWER_MASK;
1901 table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
1902
1903 reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
1904 table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
1905
1906 return 0;
1907}
1908
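/*
 * Assemble the complete SMC state table: voltage tables, thermal
 * protection type, platform capability flags, the initial and ACPI
 * states (the driver and ULV slots start as copies of the initial
 * state), and the boot state's memory timings, then upload the whole
 * structure to SMC SRAM.
 */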
1909static int ni_init_smc_table(struct radeon_device *rdev)
1910{
1911 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1912 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1913 int ret;
1914 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
1915 NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;
1916
1917 memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));
1918
1919 ni_populate_smc_voltage_tables(rdev, table);
1920
1921 switch (rdev->pm.int_thermal_type) {
1922 case THERMAL_TYPE_NI:
1923 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
1924 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1925 break;
1926 case THERMAL_TYPE_NONE:
1927 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1928 break;
1929 default:
1930 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1931 break;
1932 }
1933
1934 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
1935 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1936
1937 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1938 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
1939
1940 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1941 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1942
1943 if (pi->mem_gddr5)
1944 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1945
1946 ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
1947 if (ret)
1948 return ret;
1949
1950 ret = ni_populate_smc_acpi_state(rdev, table);
1951 if (ret)
1952 return ret;
1953
1954 table->driverState = table->initialState;
1955
1956 table->ULVState = table->initialState;
1957
1958 ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state,
1959 NISLANDS_INITIAL_STATE_ARB_INDEX);
1960 if (ret)
1961 return ret;
1962
1963 return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
1964 sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end);
1965}
1966
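/*
 * Compute SPLL dividers for an engine clock.  With the 2^14
 * fixed-point feedback divider used below:
 *
 *   fbdiv = engine_clock * (ref_div + 1) * post_div * 16384 / ref_clk
 *
 * e.g. engine_clock = 72500 and ref_clk = 10000 (both presumably in
 * the usual 10 kHz units), ref_div + 1 = 2, post_div = 4 gives
 * fbdiv = 58 * 16384 = 950272.  When the VBIOS provides spread
 * spectrum data, CLK_S/CLK_V then modulate around that setting.
 */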
1967static int ni_calculate_sclk_params(struct radeon_device *rdev,
1968 u32 engine_clock,
1969 NISLANDS_SMC_SCLK_VALUE *sclk)
1970{
1971 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1972 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1973 struct atom_clock_dividers dividers;
1974 u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
1975 u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
1976 u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
1977 u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
1978 u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum;
1979 u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2;
1980 u64 tmp;
1981 u32 reference_clock = rdev->clock.spll.reference_freq;
1982 u32 reference_divider;
1983 u32 fbdiv;
1984 int ret;
1985
1986 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1987 engine_clock, false, &dividers);
1988 if (ret)
1989 return ret;
1990
1991 reference_divider = 1 + dividers.ref_div;
1992
1993
1994	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384; /* 16384 = 2^14 fbdiv scale; "16834" read as a digit swap */
1995 do_div(tmp, reference_clock);
1996 fbdiv = (u32) tmp;
1997
1998 spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
1999 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
2000 spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
2001
2002 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2003 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
2004
2005 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2006 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2007 spll_func_cntl_3 |= SPLL_DITHEN;
2008
2009 if (pi->sclk_ss) {
2010 struct radeon_atom_ss ss;
2011 u32 vco_freq = engine_clock * dividers.post_div;
2012
2013 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2014 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2015 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2016 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2017
2018 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2019 cg_spll_spread_spectrum |= CLK_S(clk_s);
2020 cg_spll_spread_spectrum |= SSEN;
2021
2022 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2023 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2024 }
2025 }
2026
2027 sclk->sclk_value = engine_clock;
2028 sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
2029 sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
2030 sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
2031 sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
2032 sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
2033 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
2034
2035 return 0;
2036}
2037
2038static int ni_populate_sclk_value(struct radeon_device *rdev,
2039 u32 engine_clock,
2040 NISLANDS_SMC_SCLK_VALUE *sclk)
2041{
2042 NISLANDS_SMC_SCLK_VALUE sclk_tmp;
2043 int ret;
2044
2045 ret = ni_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
2046 if (!ret) {
2047 sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
2048 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
2049 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
2050 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
2051 sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
2052 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
2053 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
2054 }
2055
2056 return ret;
2057}
2058
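/*
 * Precompute a 256-entry PLL divider lookup table for the SMC,
 * stepping the engine clock by 512 units per entry (5.12 MHz steps up
 * to roughly 1.3 GHz, if the usual 10 kHz units apply) and packing
 * p_div/fb_div and clk_s/clk_v into the table's bitfields.
 */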
2059static int ni_init_smc_spll_table(struct radeon_device *rdev)
2060{
2061 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2062 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2063 SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
2064 NISLANDS_SMC_SCLK_VALUE sclk_params;
2065 u32 fb_div;
2066 u32 p_div;
2067 u32 clk_s;
2068 u32 clk_v;
2069 u32 sclk = 0;
2070 int i, ret;
2071 u32 tmp;
2072
2073 if (ni_pi->spll_table_start == 0)
2074 return -EINVAL;
2075
2076 spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
2077 if (spll_table == NULL)
2078 return -ENOMEM;
2079
2080 for (i = 0; i < 256; i++) {
2081 ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params);
2082 if (ret)
2083 break;
2084
2085 p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
2086 fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
2087 clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
2088 clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
2089
2090 fb_div &= ~0x00001FFF;
2091 fb_div >>= 1;
2092 clk_v >>= 6;
2093
2094 if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
2095 ret = -EINVAL;
2096
2097 if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2098 ret = -EINVAL;
2099
2100		if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))	/* was a duplicated clk_s test; fb_div was otherwise never range-checked */
2101			ret = -EINVAL;
2102
2103 if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2104 ret = -EINVAL;
2105
2106 if (ret)
2107 break;
2108
2109 tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
2110 ((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
2111 spll_table->freq[i] = cpu_to_be32(tmp);
2112
2113 tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
2114 ((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
2115 spll_table->ss[i] = cpu_to_be32(tmp);
2116
2117 sclk += 512;
2118 }
2119
2120 if (!ret)
2121 ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table,
2122 sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end);
2123
2124 kfree(spll_table);
2125
2126 return ret;
2127}
2128
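/*
 * Compute MPLL settings for a memory clock: AD and (for GDDR5) DQ
 * divider fields, VCO mode, optional memory spread spectrum, and the
 * DLL speed plus the per-pad power-down bits selected by dll_state_on.
 */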
2129static int ni_populate_mclk_value(struct radeon_device *rdev,
2130 u32 engine_clock,
2131 u32 memory_clock,
2132 NISLANDS_SMC_MCLK_VALUE *mclk,
2133 bool strobe_mode,
2134 bool dll_state_on)
2135{
2136 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2137 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2138 u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
2139 u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
2140 u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
2141 u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
2142 u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
2143 u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
2144 u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1;
2145 u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2;
2146 struct atom_clock_dividers dividers;
2147 u32 ibias;
2148 u32 dll_speed;
2149 int ret;
2150 u32 mc_seq_misc7;
2151
2152 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
2153 memory_clock, strobe_mode, &dividers);
2154 if (ret)
2155 return ret;
2156
2157 if (!strobe_mode) {
2158 mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
2159
2160 if (mc_seq_misc7 & 0x8000000)
2161 dividers.post_div = 1;
2162 }
2163
2164 ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
2165
2166 mpll_ad_func_cntl &= ~(CLKR_MASK |
2167 YCLK_POST_DIV_MASK |
2168 CLKF_MASK |
2169 CLKFRAC_MASK |
2170 IBIAS_MASK);
2171 mpll_ad_func_cntl |= CLKR(dividers.ref_div);
2172 mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
2173 mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
2174 mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
2175 mpll_ad_func_cntl |= IBIAS(ibias);
2176
2177 if (dividers.vco_mode)
2178 mpll_ad_func_cntl_2 |= VCO_MODE;
2179 else
2180 mpll_ad_func_cntl_2 &= ~VCO_MODE;
2181
2182 if (pi->mem_gddr5) {
2183 mpll_dq_func_cntl &= ~(CLKR_MASK |
2184 YCLK_POST_DIV_MASK |
2185 CLKF_MASK |
2186 CLKFRAC_MASK |
2187 IBIAS_MASK);
2188 mpll_dq_func_cntl |= CLKR(dividers.ref_div);
2189 mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
2190 mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
2191 mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
2192 mpll_dq_func_cntl |= IBIAS(ibias);
2193
2194 if (strobe_mode)
2195 mpll_dq_func_cntl &= ~PDNB;
2196 else
2197 mpll_dq_func_cntl |= PDNB;
2198
2199 if (dividers.vco_mode)
2200 mpll_dq_func_cntl_2 |= VCO_MODE;
2201 else
2202 mpll_dq_func_cntl_2 &= ~VCO_MODE;
2203 }
2204
2205 if (pi->mclk_ss) {
2206 struct radeon_atom_ss ss;
2207 u32 vco_freq = memory_clock * dividers.post_div;
2208
2209 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2210 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
2211 u32 reference_clock = rdev->clock.mpll.reference_freq;
2212 u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
2213 u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
2214 u32 clk_v = ss.percentage *
2215 (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
2216
2217 mpll_ss1 &= ~CLKV_MASK;
2218 mpll_ss1 |= CLKV(clk_v);
2219
2220 mpll_ss2 &= ~CLKS_MASK;
2221 mpll_ss2 |= CLKS(clk_s);
2222 }
2223 }
2224
2225 dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
2226 memory_clock);
2227
2228 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2229 mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
2230 if (dll_state_on)
2231 mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
2232 MRDCKA1_PDNB |
2233 MRDCKB0_PDNB |
2234 MRDCKB1_PDNB |
2235 MRDCKC0_PDNB |
2236 MRDCKC1_PDNB |
2237 MRDCKD0_PDNB |
2238 MRDCKD1_PDNB);
2239 else
2240 mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
2241 MRDCKA1_PDNB |
2242 MRDCKB0_PDNB |
2243 MRDCKB1_PDNB |
2244 MRDCKC0_PDNB |
2245 MRDCKC1_PDNB |
2246 MRDCKD0_PDNB |
2247 MRDCKD1_PDNB);
2248
2250 mclk->mclk_value = cpu_to_be32(memory_clock);
2251 mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
2252 mclk->vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
2253 mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
2254 mclk->vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
2255 mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
2256 mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
2257 mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
2258 mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
2259
2260 return 0;
2261}
2262
2263static void ni_populate_smc_sp(struct radeon_device *rdev,
2264 struct radeon_ps *radeon_state,
2265 NISLANDS_SMC_SWSTATE *smc_state)
2266{
2267 struct ni_ps *ps = ni_get_ps(radeon_state);
2268 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2269 int i;
2270
2271 for (i = 0; i < ps->performance_level_count - 1; i++)
2272 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
2273
2274 smc_state->levels[ps->performance_level_count - 1].bSP =
2275 cpu_to_be32(pi->psp);
2276}
2277
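/*
 * Translate one driver performance level into SMC form: PCIe gen2
 * flag, sclk/mclk PLL settings, the stutter/EDC/RTT memory flags, and
 * the vddc/vddci/mvdd voltage entries along with their "standard"
 * (leakage-adjusted) counterparts.
 */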
2278static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
2279 struct rv7xx_pl *pl,
2280 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
2281{
2282 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2283 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2284 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2285 int ret;
2286 bool dll_state_on;
2287 u16 std_vddc;
2288 u32 tmp = RREG32(DC_STUTTER_CNTL);
2289
2290 level->gen2PCIE = pi->pcie_gen2 ?
2291 ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
2292
2293 ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
2294 if (ret)
2295 return ret;
2296
2297 level->mcFlags = 0;
2298 if (pi->mclk_stutter_mode_threshold &&
2299 (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
2300 !eg_pi->uvd_enabled &&
2301 (tmp & DC_STUTTER_ENABLE_A) &&
2302 (tmp & DC_STUTTER_ENABLE_B))
2303 level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;
2304
2305 if (pi->mem_gddr5) {
2306 if (pl->mclk > pi->mclk_edc_enable_threshold)
2307 level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
2308 if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
2309 level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;
2310
2311 level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
2312
2313 if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
2314 if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
2315 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2316 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2317 else
2318 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2319 } else {
2320 dll_state_on = false;
2321 if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
2322 level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
2323 }
2324
2325 ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
2326 &level->mclk,
2327 (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
2328 dll_state_on);
2329 } else
2330 ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, 1, 1);
2331
2332 if (ret)
2333 return ret;
2334
2335 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
2336 pl->vddc, &level->vddc);
2337 if (ret)
2338 return ret;
2339
2340 ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
2341 if (ret)
2342 return ret;
2343
2344 ni_populate_std_voltage_value(rdev, std_vddc,
2345 level->vddc.index, &level->std_vddc);
2346
2347 if (eg_pi->vddci_control) {
2348 ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
2349 pl->vddci, &level->vddci);
2350 if (ret)
2351 return ret;
2352 }
2353
2354 ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
2355
2356 return ret;
2357}
2358
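/*
 * Fill the aT words that appear to steer when the SMC moves between
 * adjacent levels: r600_calculate_at() yields a low/high threshold
 * pair per level boundary, scaled by bsp (pbsp for the top level)
 * and packed into the CG_R (down) and CG_L (up) fields.  The UVD
 * branch spaces the thresholds differently while UVD is active.
 */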
2359static int ni_populate_smc_t(struct radeon_device *rdev,
2360 struct radeon_ps *radeon_state,
2361 NISLANDS_SMC_SWSTATE *smc_state)
2362{
2363 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2364 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2365 struct ni_ps *state = ni_get_ps(radeon_state);
2366 u32 a_t;
2367 u32 t_l, t_h;
2368 u32 high_bsp;
2369 int i, ret;
2370
2371 if (state->performance_level_count >= 9)
2372 return -EINVAL;
2373
2374 if (state->performance_level_count < 2) {
2375 a_t = CG_R(0xffff) | CG_L(0);
2376 smc_state->levels[0].aT = cpu_to_be32(a_t);
2377 return 0;
2378 }
2379
2380 smc_state->levels[0].aT = cpu_to_be32(0);
2381
2382 for (i = 0; i <= state->performance_level_count - 2; i++) {
2383 if (eg_pi->uvd_enabled)
2384 ret = r600_calculate_at(
2385 1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
2386 100 * R600_AH_DFLT,
2387 state->performance_levels[i + 1].sclk,
2388 state->performance_levels[i].sclk,
2389 &t_l,
2390 &t_h);
2391 else
2392 ret = r600_calculate_at(
2393 1000 * (i + 1),
2394 100 * R600_AH_DFLT,
2395 state->performance_levels[i + 1].sclk,
2396 state->performance_levels[i].sclk,
2397 &t_l,
2398 &t_h);
2399
2400 if (ret) {
2401 t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
2402 t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
2403 }
2404
2405 a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
2406 a_t |= CG_R(t_l * pi->bsp / 20000);
2407 smc_state->levels[i].aT = cpu_to_be32(a_t);
2408
2409 high_bsp = (i == state->performance_level_count - 2) ?
2410 pi->pbsp : pi->bsp;
2411
2412 a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
2413 smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
2414 }
2415
2416 return 0;
2417}
2418
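/*
 * DPM2 power containment: compute the adjusted TDP limits, upload the
 * power boost limit, then give every level above the first a MaxPS
 * pulse-skip percentage derived from the gap between its sclk and the
 * previous level's (level 0 is never skipped).
 */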
2419static int ni_populate_power_containment_values(struct radeon_device *rdev,
2420 struct radeon_ps *radeon_state,
2421 NISLANDS_SMC_SWSTATE *smc_state)
2422{
2423 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2424 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2425 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2426 struct ni_ps *state = ni_get_ps(radeon_state);
2427 u32 prev_sclk;
2428 u32 max_sclk;
2429 u32 min_sclk;
2430 int i, ret;
2431 u32 tdp_limit;
2432 u32 near_tdp_limit;
2433 u32 power_boost_limit;
2434 u8 max_ps_percent;
2435
2436	if (!ni_pi->enable_power_containment)
2437 return 0;
2438
2439 if (state->performance_level_count == 0)
2440 return -EINVAL;
2441
2442 if (smc_state->levelCount != state->performance_level_count)
2443 return -EINVAL;
2444
2445 ret = ni_calculate_adjusted_tdp_limits(rdev,
2446 false, /* ??? */
2447 rdev->pm.dpm.tdp_adjustment,
2448 &tdp_limit,
2449 &near_tdp_limit);
2450 if (ret)
2451 return ret;
2452
2453 power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);
2454
2455 ret = rv770_write_smc_sram_dword(rdev,
2456 pi->state_table_start +
2457 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
2458 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
2459 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
2460 pi->sram_end);
2461 if (ret)
2462 power_boost_limit = 0;
2463
2464 smc_state->levels[0].dpm2.MaxPS = 0;
2465 smc_state->levels[0].dpm2.NearTDPDec = 0;
2466 smc_state->levels[0].dpm2.AboveSafeInc = 0;
2467 smc_state->levels[0].dpm2.BelowSafeInc = 0;
2468 smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;
2469
2470 for (i = 1; i < state->performance_level_count; i++) {
2471 prev_sclk = state->performance_levels[i-1].sclk;
2472 max_sclk = state->performance_levels[i].sclk;
2473 max_ps_percent = (i != (state->performance_level_count - 1)) ?
2474 NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;
2475
2476 if (max_sclk < prev_sclk)
2477 return -EINVAL;
2478
2479 if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
2480 min_sclk = max_sclk;
2481 else if (1 == i)
2482 min_sclk = prev_sclk;
2483 else
2484 min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
2485
2486 if (min_sclk < state->performance_levels[0].sclk)
2487 min_sclk = state->performance_levels[0].sclk;
2488
2489 if (min_sclk == 0)
2490 return -EINVAL;
2491
2492 smc_state->levels[i].dpm2.MaxPS =
2493 (u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
2494 smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
2495 smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
2496 smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
2497 smc_state->levels[i].stateFlags |=
2498 ((i != (state->performance_level_count - 1)) && power_boost_limit) ?
2499 PPSMC_STATEFLAG_POWERBOOST : 0;
2500 }
2501
2502 return 0;
2503}
2504
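/*
 * SQ power ramping: if any NISLANDS_DPM2_SQ_RAMP_* constant would
 * overflow its SQ_POWER_THROTTLE(_2) bitfield, fall back to the
 * all-ones masks, which effectively disable throttling; otherwise
 * program MIN/MAX power and the ramp ratios for every level at or
 * above the ramping threshold.
 */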
2505static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
2506 struct radeon_ps *radeon_state,
2507 NISLANDS_SMC_SWSTATE *smc_state)
2508{
2509 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2510 struct ni_ps *state = ni_get_ps(radeon_state);
2511 u32 sq_power_throttle;
2512 u32 sq_power_throttle2;
2513 bool enable_sq_ramping = ni_pi->enable_sq_ramping;
2514 int i;
2515
2516 if (state->performance_level_count == 0)
2517 return -EINVAL;
2518
2519 if (smc_state->levelCount != state->performance_level_count)
2520 return -EINVAL;
2521
2522 if (rdev->pm.dpm.sq_ramping_threshold == 0)
2523 return -EINVAL;
2524
2525 if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
2526 enable_sq_ramping = false;
2527
2528 if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
2529 enable_sq_ramping = false;
2530
2531 if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
2532 enable_sq_ramping = false;
2533
2534 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2535 enable_sq_ramping = false;
2536
2537	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))	/* ">" matches the four checks above; the original "<=" disabled ramping whenever the ratio fit */
2538		enable_sq_ramping = false;
2539
2540 for (i = 0; i < state->performance_level_count; i++) {
2541 sq_power_throttle = 0;
2542 sq_power_throttle2 = 0;
2543
2544 if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
2545 enable_sq_ramping) {
2546 sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
2547 sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
2548 sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
2549 sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
2550 sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
2551 } else {
2552 sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
2553 sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
2554 }
2555
2556 smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
2557 smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
2558 }
2559
2560 return 0;
2561}
2562
2563static int ni_enable_power_containment(struct radeon_device *rdev,
2564 struct radeon_ps *radeon_new_state,
2565 bool enable)
2566{
2567 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2568 PPSMC_Result smc_result;
2569 int ret = 0;
2570
2571 if (ni_pi->enable_power_containment) {
2572 if (enable) {
2573 if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
2574 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
2575 if (smc_result != PPSMC_Result_OK) {
2576 ret = -EINVAL;
2577 ni_pi->pc_enabled = false;
2578 } else {
2579 ni_pi->pc_enabled = true;
2580 }
2581 }
2582 } else {
2583 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
2584 if (smc_result != PPSMC_Result_OK)
2585 ret = -EINVAL;
2586 ni_pi->pc_enabled = false;
2587 }
2588 }
2589
2590 return ret;
2591}
2592
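/*
 * Convert a whole power state: one SMC level per performance level.
 * With power containment on, the display watermark goes LOW below
 * "threshold" (the top level's sclk; the "* 100 / 100" is a no-op
 * percentage hook), otherwise simply for the two lowest levels.
 * Failures while filling the containment/ramping data only disable
 * those features rather than failing the conversion.
 */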
2593static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
2594 struct radeon_ps *radeon_state,
2595 NISLANDS_SMC_SWSTATE *smc_state)
2596{
2597 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2598 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2599 struct ni_ps *state = ni_get_ps(radeon_state);
2600 int i, ret;
2601 u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100;
2602
2603 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
2604 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
2605
2606 smc_state->levelCount = 0;
2607
2608 if (state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE)
2609 return -EINVAL;
2610
2611 for (i = 0; i < state->performance_level_count; i++) {
2612 ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i],
2613 &smc_state->levels[i]);
2614 smc_state->levels[i].arbRefreshState =
2615 (u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i);
2616
2617 if (ret)
2618 return ret;
2619
2620 if (ni_pi->enable_power_containment)
2621 smc_state->levels[i].displayWatermark =
2622 (state->performance_levels[i].sclk < threshold) ?
2623 PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
2624 else
2625 smc_state->levels[i].displayWatermark = (i < 2) ?
2626 PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
2627
2628 if (eg_pi->dynamic_ac_timing)
2629 smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
2630 else
2631 smc_state->levels[i].ACIndex = 0;
2632
2633 smc_state->levelCount++;
2634 }
2635
2636 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold,
2637 cpu_to_be32(threshold / 512));
2638
2639 ni_populate_smc_sp(rdev, radeon_state, smc_state);
2640
2641 ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state);
2642 if (ret)
2643 ni_pi->enable_power_containment = false;
2644
2645 ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state);
2646 if (ret)
2647 ni_pi->enable_sq_ramping = false;
2648
2649 return ni_populate_smc_t(rdev, radeon_state, smc_state);
2650}
2651
2652static int ni_upload_sw_state(struct radeon_device *rdev,
2653 struct radeon_ps *radeon_new_state)
2654{
2655 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2656 u16 address = pi->state_table_start +
2657 offsetof(NISLANDS_SMC_STATETABLE, driverState);
2658 u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) +
2659 ((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL));
2660 int ret;
2661 NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL);
2662
2663 if (smc_state == NULL)
2664 return -ENOMEM;
2665
2666 ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
2667 if (ret)
2668 goto done;
2669
2670 ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end);
2671
2672done:
2673 kfree(smc_state);
2674
2675 return ret;
2676}
2677
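/*
 * Expand the VBIOS MC register table with derived columns: a
 * MC_SEQ_MISC1 source column gains MC_PMG_CMD_EMRS and MRS columns
 * spliced from the live registers' top halves and bits of the table
 * entry, and MC_SEQ_RESERVE_M gains an MRS1 column.  j starts at
 * table->last and tracks the next free column.
 */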
2678static int ni_set_mc_special_registers(struct radeon_device *rdev,
2679 struct ni_mc_reg_table *table)
2680{
2681 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2682 u8 i, j, k;
2683 u32 temp_reg;
2684
2685 for (i = 0, j = table->last; i < table->last; i++) {
2686 switch (table->mc_reg_address[i].s1) {
2687 case MC_SEQ_MISC1 >> 2:
2688 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2689 return -EINVAL;
2690 temp_reg = RREG32(MC_PMG_CMD_EMRS);
2691 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
2692 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
2693 for (k = 0; k < table->num_entries; k++)
2694 table->mc_reg_table_entry[k].mc_data[j] =
2695 ((temp_reg & 0xffff0000)) |
2696 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2697 j++;
2698 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2699 return -EINVAL;
2700
2701 temp_reg = RREG32(MC_PMG_CMD_MRS);
2702 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
2703 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
2704			for (k = 0; k < table->num_entries; k++) {
2705 table->mc_reg_table_entry[k].mc_data[j] =
2706 (temp_reg & 0xffff0000) |
2707 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2708 if (!pi->mem_gddr5)
2709 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2710 }
2711 j++;
2712 if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2713 return -EINVAL;
2714 break;
2715		case MC_SEQ_RESERVE_M >> 2:
			if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)	/* slot j is written below; this guard was missing */
				return -EINVAL;
2716			temp_reg = RREG32(MC_PMG_CMD_MRS1);
2717 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
2718 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
2719 for (k = 0; k < table->num_entries; k++)
2720 table->mc_reg_table_entry[k].mc_data[j] =
2721 (temp_reg & 0xffff0000) |
2722 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2723 j++;
2724 if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2725 return -EINVAL;
2726 break;
2727 default:
2728 break;
2729 }
2730 }
2731
2732 table->last = j;
2733
2734 return 0;
2735}
2736
2737static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
2738{
2739 bool result = true;
2740
2741 switch (in_reg) {
2742 case MC_SEQ_RAS_TIMING >> 2:
2743 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
2744 break;
2745 case MC_SEQ_CAS_TIMING >> 2:
2746 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
2747 break;
2748 case MC_SEQ_MISC_TIMING >> 2:
2749 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
2750 break;
2751 case MC_SEQ_MISC_TIMING2 >> 2:
2752 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
2753 break;
2754 case MC_SEQ_RD_CTL_D0 >> 2:
2755 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
2756 break;
2757 case MC_SEQ_RD_CTL_D1 >> 2:
2758 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
2759 break;
2760 case MC_SEQ_WR_CTL_D0 >> 2:
2761 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
2762 break;
2763 case MC_SEQ_WR_CTL_D1 >> 2:
2764 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
2765 break;
2766 case MC_PMG_CMD_EMRS >> 2:
2767 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
2768 break;
2769 case MC_PMG_CMD_MRS >> 2:
2770 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
2771 break;
2772 case MC_PMG_CMD_MRS1 >> 2:
2773 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
2774 break;
2775 case MC_SEQ_PMG_TIMING >> 2:
2776 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
2777 break;
2778 case MC_PMG_CMD_MRS2 >> 2:
2779 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
2780 break;
2781 default:
2782 result = false;
2783 break;
2784 }
2785
2786 return result;
2787}
2788
2789static void ni_set_valid_flag(struct ni_mc_reg_table *table)
2790{
2791 u8 i, j;
2792
2793 for (i = 0; i < table->last; i++) {
2794 for (j = 1; j < table->num_entries; j++) {
2795 if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
2796 table->valid_flag |= 1 << i;
2797 break;
2798 }
2799 }
2800 }
2801}
2802
2803static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
2804{
2805 u32 i;
2806 u16 address;
2807
2808 for (i = 0; i < table->last; i++)
2809 table->mc_reg_address[i].s0 =
2810 ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
2811 address : table->mc_reg_address[i].s1;
2812}
2813
2814static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
2815 struct ni_mc_reg_table *ni_table)
2816{
2817 u8 i, j;
2818
2819 if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2820 return -EINVAL;
2821 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
2822 return -EINVAL;
2823
2824 for (i = 0; i < table->last; i++)
2825 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2826 ni_table->last = table->last;
2827
2828 for (i = 0; i < table->num_entries; i++) {
2829 ni_table->mc_reg_table_entry[i].mclk_max =
2830 table->mc_reg_table_entry[i].mclk_max;
2831 for (j = 0; j < table->last; j++)
2832 ni_table->mc_reg_table_entry[i].mc_data[j] =
2833 table->mc_reg_table_entry[i].mc_data[j];
2834 }
2835 ni_table->num_entries = table->num_entries;
2836
2837 return 0;
2838}
2839
2840static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
2841{
2842 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2843 int ret;
2844 struct atom_mc_reg_table *table;
2845 struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
2846 u8 module_index = rv770_get_memory_module_index(rdev);
2847
2848 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
2849 if (!table)
2850 return -ENOMEM;
2851
2852 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
2853 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
2854 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
2855 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
2856 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
2857 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
2858 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
2859 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
2860 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
2861 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
2862 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
2863 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
2864 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
2865
2866 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
2867
2868 if (ret)
2869 goto init_mc_done;
2870
2871 ret = ni_copy_vbios_mc_reg_table(table, ni_table);
2872
2873 if (ret)
2874 goto init_mc_done;
2875
2876 ni_set_s0_mc_reg_index(ni_table);
2877
2878 ret = ni_set_mc_special_registers(rdev, ni_table);
2879
2880 if (ret)
2881 goto init_mc_done;
2882
2883 ni_set_valid_flag(ni_table);
2884
2885init_mc_done:
2886 kfree(table);
2887
2888 return ret;
2889}
2890
2891static void ni_populate_mc_reg_addresses(struct radeon_device *rdev,
2892 SMC_NIslands_MCRegisters *mc_reg_table)
2893{
2894 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2895 u32 i, j;
2896
2897 for (i = 0, j = 0; j < ni_pi->mc_reg_table.last; j++) {
2898 if (ni_pi->mc_reg_table.valid_flag & (1 << j)) {
2899 if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2900 break;
2901 mc_reg_table->address[i].s0 =
2902 cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s0);
2903 mc_reg_table->address[i].s1 =
2904 cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s1);
2905 i++;
2906 }
2907 }
2908 mc_reg_table->last = (u8)i;
2909}
2910
2912static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
2913 SMC_NIslands_MCRegisterSet *data,
2914 u32 num_entries, u32 valid_flag)
2915{
2916 u32 i, j;
2917
2918 for (i = 0, j = 0; j < num_entries; j++) {
2919 if (valid_flag & (1 << j)) {
2920 data->value[i] = cpu_to_be32(entry->mc_data[j]);
2921 i++;
2922 }
2923 }
2924}
2925
2926static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
2927 struct rv7xx_pl *pl,
2928 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
2929{
2930 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2931 u32 i = 0;
2932
2933 for (i = 0; i < ni_pi->mc_reg_table.num_entries; i++) {
2934 if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
2935 break;
2936 }
2937
2938 if ((i == ni_pi->mc_reg_table.num_entries) && (i > 0))
2939 --i;
2940
2941 ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[i],
2942 mc_reg_table_data,
2943 ni_pi->mc_reg_table.last,
2944 ni_pi->mc_reg_table.valid_flag);
2945}
2946
2947static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
2948 struct radeon_ps *radeon_state,
2949 SMC_NIslands_MCRegisters *mc_reg_table)
2950{
2951 struct ni_ps *state = ni_get_ps(radeon_state);
2952 int i;
2953
2954 for (i = 0; i < state->performance_level_count; i++) {
2955 ni_convert_mc_reg_table_entry_to_smc(rdev,
2956 &state->performance_levels[i],
2957 &mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
2958 }
2959}
2960
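/*
 * Initial upload of the MC register table: slot 0 holds the boot
 * level's values, slot 1 a copy of the lowest VBIOS entry, and the
 * driver-state slots one entry per boot-state performance level.
 */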
2961static int ni_populate_mc_reg_table(struct radeon_device *rdev,
2962 struct radeon_ps *radeon_boot_state)
2963{
2964 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2965 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2966 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2967 struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
2968 SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
2969
2970 memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));
2971
2972 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);
2973
2974 ni_populate_mc_reg_addresses(rdev, mc_reg_table);
2975
2976 ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
2977 &mc_reg_table->data[0]);
2978
2979 ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
2980 &mc_reg_table->data[1],
2981 ni_pi->mc_reg_table.last,
2982 ni_pi->mc_reg_table.valid_flag);
2983
2984 ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);
2985
2986 return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
2987 (u8 *)mc_reg_table,
2988 sizeof(SMC_NIslands_MCRegisters),
2989 pi->sram_end);
2990}
2991
2992static int ni_upload_mc_reg_table(struct radeon_device *rdev,
2993 struct radeon_ps *radeon_new_state)
2994{
2995 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2996 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2997 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2998 struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
2999 SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
3000 u16 address;
3001
3002 memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));
3003
3004 ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, mc_reg_table);
3005
3006 address = eg_pi->mc_reg_table_start +
3007 (u16)offsetof(SMC_NIslands_MCRegisters, data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
3008
3009 return rv770_copy_bytes_to_smc(rdev, address,
3010 (u8 *)&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
3011 sizeof(SMC_NIslands_MCRegisterSet) * ni_new_state->performance_level_count,
3012 pi->sram_end);
3013}
3014
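/*
 * Build the leakage LUT the driver-calculated way: evaluate the
 * leakage model at every vddc entry for temperatures in 8 degree
 * steps (t is in millidegrees here), clamped below by the configured
 * minimum, and pad unused voltage columns with the maximum observed
 * leakage.
 */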
3015static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
3016 PP_NIslands_CACTABLES *cac_tables)
3017{
3018 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3019 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3020 u32 leakage = 0;
3021 unsigned int i, j, table_size;
3022 s32 t;
3023 u32 smc_leakage, max_leakage = 0;
3024 u32 scaling_factor;
3025
3026 table_size = eg_pi->vddc_voltage_table.count;
3027
3028 if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
3029 table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
3030
3031 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
3032
3033 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
3034 for (j = 0; j < table_size; j++) {
3035 t = (1000 * ((i + 1) * 8));
3036
3037 if (t < ni_pi->cac_data.leakage_minimum_temperature)
3038 t = ni_pi->cac_data.leakage_minimum_temperature;
3039
3040 ni_calculate_leakage_for_v_and_t(rdev,
3041 &ni_pi->cac_data.leakage_coefficients,
3042 eg_pi->vddc_voltage_table.entries[j].value,
3043 t,
3044 ni_pi->cac_data.i_leakage,
3045 &leakage);
3046
3047 smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
3048 if (smc_leakage > max_leakage)
3049 max_leakage = smc_leakage;
3050
3051 cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
3052 }
3053 }
3054
3055 for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
3056 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3057 cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
3058 }
3059 return 0;
3060}
3061
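/*
 * Simplified variant: leakage comes straight from the platform's
 * cac_leakage_table with no temperature model, so every temperature
 * row of the LUT gets the same scaled value per voltage column.
 */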
3062static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
3063 PP_NIslands_CACTABLES *cac_tables)
3064{
3065 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3066 struct radeon_cac_leakage_table *leakage_table =
3067 &rdev->pm.dpm.dyn_state.cac_leakage_table;
3068 u32 i, j, table_size;
3069 u32 smc_leakage, max_leakage = 0;
3070 u32 scaling_factor;
3071
3072 if (!leakage_table)
3073 return -EINVAL;
3074
3075 table_size = leakage_table->count;
3076
3077 if (eg_pi->vddc_voltage_table.count != table_size)
3078 table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
3079 eg_pi->vddc_voltage_table.count : leakage_table->count;
3080
3081 if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
3082 table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
3083
3084 if (table_size == 0)
3085 return -EINVAL;
3086
3087 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
3088
3089 for (j = 0; j < table_size; j++) {
3090 smc_leakage = leakage_table->entries[j].leakage;
3091
3092 if (smc_leakage > max_leakage)
3093 max_leakage = smc_leakage;
3094
3095 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3096 cac_tables->cac_lkge_lut[i][j] =
3097 cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
3098 }
3099
3100 for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
3101 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3102 cac_tables->cac_lkge_lut[i][j] =
3103 cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
3104 }
3105 return 0;
3106}
3107
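/*
 * CAC (capacitance * activity) setup: program the TID weights in
 * CG_CAC_CTRL, fill the dc_cac and BIF LUTs, then build either the
 * driver-calculated or the simplified leakage table.  Note that on
 * failure CAC and power containment are quietly disabled and 0 is
 * still returned, so DPM continues without them.
 */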
3108static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
3109{
3110 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3111 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3112 PP_NIslands_CACTABLES *cac_tables = NULL;
3113 int i, ret;
3114 u32 reg;
3115
3116	if (!ni_pi->enable_cac)
3117 return 0;
3118
3119 cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
3120 if (!cac_tables)
3121 return -ENOMEM;
3122
3123 reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
3124 reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
3125 TID_UNIT(ni_pi->cac_weights->tid_unit));
3126 WREG32(CG_CAC_CTRL, reg);
3127
3128 for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
3129 ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];
3130
3131 for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
3132 cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];
3133
3134 ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
3135 ni_pi->cac_data.pwr_const = 0;
3136 ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
3137 ni_pi->cac_data.bif_cac_value = 0;
3138 ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
3139 ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
3140 ni_pi->cac_data.allow_ovrflw = 0;
3141 ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
3142 ni_pi->cac_data.num_win_tdp = 0;
3143 ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;
3144
3145 if (ni_pi->driver_calculate_cac_leakage)
3146 ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
3147 else
3148 ret = ni_init_simplified_leakage_table(rdev, cac_tables);
3149
3150 if (ret)
3151 goto done_free;
3152
3153 cac_tables->pwr_const = cpu_to_be32(ni_pi->cac_data.pwr_const);
3154 cac_tables->dc_cacValue = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
3155 cac_tables->bif_cacValue = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
3156 cac_tables->AllowOvrflw = ni_pi->cac_data.allow_ovrflw;
3157 cac_tables->MCWrWeight = ni_pi->cac_data.mc_wr_weight;
3158 cac_tables->MCRdWeight = ni_pi->cac_data.mc_rd_weight;
3159 cac_tables->numWin_TDP = ni_pi->cac_data.num_win_tdp;
3160 cac_tables->l2numWin_TDP = ni_pi->cac_data.l2num_win_tdp;
3161 cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;
3162
3163 ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
3164 sizeof(PP_NIslands_CACTABLES), pi->sram_end);
3165
3166done_free:
3167 if (ret) {
3168 ni_pi->enable_cac = false;
3169 ni_pi->enable_power_containment = false;
3170 }
3171
3172 kfree(cac_tables);
3173
3174 return 0;
3175}
3176
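/*
 * Push the per-block CAC weights (TCP/TCC/CB/DB/SX/SPI/LDS/BIF/CP/
 * PA/VGT/DC/UVD and the SQ thresholds) into the CG_CAC_REGION_* and
 * SQ_CAC_THRESHOLD registers; only needed when the weight table marks
 * extra configuration as required.
 */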
3177static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev)
3178{
3179 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3180 u32 reg;
3181
3182 if (!ni_pi->enable_cac ||
3183 !ni_pi->cac_configuration_required)
3184 return 0;
3185
3186 if (ni_pi->cac_weights == NULL)
3187 return -EINVAL;
3188
3189 reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK |
3190 WEIGHT_TCP_SIG1_MASK |
3191 WEIGHT_TA_SIG_MASK);
3192 reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) |
3193 WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) |
3194 WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig));
3195 WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg);
3196
3197 reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK |
3198 WEIGHT_TCC_EN1_MASK |
3199 WEIGHT_TCC_EN2_MASK);
3200 reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) |
3201 WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) |
3202 WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2));
3203 WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg);
3204
3205 reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK |
3206 WEIGHT_CB_EN1_MASK |
3207 WEIGHT_CB_EN2_MASK |
3208 WEIGHT_CB_EN3_MASK);
3209 reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) |
3210 WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) |
3211 WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) |
3212 WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3));
3213 WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg);
3214
3215 reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK |
3216 WEIGHT_DB_SIG1_MASK |
3217 WEIGHT_DB_SIG2_MASK |
3218 WEIGHT_DB_SIG3_MASK);
3219 reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) |
3220 WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) |
3221 WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) |
3222 WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3));
3223 WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg);
3224
3225 reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK |
3226 WEIGHT_SXM_SIG1_MASK |
3227 WEIGHT_SXM_SIG2_MASK |
3228 WEIGHT_SXS_SIG0_MASK |
3229 WEIGHT_SXS_SIG1_MASK);
3230 reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) |
3231 WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) |
3232 WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) |
3233 WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) |
3234 WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1));
3235 WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg);
3236
3237 reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK |
3238 WEIGHT_XBR_1_MASK |
3239 WEIGHT_XBR_2_MASK |
3240 WEIGHT_SPI_SIG0_MASK);
3241 reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) |
3242 WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) |
3243 WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) |
3244 WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0));
3245 WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg);
3246
3247 reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK |
3248 WEIGHT_SPI_SIG2_MASK |
3249 WEIGHT_SPI_SIG3_MASK |
3250 WEIGHT_SPI_SIG4_MASK |
3251 WEIGHT_SPI_SIG5_MASK);
3252 reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) |
3253 WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) |
3254 WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) |
3255 WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) |
3256 WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5));
3257 WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg);
3258
3259 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK |
3260 WEIGHT_LDS_SIG1_MASK |
3261 WEIGHT_SC_MASK);
3262 reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) |
3263 WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) |
3264 WEIGHT_SC(ni_pi->cac_weights->weight_sc));
3265 WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg);
3266
3267 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK |
3268 WEIGHT_CP_MASK |
3269 WEIGHT_PA_SIG0_MASK |
3270 WEIGHT_PA_SIG1_MASK |
3271 WEIGHT_VGT_SIG0_MASK);
3272 reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) |
3273 WEIGHT_CP(ni_pi->cac_weights->weight_cp) |
3274 WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) |
3275 WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) |
3276 WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0));
3277 WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg);
3278
3279 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK |
3280 WEIGHT_VGT_SIG2_MASK |
3281 WEIGHT_DC_SIG0_MASK |
3282 WEIGHT_DC_SIG1_MASK |
3283 WEIGHT_DC_SIG2_MASK);
3284 reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) |
3285 WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) |
3286 WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) |
3287 WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) |
3288 WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2));
3289 WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg);
3290
3291 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK |
3292 WEIGHT_UVD_SIG0_MASK |
3293 WEIGHT_UVD_SIG1_MASK |
3294 WEIGHT_SPARE0_MASK |
3295 WEIGHT_SPARE1_MASK);
3296 reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) |
3297 WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) |
3298 WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) |
3299 WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) |
3300 WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1));
3301 WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg);
3302
3303 reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & ~(WEIGHT_SQ_VSP_MASK |
3304 WEIGHT_SQ_VSP0_MASK);
3305 reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) |
3306 WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0));
3307 WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg);
3308
3309 reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK);
3310 reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr);
3311 WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg);
3312
3313 reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK |
3314 OVR_VAL_SPARE_0_MASK |
3315 OVR_MODE_SPARE_1_MASK |
3316 OVR_VAL_SPARE_1_MASK);
3317 reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) |
3318 OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) |
3319 OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) |
3320 OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1));
3321 WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg);
3322
3323 reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK |
3324 VSP0_MASK |
3325 GPR_MASK);
3326 reg |= (VSP(ni_pi->cac_weights->vsp) |
3327 VSP0(ni_pi->cac_weights->vsp0) |
3328 GPR(ni_pi->cac_weights->gpr));
3329 WREG32(SQ_CAC_THRESHOLD, reg);
3330
3331 reg = (MCDW_WR_ENABLE |
3332 MCDX_WR_ENABLE |
3333 MCDY_WR_ENABLE |
3334 MCDZ_WR_ENABLE |
3335 INDEX(0x09D4));
3336 WREG32(MC_CG_CONFIG, reg);
3337
3338 reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) |
3339 WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) |
3340 ALLOW_OVERFLOW);
3341 WREG32(MC_CG_DATAPORT, reg);
3342
3343 return 0;
3344}
3345
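Every register update in ni_initialize_hardware_cac_manager() above follows the same read-modify-write idiom: read the register, clear the target fields with their *_MASK macros, OR in the new values through the field setters, write back, so unrelated bits survive. Below is a minimal standalone sketch of that idiom; FIELD_A/FIELD_B and the stub register are hypothetical stand-ins, not nid.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-field register: A in bits 0..5, B in bits 6..11,
 * mirroring the WEIGHT_*()/WEIGHT_*_MASK pairs used above. */
#define FIELD_A(x)   ((uint32_t)(x) << 0)
#define FIELD_A_MASK (0x3fu << 0)
#define FIELD_B(x)   ((uint32_t)(x) << 6)
#define FIELD_B_MASK (0x3fu << 6)

static uint32_t fake_reg = 0xdeadbeef; /* stands in for the MMIO register */

int main(void)
{
	/* Read-modify-write: only A and B change; every other bit of the
	 * register keeps its previous value. */
	uint32_t reg = fake_reg & ~(FIELD_A_MASK | FIELD_B_MASK);
	reg |= FIELD_A(0x2a) | FIELD_B(0x15);
	fake_reg = reg;

	printf("reg = 0x%08x\n", (unsigned int)fake_reg); /* 0xdeadb56a */
	return 0;
}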
3346static int ni_enable_smc_cac(struct radeon_device *rdev,
3347 struct radeon_ps *radeon_new_state,
3348 bool enable)
3349{
3350 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3351 int ret = 0;
3352 PPSMC_Result smc_result;
3353
3354 if (ni_pi->enable_cac) {
3355 if (enable) {
3356 if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
3357 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln);
3358
3359 if (ni_pi->support_cac_long_term_average) {
3360 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
3361 if (PPSMC_Result_OK != smc_result)
3362 ni_pi->support_cac_long_term_average = false;
3363 }
3364
3365 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
3366 if (PPSMC_Result_OK != smc_result)
3367 ret = -EINVAL;
3368
3369 ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
3370 }
3371 } else if (ni_pi->cac_enabled) {
3372 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
3373
3374 ni_pi->cac_enabled = false;
3375
3376 if (ni_pi->support_cac_long_term_average) {
3377 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
3378 if (PPSMC_Result_OK != smc_result)
3379 ni_pi->support_cac_long_term_average = false;
3380 }
3381 }
3382 }
3383
3384 return ret;
3385}
3386
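ni_enable_smc_cac() treats SMC failures unevenly: when the optional long-term-average message is refused, the feature flag is simply dropped, but when the core PPSMC_MSG_EnableCac message fails the error is returned to the caller. A hedged, self-contained model of that degrade-on-failure policy follows; the message IDs and the failing stub mailbox are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

typedef enum { RESULT_OK, RESULT_FAILED } result_t;

static bool support_lta = true;  /* optional feature, may be dropped */
static bool cac_on = false;

/* Stub mailbox: pretend every odd-numbered message is rejected. */
static result_t send_msg(int msg) { return (msg & 1) ? RESULT_FAILED : RESULT_OK; }

static int enable_cac(void)
{
	int ret = 0;

	if (support_lta && send_msg(1) != RESULT_OK)
		support_lta = false;  /* optional feature: degrade silently */

	if (send_msg(2) != RESULT_OK)
		ret = -1;             /* core feature: propagate the failure */

	cac_on = (ret == 0);
	return ret;
}

int main(void)
{
	int ret = enable_cac();

	printf("ret=%d lta=%d cac=%d\n", ret, support_lta, cac_on);
	return 0;
}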
3387static int ni_pcie_performance_request(struct radeon_device *rdev,
3388 u8 perf_req, bool advertise)
3389{
3390 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3391
3392#if defined(CONFIG_ACPI)
3393 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
3394 (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
3395 if (eg_pi->pcie_performance_request_registered == false)
3396 radeon_acpi_pcie_notify_device_ready(rdev);
3397 eg_pi->pcie_performance_request_registered = true;
3398 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
3399 } else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
3400 eg_pi->pcie_performance_request_registered) {
3401 eg_pi->pcie_performance_request_registered = false;
3402 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
3403 }
3404#endif
3405 return 0;
3406}
3407
3408static int ni_advertise_gen2_capability(struct radeon_device *rdev)
3409{
3410 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3411 u32 tmp;
3412
3413 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3414
3415 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3416 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
3417 pi->pcie_gen2 = true;
3418 else
3419 pi->pcie_gen2 = false;
3420
3421 if (!pi->pcie_gen2)
3422 ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
3423
3424 return 0;
3425}
3426
3427static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
3428 bool enable)
3429{
3430 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3431 u32 tmp, bif;
3432
3433 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3434
3435 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3436 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3437 if (enable) {
3438 if (!pi->boot_in_gen2) {
3439 bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
3440 bif |= CG_CLIENT_REQ(0xd);
3441 WREG32(CG_BIF_REQ_AND_RSP, bif);
3442 }
3443 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
3444 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
3445 tmp |= LC_GEN2_EN_STRAP;
3446
3447 tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3448 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
3449 udelay(10);
3450 tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3451 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
3452 } else {
3453 if (!pi->boot_in_gen2) {
3454 bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
3455 bif |= CG_CLIENT_REQ(0xd);
3456 WREG32(CG_BIF_REQ_AND_RSP, bif);
3457
3458 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
3459 tmp &= ~LC_GEN2_EN_STRAP;
3460 }
3461 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
3462 }
3463 }
3464}
3465
3466static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
3467 bool enable)
3468{
3469 ni_enable_bif_dynamic_pcie_gen2(rdev, enable);
3470
3471 if (enable)
3472 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
3473 else
3474 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
3475}
3476
3477void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
3478 struct radeon_ps *new_ps,
3479 struct radeon_ps *old_ps)
3480{
3481 struct ni_ps *new_state = ni_get_ps(new_ps);
3482 struct ni_ps *current_state = ni_get_ps(old_ps);
3483
3484 if ((new_ps->vclk == old_ps->vclk) &&
3485 (new_ps->dclk == old_ps->dclk))
3486 return;
3487
3488 if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
3489 current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3490 return;
3491
3492 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
3493}
3494
3495void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
3496 struct radeon_ps *new_ps,
3497 struct radeon_ps *old_ps)
3498{
3499 struct ni_ps *new_state = ni_get_ps(new_ps);
3500 struct ni_ps *current_state = ni_get_ps(old_ps);
3501
3502 if ((new_ps->vclk == old_ps->vclk) &&
3503 (new_ps->dclk == old_ps->dclk))
3504 return;
3505
3506 if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
3507 current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3508 return;
3509
3510 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
3511}
3512
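Together, the before/after pair above encodes one ordering rule: reprogram VCLK/DCLK while the engine clock sits at the higher of the old and new top levels, i.e. before the switch when the top sclk is about to drop, and after it when the top sclk has just risen. A small standalone check of that decision; the clock values are made up.

#include <stdbool.h>
#include <stdio.h>

/* True when UVD clocks must change before the engine clock: exactly the
 * case the first guard pair above selects, i.e. the top sclk is dropping. */
static bool set_uvd_before_eng(unsigned int cur_top_sclk,
			       unsigned int new_top_sclk)
{
	return new_top_sclk < cur_top_sclk;
}

int main(void)
{
	printf("80000 -> 50000: before=%d\n", set_uvd_before_eng(80000, 50000));
	printf("50000 -> 80000: before=%d\n", set_uvd_before_eng(50000, 80000));
	return 0;
}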
3513void ni_dpm_setup_asic(struct radeon_device *rdev)
3514{
3515 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3516
3517 ni_read_clock_registers(rdev);
3518 btc_read_arb_registers(rdev);
3519 rv770_get_memory_type(rdev);
3520 if (eg_pi->pcie_performance_request)
3521 ni_advertise_gen2_capability(rdev);
3522 rv770_get_pcie_gen2_status(rdev);
3523 rv770_enable_acpi_pm(rdev);
3524}
3525
3526void ni_update_current_ps(struct radeon_device *rdev,
3527 struct radeon_ps *rps)
3528{
3529 struct ni_ps *new_ps = ni_get_ps(rps);
3530 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3531 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3532
3533 eg_pi->current_rps = *rps;
3534 ni_pi->current_ps = *new_ps;
3535 eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
3536}
3537
3538void ni_update_requested_ps(struct radeon_device *rdev,
3539 struct radeon_ps *rps)
3540{
3541 struct ni_ps *new_ps = ni_get_ps(rps);
3542 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3543 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3544
3545 eg_pi->requested_rps = *rps;
3546 ni_pi->requested_ps = *new_ps;
3547 eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
3548}
3549
3550int ni_dpm_enable(struct radeon_device *rdev)
3551{
3552 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3553 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3554 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
3555 int ret;
3556
3557 if (pi->gfx_clock_gating)
3558 ni_cg_clockgating_default(rdev);
3559 if (btc_dpm_enabled(rdev))
3560 return -EINVAL;
3561 if (pi->mg_clock_gating)
3562 ni_mg_clockgating_default(rdev);
3563 if (eg_pi->ls_clock_gating)
3564 ni_ls_clockgating_default(rdev);
3565 if (pi->voltage_control) {
3566 rv770_enable_voltage_control(rdev, true);
3567 ret = cypress_construct_voltage_tables(rdev);
3568 if (ret) {
3569 DRM_ERROR("cypress_construct_voltage_tables failed\n");
3570 return ret;
3571 }
3572 }
3573 if (eg_pi->dynamic_ac_timing) {
3574 ret = ni_initialize_mc_reg_table(rdev);
3575 if (ret)
3576 eg_pi->dynamic_ac_timing = false;
3577 }
3578 if (pi->dynamic_ss)
3579 cypress_enable_spread_spectrum(rdev, true);
3580 if (pi->thermal_protection)
3581 rv770_enable_thermal_protection(rdev, true);
3582 rv770_setup_bsp(rdev);
3583 rv770_program_git(rdev);
3584 rv770_program_tp(rdev);
3585 rv770_program_tpp(rdev);
3586 rv770_program_sstp(rdev);
3587 cypress_enable_display_gap(rdev);
3588 rv770_program_vc(rdev);
3589 if (pi->dynamic_pcie_gen2)
3590 ni_enable_dynamic_pcie_gen2(rdev, true);
3591 ret = rv770_upload_firmware(rdev);
3592 if (ret) {
3593 DRM_ERROR("rv770_upload_firmware failed\n");
3594 return ret;
3595 }
3596 ret = ni_process_firmware_header(rdev);
3597 if (ret) {
3598 DRM_ERROR("ni_process_firmware_header failed\n");
3599 return ret;
3600 }
3601 ret = ni_initial_switch_from_arb_f0_to_f1(rdev);
3602 if (ret) {
3603 DRM_ERROR("ni_initial_switch_from_arb_f0_to_f1 failed\n");
3604 return ret;
3605 }
3606 ret = ni_init_smc_table(rdev);
3607 if (ret) {
3608 DRM_ERROR("ni_init_smc_table failed\n");
3609 return ret;
3610 }
3611 ret = ni_init_smc_spll_table(rdev);
3612 if (ret) {
3613 DRM_ERROR("ni_init_smc_spll_table failed\n");
3614 return ret;
3615 }
3616 ret = ni_init_arb_table_index(rdev);
3617 if (ret) {
3618 DRM_ERROR("ni_init_arb_table_index failed\n");
3619 return ret;
3620 }
3621 if (eg_pi->dynamic_ac_timing) {
3622 ret = ni_populate_mc_reg_table(rdev, boot_ps);
3623 if (ret) {
3624 DRM_ERROR("ni_populate_mc_reg_table failed\n");
3625 return ret;
3626 }
3627 }
3628 ret = ni_initialize_smc_cac_tables(rdev);
3629 if (ret) {
3630 DRM_ERROR("ni_initialize_smc_cac_tables failed\n");
3631 return ret;
3632 }
3633 ret = ni_initialize_hardware_cac_manager(rdev);
3634 if (ret) {
3635 DRM_ERROR("ni_initialize_hardware_cac_manager failed\n");
3636 return ret;
3637 }
3638 ret = ni_populate_smc_tdp_limits(rdev, boot_ps);
3639 if (ret) {
3640 DRM_ERROR("ni_populate_smc_tdp_limits failed\n");
3641 return ret;
3642 }
3643 ni_program_response_times(rdev);
3644 r7xx_start_smc(rdev);
3645 ret = cypress_notify_smc_display_change(rdev, false);
3646 if (ret) {
3647 DRM_ERROR("cypress_notify_smc_display_change failed\n");
3648 return ret;
3649 }
3650 cypress_enable_sclk_control(rdev, true);
3651 if (eg_pi->memory_transition)
3652 cypress_enable_mclk_control(rdev, true);
3653 cypress_start_dpm(rdev);
3654 if (pi->gfx_clock_gating)
3655 ni_gfx_clockgating_enable(rdev, true);
3656 if (pi->mg_clock_gating)
3657 ni_mg_clockgating_enable(rdev, true);
3658 if (eg_pi->ls_clock_gating)
3659 ni_ls_clockgating_enable(rdev, true);
3660
3661 if (rdev->irq.installed &&
3662 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
3663 PPSMC_Result result;
3664
3665 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
3666 if (ret)
3667 return ret;
3668 rdev->irq.dpm_thermal = true;
3669 radeon_irq_set(rdev);
3670 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
3671
3672 if (result != PPSMC_Result_OK)
3673 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
3674 }
3675
3676 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
3677
3678 ni_update_current_ps(rdev, boot_ps);
3679
3680 return 0;
3681}
3682
3683void ni_dpm_disable(struct radeon_device *rdev)
3684{
3685 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3686 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3687 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
3688
3689 if (!btc_dpm_enabled(rdev))
3690 return;
3691 rv770_clear_vc(rdev);
3692 if (pi->thermal_protection)
3693 rv770_enable_thermal_protection(rdev, false);
3694 ni_enable_power_containment(rdev, boot_ps, false);
3695 ni_enable_smc_cac(rdev, boot_ps, false);
3696 cypress_enable_spread_spectrum(rdev, false);
3697 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
3698 if (pi->dynamic_pcie_gen2)
3699 ni_enable_dynamic_pcie_gen2(rdev, false);
3700
3701 if (rdev->irq.installed &&
3702 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
3703 rdev->irq.dpm_thermal = false;
3704 radeon_irq_set(rdev);
3705 }
3706
3707 if (pi->gfx_clock_gating)
3708 ni_gfx_clockgating_enable(rdev, false);
3709 if (pi->mg_clock_gating)
3710 ni_mg_clockgating_enable(rdev, false);
3711 if (eg_pi->ls_clock_gating)
3712 ni_ls_clockgating_enable(rdev, false);
3713 ni_stop_dpm(rdev);
3714 btc_reset_to_default(rdev);
3715 ni_stop_smc(rdev);
3716 ni_force_switch_to_arb_f0(rdev);
3717
3718 ni_update_current_ps(rdev, boot_ps);
3719}
3720
3721static int ni_power_control_set_level(struct radeon_device *rdev)
3722{
3723 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
3724 int ret;
3725
3726 ret = ni_restrict_performance_levels_before_switch(rdev);
3727 if (ret)
3728 return ret;
3729 ret = rv770_halt_smc(rdev);
3730 if (ret)
3731 return ret;
3732 ret = ni_populate_smc_tdp_limits(rdev, new_ps);
3733 if (ret)
3734 return ret;
3735 ret = rv770_resume_smc(rdev);
3736 if (ret)
3737 return ret;
3738 ret = rv770_set_sw_state(rdev);
3739 if (ret)
3740 return ret;
3741
3742 return 0;
3743}
3744
3745int ni_dpm_pre_set_power_state(struct radeon_device *rdev)
3746{
3747 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3748 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
3749 struct radeon_ps *new_ps = &requested_ps;
3750
3751 ni_update_requested_ps(rdev, new_ps);
3752
3753 ni_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
3754
3755 return 0;
3756}
3757
3758int ni_dpm_set_power_state(struct radeon_device *rdev)
3759{
3760 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3761 struct radeon_ps *new_ps = &eg_pi->requested_rps;
3762 struct radeon_ps *old_ps = &eg_pi->current_rps;
3763 int ret;
3764
3765 ret = ni_restrict_performance_levels_before_switch(rdev);
3766 if (ret) {
3767 DRM_ERROR("ni_restrict_performance_levels_before_switch failed\n");
3768 return ret;
3769 }
3770 ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
3771 ret = ni_enable_power_containment(rdev, new_ps, false);
3772 if (ret) {
3773 DRM_ERROR("ni_enable_power_containment failed\n");
3774 return ret;
3775 }
3776 ret = ni_enable_smc_cac(rdev, new_ps, false);
3777 if (ret) {
3778 DRM_ERROR("ni_enable_smc_cac failed\n");
3779 return ret;
3780 }
3781 ret = rv770_halt_smc(rdev);
3782 if (ret) {
3783 DRM_ERROR("rv770_halt_smc failed\n");
3784 return ret;
3785 }
3786 if (eg_pi->smu_uvd_hs)
3787 btc_notify_uvd_to_smc(rdev, new_ps);
3788 ret = ni_upload_sw_state(rdev, new_ps);
3789 if (ret) {
3790 DRM_ERROR("ni_upload_sw_state failed\n");
3791 return ret;
3792 }
3793 if (eg_pi->dynamic_ac_timing) {
3794 ret = ni_upload_mc_reg_table(rdev, new_ps);
3795 if (ret) {
3796 DRM_ERROR("ni_upload_mc_reg_table failed\n");
3797 return ret;
3798 }
3799 }
3800 ret = ni_program_memory_timing_parameters(rdev, new_ps);
3801 if (ret) {
3802 DRM_ERROR("ni_program_memory_timing_parameters failed\n");
3803 return ret;
3804 }
3805 ret = rv770_resume_smc(rdev);
3806 if (ret) {
3807 DRM_ERROR("rv770_resume_smc failed\n");
3808 return ret;
3809 }
3810 ret = rv770_set_sw_state(rdev);
3811 if (ret) {
3812 DRM_ERROR("rv770_set_sw_state failed\n");
3813 return ret;
3814 }
3815 ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
3816 ret = ni_enable_smc_cac(rdev, new_ps, true);
3817 if (ret) {
3818 DRM_ERROR("ni_enable_smc_cac failed\n");
3819 return ret;
3820 }
3821 ret = ni_enable_power_containment(rdev, new_ps, true);
3822 if (ret) {
3823 DRM_ERROR("ni_enable_power_containment failed\n");
3824 return ret;
3825 }
3826
3827 /* update tdp */
3828 ret = ni_power_control_set_level(rdev);
3829 if (ret) {
3830 DRM_ERROR("ni_power_control_set_level failed\n");
3831 return ret;
3832 }
3833
3834 ret = ni_unrestrict_performance_levels_after_switch(rdev);
3835 if (ret) {
3836 DRM_ERROR("ni_unrestrict_performance_levels_after_switch failed\n");
3837 return ret;
3838 }
3839
3840 return 0;
3841}
3842
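ni_dpm_set_power_state() is a strict bail-on-first-error pipeline: CAC and power containment are switched off, the SMC is halted, the new state uploaded, the SMC resumed, and the features then re-enabled in the reverse of the order they were disabled. Stripped of the radeon specifics, the control flow reduces to the table-driven sketch below; the step names are illustrative only, not the driver's.

#include <stdio.h>

typedef int (*step_fn)(void);

static int halt_smc(void)   { puts("halt");     return 0; }
static int upload(void)     { puts("upload");   return 0; }
static int resume_smc(void) { puts("resume");   return 0; }
static int reenable(void)   { puts("reenable"); return 0; }

int main(void)
{
	static const struct { const char *name; step_fn fn; } steps[] = {
		{ "halt",     halt_smc },
		{ "upload",   upload },
		{ "resume",   resume_smc },
		{ "reenable", reenable },
	};
	unsigned int i;

	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		int ret = steps[i].fn();

		if (ret) {
			fprintf(stderr, "%s failed: %d\n", steps[i].name, ret);
			return ret; /* first error wins, as in the driver */
		}
	}
	return 0;
}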
3843void ni_dpm_post_set_power_state(struct radeon_device *rdev)
3844{
3845 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3846 struct radeon_ps *new_ps = &eg_pi->requested_rps;
3847
3848 ni_update_current_ps(rdev, new_ps);
3849}
3850
3851void ni_dpm_reset_asic(struct radeon_device *rdev)
3852{
3853 ni_restrict_performance_levels_before_switch(rdev);
3854 rv770_set_boot_state(rdev);
3855}
3856
3857union power_info {
3858 struct _ATOM_POWERPLAY_INFO info;
3859 struct _ATOM_POWERPLAY_INFO_V2 info_2;
3860 struct _ATOM_POWERPLAY_INFO_V3 info_3;
3861 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
3862 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
3863 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
3864};
3865
3866union pplib_clock_info {
3867 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
3868 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
3869 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
3870 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
3871};
3872
3873union pplib_power_state {
3874 struct _ATOM_PPLIB_STATE v1;
3875 struct _ATOM_PPLIB_STATE_V2 v2;
3876};
3877
3878static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev,
3879 struct radeon_ps *rps,
3880 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
3881 u8 table_rev)
3882{
3883 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
3884 rps->class = le16_to_cpu(non_clock_info->usClassification);
3885 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
3886
3887 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
3888 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
3889 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
3890 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
3891 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
3892 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
3893 } else {
3894 rps->vclk = 0;
3895 rps->dclk = 0;
3896 }
3897
3898 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
3899 rdev->pm.dpm.boot_ps = rps;
3900 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
3901 rdev->pm.dpm.uvd_ps = rps;
3902}
3903
3904static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
3905 struct radeon_ps *rps, int index,
3906 union pplib_clock_info *clock_info)
3907{
3908 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3909 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3910 struct ni_ps *ps = ni_get_ps(rps);
3911 u16 vddc;
3912 struct rv7xx_pl *pl = &ps->performance_levels[index];
3913
3914 ps->performance_level_count = index + 1;
3915
3916 pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
3917 pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
3918 pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
3919 pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
3920
3921 pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
3922 pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
3923 pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
3924
3925 /* patch up vddc if necessary */
3926 if (pl->vddc == 0xff01) {
3927 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
3928 pl->vddc = vddc;
3929 }
3930
3931 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
3932 pi->acpi_vddc = pl->vddc;
3933 eg_pi->acpi_vddci = pl->vddci;
3934 if (ps->performance_levels[0].flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
3935 pi->acpi_pcie_gen2 = true;
3936 else
3937 pi->acpi_pcie_gen2 = false;
3938 }
3939
3940 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
3941 eg_pi->ulv.supported = true;
3942 eg_pi->ulv.pl = pl;
3943 }
3944
3945 if (pi->min_vddc_in_table > pl->vddc)
3946 pi->min_vddc_in_table = pl->vddc;
3947
3948 if (pi->max_vddc_in_table < pl->vddc)
3949 pi->max_vddc_in_table = pl->vddc;
3950
3951 /* patch up boot state */
3952 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
3953 u16 vddc, vddci, mvdd;
3954 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
3955 pl->mclk = rdev->clock.default_mclk;
3956 pl->sclk = rdev->clock.default_sclk;
3957 pl->vddc = vddc;
3958 pl->vddci = vddci;
3959 }
3960
3961 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
3962 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
3963 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
3964 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
3965 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
3966 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
3967 }
3968}
3969
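ni_parse_pplib_clock_info() rebuilds each 24-bit clock from ATOM's split encoding: a little-endian 16-bit low word plus an 8-bit high byte placed at bits 16..23. A standalone round-trip of that reassembly; the sample values are invented.

#include <stdint.h>
#include <stdio.h>

/* Reassemble a clock the way the evergreen clock_info fields split it:
 * 16-bit little-endian low word, 8-bit high byte at bits 16..23. */
static uint32_t atom_clock(uint16_t low, uint8_t high)
{
	return (uint32_t)low | ((uint32_t)high << 16);
}

int main(void)
{
	/* 90000 (10 kHz units) = 0x15f90 -> low 0x5f90, high 0x01 */
	printf("%u\n", (unsigned int)atom_clock(0x5f90, 0x01)); /* 90000 */
	return 0;
}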
3970static int ni_parse_power_table(struct radeon_device *rdev)
3971{
3972 struct radeon_mode_info *mode_info = &rdev->mode_info;
3973 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
3974 union pplib_power_state *power_state;
3975 int i, j;
3976 union pplib_clock_info *clock_info;
3977 union power_info *power_info;
3978 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
3979 u16 data_offset;
3980 u8 frev, crev;
3981 struct ni_ps *ps;
3982
3983 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
3984 &frev, &crev, &data_offset))
3985 return -EINVAL;
3986 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
3987
3988 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
3989 power_info->pplib.ucNumStates, GFP_KERNEL);
3990 if (!rdev->pm.dpm.ps)
3991 return -ENOMEM;
3992 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
3993 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
3994 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
3995
3996 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
3997 power_state = (union pplib_power_state *)
3998 (mode_info->atom_context->bios + data_offset +
3999 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
4000 i * power_info->pplib.ucStateEntrySize);
4001 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4002 (mode_info->atom_context->bios + data_offset +
4003 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
4004 (power_state->v1.ucNonClockStateIndex *
4005 power_info->pplib.ucNonClockSize));
4006 if (power_info->pplib.ucStateEntrySize - 1) {
4007 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
4008 if (ps == NULL) {
4009 kfree(rdev->pm.dpm.ps);
4010 return -ENOMEM;
4011 }
4012 rdev->pm.dpm.ps[i].ps_priv = ps;
4013 ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4014 non_clock_info,
4015 power_info->pplib.ucNonClockSize);
4016 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
4017 clock_info = (union pplib_clock_info *)
4018 (mode_info->atom_context->bios + data_offset +
4019 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
4020 (power_state->v1.ucClockStateIndices[j] *
4021 power_info->pplib.ucClockInfoSize));
4022 ni_parse_pplib_clock_info(rdev,
4023 &rdev->pm.dpm.ps[i], j,
4024 clock_info);
4025 }
4026 }
4027 }
4028 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
4029 return 0;
4030}
4031
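ni_parse_power_table() walks the PPLib blob with plain byte arithmetic: state i starts at usStateArrayOffset + i * ucStateEntrySize, and each of the entry's ucStateEntrySize - 1 clock indices selects a ucClockInfoSize-sized record. The offset math in isolation, with made-up table geometry:

#include <stddef.h>
#include <stdio.h>

/* Byte offset of power state i inside the state array. */
static size_t state_offset(size_t array_off, size_t entry_size, unsigned int i)
{
	return array_off + (size_t)i * entry_size;
}

/* Byte offset of clock-info record idx inside the clock-info array. */
static size_t clock_offset(size_t array_off, size_t rec_size, unsigned int idx)
{
	return array_off + (size_t)idx * rec_size;
}

int main(void)
{
	printf("state 3 at +%zu\n", state_offset(0x40, 10, 3));  /* 0x40 + 30 */
	printf("clock 2 at +%zu\n", clock_offset(0x100, 24, 2)); /* 0x100 + 48 */
	return 0;
}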
4032int ni_dpm_init(struct radeon_device *rdev)
4033{
4034 struct rv7xx_power_info *pi;
4035 struct evergreen_power_info *eg_pi;
4036 struct ni_power_info *ni_pi;
4037 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4038 u16 data_offset, size;
4039 u8 frev, crev;
4040 struct atom_clock_dividers dividers;
4041 int ret;
4042
4043 ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL);
4044 if (ni_pi == NULL)
4045 return -ENOMEM;
4046 rdev->pm.dpm.priv = ni_pi;
4047 eg_pi = &ni_pi->eg;
4048 pi = &eg_pi->rv7xx;
4049
4050 rv770_get_max_vddc(rdev);
4051
4052 eg_pi->ulv.supported = false;
4053 pi->acpi_vddc = 0;
4054 eg_pi->acpi_vddci = 0;
4055 pi->min_vddc_in_table = 0;
4056 pi->max_vddc_in_table = 0;
4057
4058 ret = ni_parse_power_table(rdev);
4059 if (ret)
4060 return ret;
4061 ret = r600_parse_extended_power_table(rdev);
4062 if (ret)
4063 return ret;
4064
4065 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
4066 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
4067 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
4068 r600_free_extended_power_table(rdev);
4069 return -ENOMEM;
4070 }
4071 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
4072 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
4073 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
4074 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
4075 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
4076 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
4077 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
4078 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
4079 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
4080
4081 ni_patch_dependency_tables_based_on_leakage(rdev);
4082
4083 if (rdev->pm.dpm.voltage_response_time == 0)
4084 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
4085 if (rdev->pm.dpm.backbias_response_time == 0)
4086 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
4087
4088 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
4089 0, false, &dividers);
4090 if (ret)
4091 pi->ref_div = dividers.ref_div + 1;
4092 else
4093 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
4094
4095 pi->rlp = RV770_RLP_DFLT;
4096 pi->rmp = RV770_RMP_DFLT;
4097 pi->lhp = RV770_LHP_DFLT;
4098 pi->lmp = RV770_LMP_DFLT;
4099
4100 eg_pi->ats[0].rlp = RV770_RLP_DFLT;
4101 eg_pi->ats[0].rmp = RV770_RMP_DFLT;
4102 eg_pi->ats[0].lhp = RV770_LHP_DFLT;
4103 eg_pi->ats[0].lmp = RV770_LMP_DFLT;
4104
4105 eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
4106 eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
4107 eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
4108 eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;
4109
4110 eg_pi->smu_uvd_hs = true;
4111
4112 if (rdev->pdev->device == 0x6707) {
4113 pi->mclk_strobe_mode_threshold = 55000;
4114 pi->mclk_edc_enable_threshold = 55000;
4115 eg_pi->mclk_edc_wr_enable_threshold = 55000;
4116 } else {
4117 pi->mclk_strobe_mode_threshold = 40000;
4118 pi->mclk_edc_enable_threshold = 40000;
4119 eg_pi->mclk_edc_wr_enable_threshold = 40000;
4120 }
4121 ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
4122
4123 pi->voltage_control =
4124 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
4125
4126 pi->mvdd_control =
4127 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
4128
4129 eg_pi->vddci_control =
4130 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
4131
4132 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
4133 &frev, &crev, &data_offset)) {
4134 pi->sclk_ss = true;
4135 pi->mclk_ss = true;
4136 pi->dynamic_ss = true;
4137 } else {
4138 pi->sclk_ss = false;
4139 pi->mclk_ss = false;
4140 pi->dynamic_ss = true;
4141 }
4142
4143 pi->asi = RV770_ASI_DFLT;
4144 pi->pasi = CYPRESS_HASI_DFLT;
4145 pi->vrc = CYPRESS_VRC_DFLT;
4146
4147 pi->power_gating = false;
4148
4149 pi->gfx_clock_gating = true;
4150
4151 pi->mg_clock_gating = true;
4152 pi->mgcgtssm = true;
4153 eg_pi->ls_clock_gating = false;
4154 eg_pi->sclk_deep_sleep = false;
4155
4156 pi->dynamic_pcie_gen2 = true;
4157
4158 if (pi->gfx_clock_gating &&
4159 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
4160 pi->thermal_protection = true;
4161 else
4162 pi->thermal_protection = false;
4163
4164 pi->display_gap = true;
4165
4166 pi->dcodt = true;
4167
4168 pi->ulps = true;
4169
4170 eg_pi->dynamic_ac_timing = true;
4171 eg_pi->abm = true;
4172 eg_pi->mcls = true;
4173 eg_pi->light_sleep = true;
4174 eg_pi->memory_transition = true;
4175#if defined(CONFIG_ACPI)
4176 eg_pi->pcie_performance_request =
4177 radeon_acpi_is_pcie_performance_request_supported(rdev);
4178#else
4179 eg_pi->pcie_performance_request = false;
4180#endif
4181
4182 eg_pi->dll_default_on = false;
4183
4184 eg_pi->sclk_deep_sleep = false;
4185
4186 pi->mclk_stutter_mode_threshold = 0;
4187
4188 pi->sram_end = SMC_RAM_END;
4189
4190 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3;
4191 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
4192 rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
4193 rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
4194 rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
4195 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
4196 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
4197 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500;
4198
4199 ni_pi->cac_data.leakage_coefficients.at = 516;
4200 ni_pi->cac_data.leakage_coefficients.bt = 18;
4201 ni_pi->cac_data.leakage_coefficients.av = 51;
4202 ni_pi->cac_data.leakage_coefficients.bv = 2957;
4203
4204 switch (rdev->pdev->device) {
4205 case 0x6700:
4206 case 0x6701:
4207 case 0x6702:
4208 case 0x6703:
4209 case 0x6718:
4210 ni_pi->cac_weights = &cac_weights_cayman_xt;
4211 break;
4212 case 0x6705:
4213 case 0x6719:
4214 case 0x671D:
4215 case 0x671C:
4216 default:
4217 ni_pi->cac_weights = &cac_weights_cayman_pro;
4218 break;
4219 case 0x6704:
4220 case 0x6706:
4221 case 0x6707:
4222 case 0x6708:
4223 case 0x6709:
4224 ni_pi->cac_weights = &cac_weights_cayman_le;
4225 break;
4226 }
4227
4228 if (ni_pi->cac_weights->enable_power_containment_by_default) {
4229 ni_pi->enable_power_containment = true;
4230 ni_pi->enable_cac = true;
4231 ni_pi->enable_sq_ramping = true;
4232 } else {
4233 ni_pi->enable_power_containment = false;
4234 ni_pi->enable_cac = false;
4235 ni_pi->enable_sq_ramping = false;
4236 }
4237
4238 ni_pi->driver_calculate_cac_leakage = false;
4239 ni_pi->cac_configuration_required = true;
4240
4241 if (ni_pi->cac_configuration_required) {
4242 ni_pi->support_cac_long_term_average = true;
4243 ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size;
4244 ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate;
4245 } else {
4246 ni_pi->support_cac_long_term_average = false;
4247 ni_pi->lta_window_size = 0;
4248 ni_pi->lts_truncate = 0;
4249 }
4250
4251 ni_pi->use_power_boost_limit = true;
4252
4253 return 0;
4254}
4255
4256void ni_dpm_fini(struct radeon_device *rdev)
4257{
4258 int i;
4259
4260 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
4261 kfree(rdev->pm.dpm.ps[i].ps_priv);
4262 }
4263 kfree(rdev->pm.dpm.ps);
4264 kfree(rdev->pm.dpm.priv);
4265 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
4266 r600_free_extended_power_table(rdev);
4267}
4268
4269void ni_dpm_print_power_state(struct radeon_device *rdev,
4270 struct radeon_ps *rps)
4271{
4272 struct ni_ps *ps = ni_get_ps(rps);
4273 struct rv7xx_pl *pl;
4274 int i;
4275
4276 r600_dpm_print_class_info(rps->class, rps->class2);
4277 r600_dpm_print_cap_info(rps->caps);
4278 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
4279 for (i = 0; i < ps->performance_level_count; i++) {
4280 pl = &ps->performance_levels[i];
4281 if (rdev->family >= CHIP_TAHITI)
4282 printk("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
4283 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
4284 else
4285 printk("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
4286 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
4287 }
4288 r600_dpm_print_ps_status(rdev, rps);
4289}
4290
4291void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
4292 struct seq_file *m)
4293{
4294 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
4295 struct ni_ps *ps = ni_get_ps(rps);
4296 struct rv7xx_pl *pl;
4297 u32 current_index =
4298 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
4299 CURRENT_STATE_INDEX_SHIFT;
4300
4301 if (current_index >= ps->performance_level_count) {
4302 seq_printf(m, "invalid dpm profile %d\n", current_index);
4303 } else {
4304 pl = &ps->performance_levels[current_index];
4305 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
4306 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
4307 current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
4308 }
4309}
4310
4311u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
4312{
4313 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4314 struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);
4315
4316 if (low)
4317 return requested_state->performance_levels[0].sclk;
4318 else
4319 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
4320}
4321
4322u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low)
4323{
4324 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4325 struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);
4326
4327 if (low)
4328 return requested_state->performance_levels[0].mclk;
4329 else
4330 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
4331}
4332
diff --git a/drivers/gpu/drm/radeon/ni_dpm.h b/drivers/gpu/drm/radeon/ni_dpm.h
new file mode 100644
index 000000000000..ac1c7abf2c67
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dpm.h
@@ -0,0 +1,248 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __NI_DPM_H__
24#define __NI_DPM_H__
25
26#include "cypress_dpm.h"
27#include "btc_dpm.h"
28#include "nislands_smc.h"
29
30struct ni_clock_registers {
31 u32 cg_spll_func_cntl;
32 u32 cg_spll_func_cntl_2;
33 u32 cg_spll_func_cntl_3;
34 u32 cg_spll_func_cntl_4;
35 u32 cg_spll_spread_spectrum;
36 u32 cg_spll_spread_spectrum_2;
37 u32 mclk_pwrmgt_cntl;
38 u32 dll_cntl;
39 u32 mpll_ad_func_cntl;
40 u32 mpll_ad_func_cntl_2;
41 u32 mpll_dq_func_cntl;
42 u32 mpll_dq_func_cntl_2;
43 u32 mpll_ss1;
44 u32 mpll_ss2;
45};
46
47struct ni_mc_reg_entry {
48 u32 mclk_max;
49 u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
50};
51
52struct ni_mc_reg_table {
53 u8 last;
54 u8 num_entries;
55 u16 valid_flag;
56 struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
57 SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
58};
59
60#define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2
61
62enum ni_dc_cac_level
63{
64 NISLANDS_DCCAC_LEVEL_0 = 0,
65 NISLANDS_DCCAC_LEVEL_1,
66 NISLANDS_DCCAC_LEVEL_2,
67 NISLANDS_DCCAC_LEVEL_3,
68 NISLANDS_DCCAC_LEVEL_4,
69 NISLANDS_DCCAC_LEVEL_5,
70 NISLANDS_DCCAC_LEVEL_6,
71 NISLANDS_DCCAC_LEVEL_7,
72 NISLANDS_DCCAC_MAX_LEVELS
73};
74
75struct ni_leakage_coeffients
76{
77 u32 at;
78 u32 bt;
79 u32 av;
80 u32 bv;
81 s32 t_slope;
82 s32 t_intercept;
83 u32 t_ref;
84};
85
86struct ni_cac_data
87{
88 struct ni_leakage_coeffients leakage_coefficients;
89 u32 i_leakage;
90 s32 leakage_minimum_temperature;
91 u32 pwr_const;
92 u32 dc_cac_value;
93 u32 bif_cac_value;
94 u32 lkge_pwr;
95 u8 mc_wr_weight;
96 u8 mc_rd_weight;
97 u8 allow_ovrflw;
98 u8 num_win_tdp;
99 u8 l2num_win_tdp;
100 u8 lts_truncate_n;
101};
102
103struct ni_cac_weights
104{
105 u32 weight_tcp_sig0;
106 u32 weight_tcp_sig1;
107 u32 weight_ta_sig;
108 u32 weight_tcc_en0;
109 u32 weight_tcc_en1;
110 u32 weight_tcc_en2;
111 u32 weight_cb_en0;
112 u32 weight_cb_en1;
113 u32 weight_cb_en2;
114 u32 weight_cb_en3;
115 u32 weight_db_sig0;
116 u32 weight_db_sig1;
117 u32 weight_db_sig2;
118 u32 weight_db_sig3;
119 u32 weight_sxm_sig0;
120 u32 weight_sxm_sig1;
121 u32 weight_sxm_sig2;
122 u32 weight_sxs_sig0;
123 u32 weight_sxs_sig1;
124 u32 weight_xbr_0;
125 u32 weight_xbr_1;
126 u32 weight_xbr_2;
127 u32 weight_spi_sig0;
128 u32 weight_spi_sig1;
129 u32 weight_spi_sig2;
130 u32 weight_spi_sig3;
131 u32 weight_spi_sig4;
132 u32 weight_spi_sig5;
133 u32 weight_lds_sig0;
134 u32 weight_lds_sig1;
135 u32 weight_sc;
136 u32 weight_bif;
137 u32 weight_cp;
138 u32 weight_pa_sig0;
139 u32 weight_pa_sig1;
140 u32 weight_vgt_sig0;
141 u32 weight_vgt_sig1;
142 u32 weight_vgt_sig2;
143 u32 weight_dc_sig0;
144 u32 weight_dc_sig1;
145 u32 weight_dc_sig2;
146 u32 weight_dc_sig3;
147 u32 weight_uvd_sig0;
148 u32 weight_uvd_sig1;
149 u32 weight_spare0;
150 u32 weight_spare1;
151 u32 weight_sq_vsp;
152 u32 weight_sq_vsp0;
153 u32 weight_sq_gpr;
154 u32 ovr_mode_spare_0;
155 u32 ovr_val_spare_0;
156 u32 ovr_mode_spare_1;
157 u32 ovr_val_spare_1;
158 u32 vsp;
159 u32 vsp0;
160 u32 gpr;
161 u8 mc_read_weight;
162 u8 mc_write_weight;
163 u32 tid_cnt;
164 u32 tid_unit;
165 u32 l2_lta_window_size;
166 u32 lts_truncate;
167 u32 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
168 u32 pcie_cac[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
169 bool enable_power_containment_by_default;
170};
171
172struct ni_ps {
173 u16 performance_level_count;
174 bool dc_compatible;
175 struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
176};
177
178struct ni_power_info {
179 /* must be first! */
180 struct evergreen_power_info eg;
181 struct ni_clock_registers clock_registers;
182 struct ni_mc_reg_table mc_reg_table;
183 u32 mclk_rtt_mode_threshold;
184 /* flags */
185 bool use_power_boost_limit;
186 bool support_cac_long_term_average;
187 bool cac_enabled;
188 bool cac_configuration_required;
189 bool driver_calculate_cac_leakage;
190 bool pc_enabled;
191 bool enable_power_containment;
192 bool enable_cac;
193 bool enable_sq_ramping;
194 /* smc offsets */
195 u16 arb_table_start;
196 u16 fan_table_start;
197 u16 cac_table_start;
198 u16 spll_table_start;
199 /* CAC stuff */
200 struct ni_cac_data cac_data;
201 u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
202 const struct ni_cac_weights *cac_weights;
203 u8 lta_window_size;
204 u8 lts_truncate;
205 struct ni_ps current_ps;
206 struct ni_ps requested_ps;
207 /* scratch structs */
208 SMC_NIslands_MCRegisters smc_mc_reg_table;
209 NISLANDS_SMC_STATETABLE smc_statetable;
210};
211
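The "must be first!" comment on struct ni_power_info is load-bearing: because the evergreen_power_info member sits at offset zero, the one rdev->pm.dpm.priv pointer can be cast to any of the nested *_power_info types, which is how ni_get_pi(), evergreen_get_pi() and rv770_get_pi() all return views of the same allocation. A standalone illustration of the idiom:

#include <stdio.h>

struct base    { int x; };
struct derived {
	struct base b; /* must be first: &d and &d.b are the same address */
	int y;
};

int main(void)
{
	struct derived d = { { 1 }, 2 };
	struct base *bp = (struct base *)&d; /* valid only because b is first */

	printf("%d\n", bp->x); /* prints 1 */
	return 0;
}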
212#define NISLANDS_INITIAL_STATE_ARB_INDEX 0
213#define NISLANDS_ACPI_STATE_ARB_INDEX 1
214#define NISLANDS_ULV_STATE_ARB_INDEX 2
215#define NISLANDS_DRIVER_STATE_ARB_INDEX 3
216
217#define NISLANDS_DPM2_MAX_PULSE_SKIP 256
218
219#define NISLANDS_DPM2_NEAR_TDP_DEC 10
220#define NISLANDS_DPM2_ABOVE_SAFE_INC 5
221#define NISLANDS_DPM2_BELOW_SAFE_INC 20
222
223#define NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
224
225#define NISLANDS_DPM2_MAXPS_PERCENT_H 90
226#define NISLANDS_DPM2_MAXPS_PERCENT_M 0
227
228#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
229#define NISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
230#define NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
231#define NISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
232#define NISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
233
234int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
235 u32 arb_freq_src, u32 arb_freq_dest);
236void ni_update_current_ps(struct radeon_device *rdev,
237 struct radeon_ps *rps);
238void ni_update_requested_ps(struct radeon_device *rdev,
239 struct radeon_ps *rps);
240
241void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
242 struct radeon_ps *new_ps,
243 struct radeon_ps *old_ps);
244void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
245 struct radeon_ps *new_ps,
246 struct radeon_ps *old_ps);
247
248#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index e226faf16fea..fe24a93542ec 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -489,6 +489,571 @@
489# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
490# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
491
492/* TN SMU registers */
493#define TN_CURRENT_GNB_TEMP 0x1F390
494
495/* pm registers */
496#define SMC_MSG 0x20c
497#define HOST_SMC_MSG(x) ((x) << 0)
498#define HOST_SMC_MSG_MASK (0xff << 0)
499#define HOST_SMC_MSG_SHIFT 0
500#define HOST_SMC_RESP(x) ((x) << 8)
501#define HOST_SMC_RESP_MASK (0xff << 8)
502#define HOST_SMC_RESP_SHIFT 8
503#define SMC_HOST_MSG(x) ((x) << 16)
504#define SMC_HOST_MSG_MASK (0xff << 16)
505#define SMC_HOST_MSG_SHIFT 16
506#define SMC_HOST_RESP(x) ((x) << 24)
507#define SMC_HOST_RESP_MASK (0xff << 24)
508#define SMC_HOST_RESP_SHIFT 24
509
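SMC_MSG packs four byte-wide mailbox fields into one register; the (x) << n setters and the *_MASK/*_SHIFT pairs above give the two directions of that packing. A quick round-trip using the same shift amounts; the register here is just a local variable, not an MMIO access.

#include <stdint.h>
#include <stdio.h>

#define HOST_SMC_MSG(x)     ((uint32_t)(x) << 0)
#define HOST_SMC_RESP(x)    ((uint32_t)(x) << 8)
#define HOST_SMC_RESP_MASK  (0xffu << 8)
#define HOST_SMC_RESP_SHIFT 8

int main(void)
{
	uint32_t reg = HOST_SMC_MSG(0x51) | HOST_SMC_RESP(0x3f);
	uint32_t resp = (reg & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT;

	printf("resp = 0x%02x\n", (unsigned int)resp); /* prints 0x3f */
	return 0;
}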
510#define CG_SPLL_FUNC_CNTL 0x600
511#define SPLL_RESET (1 << 0)
512#define SPLL_SLEEP (1 << 1)
513#define SPLL_BYPASS_EN (1 << 3)
514#define SPLL_REF_DIV(x) ((x) << 4)
515#define SPLL_REF_DIV_MASK (0x3f << 4)
516#define SPLL_PDIV_A(x) ((x) << 20)
517#define SPLL_PDIV_A_MASK (0x7f << 20)
518#define SPLL_PDIV_A_SHIFT 20
519#define CG_SPLL_FUNC_CNTL_2 0x604
520#define SCLK_MUX_SEL(x) ((x) << 0)
521#define SCLK_MUX_SEL_MASK (0x1ff << 0)
522#define CG_SPLL_FUNC_CNTL_3 0x608
523#define SPLL_FB_DIV(x) ((x) << 0)
524#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
525#define SPLL_FB_DIV_SHIFT 0
526#define SPLL_DITHEN (1 << 28)
527
528#define MPLL_CNTL_MODE 0x61c
529# define SS_SSEN (1 << 24)
530# define SS_DSMODE_EN (1 << 25)
531
532#define MPLL_AD_FUNC_CNTL 0x624
533#define CLKF(x) ((x) << 0)
534#define CLKF_MASK (0x7f << 0)
535#define CLKR(x) ((x) << 7)
536#define CLKR_MASK (0x1f << 7)
537#define CLKFRAC(x) ((x) << 12)
538#define CLKFRAC_MASK (0x1f << 12)
539#define YCLK_POST_DIV(x) ((x) << 17)
540#define YCLK_POST_DIV_MASK (3 << 17)
541#define IBIAS(x) ((x) << 20)
542#define IBIAS_MASK (0x3ff << 20)
543#define RESET (1 << 30)
544#define PDNB (1 << 31)
545#define MPLL_AD_FUNC_CNTL_2 0x628
546#define BYPASS (1 << 19)
547#define BIAS_GEN_PDNB (1 << 24)
548#define RESET_EN (1 << 25)
549#define VCO_MODE (1 << 29)
550#define MPLL_DQ_FUNC_CNTL 0x62c
551#define MPLL_DQ_FUNC_CNTL_2 0x630
552
553#define GENERAL_PWRMGT 0x63c
554# define GLOBAL_PWRMGT_EN (1 << 0)
555# define STATIC_PM_EN (1 << 1)
556# define THERMAL_PROTECTION_DIS (1 << 2)
557# define THERMAL_PROTECTION_TYPE (1 << 3)
558# define ENABLE_GEN2PCIE (1 << 4)
559# define ENABLE_GEN2XSP (1 << 5)
560# define SW_SMIO_INDEX(x) ((x) << 6)
561# define SW_SMIO_INDEX_MASK (3 << 6)
562# define SW_SMIO_INDEX_SHIFT 6
563# define LOW_VOLT_D2_ACPI (1 << 8)
564# define LOW_VOLT_D3_ACPI (1 << 9)
565# define VOLT_PWRMGT_EN (1 << 10)
566# define BACKBIAS_PAD_EN (1 << 18)
567# define BACKBIAS_VALUE (1 << 19)
568# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
569# define AC_DC_SW (1 << 24)
570
571#define SCLK_PWRMGT_CNTL 0x644
572# define SCLK_PWRMGT_OFF (1 << 0)
573# define SCLK_LOW_D1 (1 << 1)
574# define FIR_RESET (1 << 4)
575# define FIR_FORCE_TREND_SEL (1 << 5)
576# define FIR_TREND_MODE (1 << 6)
577# define DYN_GFX_CLK_OFF_EN (1 << 7)
578# define GFX_CLK_FORCE_ON (1 << 8)
579# define GFX_CLK_REQUEST_OFF (1 << 9)
580# define GFX_CLK_FORCE_OFF (1 << 10)
581# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
582# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
583# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
584# define DYN_LIGHT_SLEEP_EN (1 << 14)
585#define MCLK_PWRMGT_CNTL 0x648
586# define DLL_SPEED(x) ((x) << 0)
587# define DLL_SPEED_MASK (0x1f << 0)
588# define MPLL_PWRMGT_OFF (1 << 5)
589# define DLL_READY (1 << 6)
590# define MC_INT_CNTL (1 << 7)
591# define MRDCKA0_PDNB (1 << 8)
592# define MRDCKA1_PDNB (1 << 9)
593# define MRDCKB0_PDNB (1 << 10)
594# define MRDCKB1_PDNB (1 << 11)
595# define MRDCKC0_PDNB (1 << 12)
596# define MRDCKC1_PDNB (1 << 13)
597# define MRDCKD0_PDNB (1 << 14)
598# define MRDCKD1_PDNB (1 << 15)
599# define MRDCKA0_RESET (1 << 16)
600# define MRDCKA1_RESET (1 << 17)
601# define MRDCKB0_RESET (1 << 18)
602# define MRDCKB1_RESET (1 << 19)
603# define MRDCKC0_RESET (1 << 20)
604# define MRDCKC1_RESET (1 << 21)
605# define MRDCKD0_RESET (1 << 22)
606# define MRDCKD1_RESET (1 << 23)
607# define DLL_READY_READ (1 << 24)
608# define USE_DISPLAY_GAP (1 << 25)
609# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
610# define MPLL_TURNOFF_D2 (1 << 28)
611#define DLL_CNTL 0x64c
612# define MRDCKA0_BYPASS (1 << 24)
613# define MRDCKA1_BYPASS (1 << 25)
614# define MRDCKB0_BYPASS (1 << 26)
615# define MRDCKB1_BYPASS (1 << 27)
616# define MRDCKC0_BYPASS (1 << 28)
617# define MRDCKC1_BYPASS (1 << 29)
618# define MRDCKD0_BYPASS (1 << 30)
619# define MRDCKD1_BYPASS (1 << 31)
620
621#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
622# define CURRENT_STATE_INDEX_MASK (0xf << 4)
623# define CURRENT_STATE_INDEX_SHIFT 4
624
625#define CG_AT 0x6d4
626# define CG_R(x) ((x) << 0)
627# define CG_R_MASK (0xffff << 0)
628# define CG_L(x) ((x) << 16)
629# define CG_L_MASK (0xffff << 16)
630
631#define CG_BIF_REQ_AND_RSP 0x7f4
632#define CG_CLIENT_REQ(x) ((x) << 0)
633#define CG_CLIENT_REQ_MASK (0xff << 0)
634#define CG_CLIENT_REQ_SHIFT 0
635#define CG_CLIENT_RESP(x) ((x) << 8)
636#define CG_CLIENT_RESP_MASK (0xff << 8)
637#define CG_CLIENT_RESP_SHIFT 8
638#define CLIENT_CG_REQ(x) ((x) << 16)
639#define CLIENT_CG_REQ_MASK (0xff << 16)
640#define CLIENT_CG_REQ_SHIFT 16
641#define CLIENT_CG_RESP(x) ((x) << 24)
642#define CLIENT_CG_RESP_MASK (0xff << 24)
643#define CLIENT_CG_RESP_SHIFT 24
644
645#define CG_SPLL_SPREAD_SPECTRUM 0x790
646#define SSEN (1 << 0)
647#define CLK_S(x) ((x) << 4)
648#define CLK_S_MASK (0xfff << 4)
649#define CLK_S_SHIFT 4
650#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
651#define CLK_V(x) ((x) << 0)
652#define CLK_V_MASK (0x3ffffff << 0)
653#define CLK_V_SHIFT 0
654
655#define SMC_SCRATCH0 0x81c
656
657#define CG_SPLL_FUNC_CNTL_4 0x850
658
659#define MPLL_SS1 0x85c
660#define CLKV(x) ((x) << 0)
661#define CLKV_MASK (0x3ffffff << 0)
662#define MPLL_SS2 0x860
663#define CLKS(x) ((x) << 0)
664#define CLKS_MASK (0xfff << 0)
665
666#define CG_CAC_CTRL 0x88c
667#define TID_CNT(x) ((x) << 0)
668#define TID_CNT_MASK (0x3fff << 0)
669#define TID_UNIT(x) ((x) << 14)
670#define TID_UNIT_MASK (0xf << 14)
671
672#define CG_IND_ADDR 0x8f8
673#define CG_IND_DATA 0x8fc
674/* CGIND regs */
675#define CG_CGTT_LOCAL_0 0x00
676#define CG_CGTT_LOCAL_1 0x01
677
678#define MC_CG_CONFIG 0x25bc
679#define MCDW_WR_ENABLE (1 << 0)
680#define MCDX_WR_ENABLE (1 << 1)
681#define MCDY_WR_ENABLE (1 << 2)
682#define MCDZ_WR_ENABLE (1 << 3)
683#define MC_RD_ENABLE(x) ((x) << 4)
684#define MC_RD_ENABLE_MASK (3 << 4)
685#define INDEX(x) ((x) << 6)
686#define INDEX_MASK (0xfff << 6)
687#define INDEX_SHIFT 6
688
689#define MC_ARB_CAC_CNTL 0x2750
690#define ENABLE (1 << 0)
691#define READ_WEIGHT(x) ((x) << 1)
692#define READ_WEIGHT_MASK (0x3f << 1)
693#define READ_WEIGHT_SHIFT 1
694#define WRITE_WEIGHT(x) ((x) << 7)
695#define WRITE_WEIGHT_MASK (0x3f << 7)
696#define WRITE_WEIGHT_SHIFT 7
697#define ALLOW_OVERFLOW (1 << 13)
698
699#define MC_ARB_DRAM_TIMING 0x2774
700#define MC_ARB_DRAM_TIMING2 0x2778
701
702#define MC_ARB_RFSH_RATE 0x27b0
703#define POWERMODE0(x) ((x) << 0)
704#define POWERMODE0_MASK (0xff << 0)
705#define POWERMODE0_SHIFT 0
706#define POWERMODE1(x) ((x) << 8)
707#define POWERMODE1_MASK (0xff << 8)
708#define POWERMODE1_SHIFT 8
709#define POWERMODE2(x) ((x) << 16)
710#define POWERMODE2_MASK (0xff << 16)
711#define POWERMODE2_SHIFT 16
712#define POWERMODE3(x) ((x) << 24)
713#define POWERMODE3_MASK (0xff << 24)
714#define POWERMODE3_SHIFT 24
715
716#define MC_ARB_CG 0x27e8
717#define CG_ARB_REQ(x) ((x) << 0)
718#define CG_ARB_REQ_MASK (0xff << 0)
719#define CG_ARB_REQ_SHIFT 0
720#define CG_ARB_RESP(x) ((x) << 8)
721#define CG_ARB_RESP_MASK (0xff << 8)
722#define CG_ARB_RESP_SHIFT 8
723#define ARB_CG_REQ(x) ((x) << 16)
724#define ARB_CG_REQ_MASK (0xff << 16)
725#define ARB_CG_REQ_SHIFT 16
726#define ARB_CG_RESP(x) ((x) << 24)
727#define ARB_CG_RESP_MASK (0xff << 24)
728#define ARB_CG_RESP_SHIFT 24
729
730#define MC_ARB_DRAM_TIMING_1 0x27f0
731#define MC_ARB_DRAM_TIMING_2 0x27f4
732#define MC_ARB_DRAM_TIMING_3 0x27f8
733#define MC_ARB_DRAM_TIMING2_1 0x27fc
734#define MC_ARB_DRAM_TIMING2_2 0x2800
735#define MC_ARB_DRAM_TIMING2_3 0x2804
736#define MC_ARB_BURST_TIME 0x2808
737#define STATE0(x) ((x) << 0)
738#define STATE0_MASK (0x1f << 0)
739#define STATE0_SHIFT 0
740#define STATE1(x) ((x) << 5)
741#define STATE1_MASK (0x1f << 5)
742#define STATE1_SHIFT 5
743#define STATE2(x) ((x) << 10)
744#define STATE2_MASK (0x1f << 10)
745#define STATE2_SHIFT 10
746#define STATE3(x) ((x) << 15)
747#define STATE3_MASK (0x1f << 15)
748#define STATE3_SHIFT 15
749
750#define MC_CG_DATAPORT 0x2884
751
752#define MC_SEQ_RAS_TIMING 0x28a0
753#define MC_SEQ_CAS_TIMING 0x28a4
754#define MC_SEQ_MISC_TIMING 0x28a8
755#define MC_SEQ_MISC_TIMING2 0x28ac
756#define MC_SEQ_PMG_TIMING 0x28b0
757#define MC_SEQ_RD_CTL_D0 0x28b4
758#define MC_SEQ_RD_CTL_D1 0x28b8
759#define MC_SEQ_WR_CTL_D0 0x28bc
760#define MC_SEQ_WR_CTL_D1 0x28c0
761
762#define MC_SEQ_MISC0 0x2a00
763#define MC_SEQ_MISC0_GDDR5_SHIFT 28
764#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
765#define MC_SEQ_MISC0_GDDR5_VALUE 5
766#define MC_SEQ_MISC1 0x2a04
767#define MC_SEQ_RESERVE_M 0x2a08
768#define MC_PMG_CMD_EMRS 0x2a0c
769
770#define MC_SEQ_MISC3 0x2a2c
771
772#define MC_SEQ_MISC5 0x2a54
773#define MC_SEQ_MISC6 0x2a58
774
775#define MC_SEQ_MISC7 0x2a64
776
777#define MC_SEQ_RAS_TIMING_LP 0x2a6c
778#define MC_SEQ_CAS_TIMING_LP 0x2a70
779#define MC_SEQ_MISC_TIMING_LP 0x2a74
780#define MC_SEQ_MISC_TIMING2_LP 0x2a78
781#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
782#define MC_SEQ_WR_CTL_D1_LP 0x2a80
783#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
784#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
785
786#define MC_PMG_CMD_MRS 0x2aac
787
788#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
789#define MC_SEQ_RD_CTL_D1_LP 0x2b20
790
791#define MC_PMG_CMD_MRS1 0x2b44
792#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
793#define MC_SEQ_PMG_TIMING_LP 0x2b4c
794
795#define MC_PMG_CMD_MRS2 0x2b5c
796#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
797
798#define LB_SYNC_RESET_SEL 0x6b28
799#define LB_SYNC_RESET_SEL_MASK (3 << 0)
800#define LB_SYNC_RESET_SEL_SHIFT 0
801
802#define DC_STUTTER_CNTL 0x6b30
803#define DC_STUTTER_ENABLE_A (1 << 0)
804#define DC_STUTTER_ENABLE_B (1 << 1)
805
806#define SQ_CAC_THRESHOLD 0x8e4c
807#define VSP(x) ((x) << 0)
808#define VSP_MASK (0xff << 0)
809#define VSP_SHIFT 0
810#define VSP0(x) ((x) << 8)
811#define VSP0_MASK (0xff << 8)
812#define VSP0_SHIFT 8
813#define GPR(x) ((x) << 16)
814#define GPR_MASK (0xff << 16)
815#define GPR_SHIFT 16
816
817#define SQ_POWER_THROTTLE 0x8e58
818#define MIN_POWER(x) ((x) << 0)
819#define MIN_POWER_MASK (0x3fff << 0)
820#define MIN_POWER_SHIFT 0
821#define MAX_POWER(x) ((x) << 16)
822#define MAX_POWER_MASK (0x3fff << 16)
823#define MAX_POWER_SHIFT 16
824#define SQ_POWER_THROTTLE2 0x8e5c
825#define MAX_POWER_DELTA(x) ((x) << 0)
826#define MAX_POWER_DELTA_MASK (0x3fff << 0)
827#define MAX_POWER_DELTA_SHIFT 0
828#define STI_SIZE(x) ((x) << 16)
829#define STI_SIZE_MASK (0x3ff << 16)
830#define STI_SIZE_SHIFT 16
831#define LTI_RATIO(x) ((x) << 27)
832#define LTI_RATIO_MASK (0xf << 27)
833#define LTI_RATIO_SHIFT 27
834
835/* CG indirect registers */
836#define CG_CAC_REGION_1_WEIGHT_0 0x83
837#define WEIGHT_TCP_SIG0(x) ((x) << 0)
838#define WEIGHT_TCP_SIG0_MASK (0x3f << 0)
839#define WEIGHT_TCP_SIG0_SHIFT 0
840#define WEIGHT_TCP_SIG1(x) ((x) << 6)
841#define WEIGHT_TCP_SIG1_MASK (0x3f << 6)
842#define WEIGHT_TCP_SIG1_SHIFT 6
843#define WEIGHT_TA_SIG(x) ((x) << 12)
844#define WEIGHT_TA_SIG_MASK (0x3f << 12)
845#define WEIGHT_TA_SIG_SHIFT 12
846#define CG_CAC_REGION_1_WEIGHT_1 0x84
847#define WEIGHT_TCC_EN0(x) ((x) << 0)
848#define WEIGHT_TCC_EN0_MASK (0x3f << 0)
849#define WEIGHT_TCC_EN0_SHIFT 0
850#define WEIGHT_TCC_EN1(x) ((x) << 6)
851#define WEIGHT_TCC_EN1_MASK (0x3f << 6)
852#define WEIGHT_TCC_EN1_SHIFT 6
853#define WEIGHT_TCC_EN2(x) ((x) << 12)
854#define WEIGHT_TCC_EN2_MASK (0x3f << 12)
855#define WEIGHT_TCC_EN2_SHIFT 12
856#define WEIGHT_TCC_EN3(x) ((x) << 18)
857#define WEIGHT_TCC_EN3_MASK (0x3f << 18)
858#define WEIGHT_TCC_EN3_SHIFT 18
859#define CG_CAC_REGION_2_WEIGHT_0 0x85
860#define WEIGHT_CB_EN0(x) ((x) << 0)
861#define WEIGHT_CB_EN0_MASK (0x3f << 0)
862#define WEIGHT_CB_EN0_SHIFT 0
863#define WEIGHT_CB_EN1(x) ((x) << 6)
864#define WEIGHT_CB_EN1_MASK (0x3f << 6)
865#define WEIGHT_CB_EN1_SHIFT 6
866#define WEIGHT_CB_EN2(x) ((x) << 12)
867#define WEIGHT_CB_EN2_MASK (0x3f << 12)
868#define WEIGHT_CB_EN2_SHIFT 12
869#define WEIGHT_CB_EN3(x) ((x) << 18)
870#define WEIGHT_CB_EN3_MASK (0x3f << 18)
871#define WEIGHT_CB_EN3_SHIFT 18
872#define CG_CAC_REGION_2_WEIGHT_1 0x86
873#define WEIGHT_DB_SIG0(x) ((x) << 0)
874#define WEIGHT_DB_SIG0_MASK (0x3f << 0)
875#define WEIGHT_DB_SIG0_SHIFT 0
876#define WEIGHT_DB_SIG1(x) ((x) << 6)
877#define WEIGHT_DB_SIG1_MASK (0x3f << 6)
878#define WEIGHT_DB_SIG1_SHIFT 6
879#define WEIGHT_DB_SIG2(x) ((x) << 12)
880#define WEIGHT_DB_SIG2_MASK (0x3f << 12)
881#define WEIGHT_DB_SIG2_SHIFT 12
882#define WEIGHT_DB_SIG3(x) ((x) << 18)
883#define WEIGHT_DB_SIG3_MASK (0x3f << 18)
884#define WEIGHT_DB_SIG3_SHIFT 18
885#define CG_CAC_REGION_2_WEIGHT_2 0x87
886#define WEIGHT_SXM_SIG0(x) ((x) << 0)
887#define WEIGHT_SXM_SIG0_MASK (0x3f << 0)
888#define WEIGHT_SXM_SIG0_SHIFT 0
889#define WEIGHT_SXM_SIG1(x) ((x) << 6)
890#define WEIGHT_SXM_SIG1_MASK (0x3f << 6)
891#define WEIGHT_SXM_SIG1_SHIFT 6
892#define WEIGHT_SXM_SIG2(x) ((x) << 12)
893#define WEIGHT_SXM_SIG2_MASK (0x3f << 12)
894#define WEIGHT_SXM_SIG2_SHIFT 12
895#define WEIGHT_SXS_SIG0(x) ((x) << 18)
896#define WEIGHT_SXS_SIG0_MASK (0x3f << 18)
897#define WEIGHT_SXS_SIG0_SHIFT 18
898#define WEIGHT_SXS_SIG1(x) ((x) << 24)
899#define WEIGHT_SXS_SIG1_MASK (0x3f << 24)
900#define WEIGHT_SXS_SIG1_SHIFT 24
901#define CG_CAC_REGION_3_WEIGHT_0 0x88
902#define WEIGHT_XBR_0(x) ((x) << 0)
903#define WEIGHT_XBR_0_MASK (0x3f << 0)
904#define WEIGHT_XBR_0_SHIFT 0
905#define WEIGHT_XBR_1(x) ((x) << 6)
906#define WEIGHT_XBR_1_MASK (0x3f << 6)
907#define WEIGHT_XBR_1_SHIFT 6
908#define WEIGHT_XBR_2(x) ((x) << 12)
909#define WEIGHT_XBR_2_MASK (0x3f << 12)
910#define WEIGHT_XBR_2_SHIFT 12
911#define WEIGHT_SPI_SIG0(x) ((x) << 18)
912#define WEIGHT_SPI_SIG0_MASK (0x3f << 18)
913#define WEIGHT_SPI_SIG0_SHIFT 18
914#define CG_CAC_REGION_3_WEIGHT_1 0x89
915#define WEIGHT_SPI_SIG1(x) ((x) << 0)
916#define WEIGHT_SPI_SIG1_MASK (0x3f << 0)
917#define WEIGHT_SPI_SIG1_SHIFT 0
918#define WEIGHT_SPI_SIG2(x) ((x) << 6)
919#define WEIGHT_SPI_SIG2_MASK (0x3f << 6)
920#define WEIGHT_SPI_SIG2_SHIFT 6
921#define WEIGHT_SPI_SIG3(x) ((x) << 12)
922#define WEIGHT_SPI_SIG3_MASK (0x3f << 12)
923#define WEIGHT_SPI_SIG3_SHIFT 12
924#define WEIGHT_SPI_SIG4(x) ((x) << 18)
925#define WEIGHT_SPI_SIG4_MASK (0x3f << 18)
926#define WEIGHT_SPI_SIG4_SHIFT 18
927#define WEIGHT_SPI_SIG5(x) ((x) << 24)
928#define WEIGHT_SPI_SIG5_MASK (0x3f << 24)
929#define WEIGHT_SPI_SIG5_SHIFT 24
930#define CG_CAC_REGION_4_WEIGHT_0 0x8a
931#define WEIGHT_LDS_SIG0(x) ((x) << 0)
932#define WEIGHT_LDS_SIG0_MASK (0x3f << 0)
933#define WEIGHT_LDS_SIG0_SHIFT 0
934#define WEIGHT_LDS_SIG1(x) ((x) << 6)
935#define WEIGHT_LDS_SIG1_MASK (0x3f << 6)
936#define WEIGHT_LDS_SIG1_SHIFT 6
937#define WEIGHT_SC(x) ((x) << 24)
938#define WEIGHT_SC_MASK (0x3f << 24)
939#define WEIGHT_SC_SHIFT 24
940#define CG_CAC_REGION_4_WEIGHT_1 0x8b
941#define WEIGHT_BIF(x) ((x) << 0)
942#define WEIGHT_BIF_MASK (0x3f << 0)
943#define WEIGHT_BIF_SHIFT 0
944#define WEIGHT_CP(x) ((x) << 6)
945#define WEIGHT_CP_MASK (0x3f << 6)
946#define WEIGHT_CP_SHIFT 6
947#define WEIGHT_PA_SIG0(x) ((x) << 12)
948#define WEIGHT_PA_SIG0_MASK (0x3f << 12)
949#define WEIGHT_PA_SIG0_SHIFT 12
950#define WEIGHT_PA_SIG1(x) ((x) << 18)
951#define WEIGHT_PA_SIG1_MASK (0x3f << 18)
952#define WEIGHT_PA_SIG1_SHIFT 18
953#define WEIGHT_VGT_SIG0(x) ((x) << 24)
954#define WEIGHT_VGT_SIG0_MASK (0x3f << 24)
955#define WEIGHT_VGT_SIG0_SHIFT 24
956#define CG_CAC_REGION_4_WEIGHT_2 0x8c
957#define WEIGHT_VGT_SIG1(x) ((x) << 0)
958#define WEIGHT_VGT_SIG1_MASK (0x3f << 0)
959#define WEIGHT_VGT_SIG1_SHIFT 0
960#define WEIGHT_VGT_SIG2(x) ((x) << 6)
961#define WEIGHT_VGT_SIG2_MASK (0x3f << 6)
962#define WEIGHT_VGT_SIG2_SHIFT 6
963#define WEIGHT_DC_SIG0(x) ((x) << 12)
964#define WEIGHT_DC_SIG0_MASK (0x3f << 12)
965#define WEIGHT_DC_SIG0_SHIFT 12
966#define WEIGHT_DC_SIG1(x) ((x) << 18)
967#define WEIGHT_DC_SIG1_MASK (0x3f << 18)
968#define WEIGHT_DC_SIG1_SHIFT 18
969#define WEIGHT_DC_SIG2(x) ((x) << 24)
970#define WEIGHT_DC_SIG2_MASK (0x3f << 24)
971#define WEIGHT_DC_SIG2_SHIFT 24
972#define CG_CAC_REGION_4_WEIGHT_3 0x8d
973#define WEIGHT_DC_SIG3(x) ((x) << 0)
974#define WEIGHT_DC_SIG3_MASK (0x3f << 0)
975#define WEIGHT_DC_SIG3_SHIFT 0
976#define WEIGHT_UVD_SIG0(x) ((x) << 6)
977#define WEIGHT_UVD_SIG0_MASK (0x3f << 6)
978#define WEIGHT_UVD_SIG0_SHIFT 6
979#define WEIGHT_UVD_SIG1(x) ((x) << 12)
980#define WEIGHT_UVD_SIG1_MASK (0x3f << 12)
981#define WEIGHT_UVD_SIG1_SHIFT 12
982#define WEIGHT_SPARE0(x) ((x) << 18)
983#define WEIGHT_SPARE0_MASK (0x3f << 18)
984#define WEIGHT_SPARE0_SHIFT 18
985#define WEIGHT_SPARE1(x) ((x) << 24)
986#define WEIGHT_SPARE1_MASK (0x3f << 24)
987#define WEIGHT_SPARE1_SHIFT 24
988#define CG_CAC_REGION_5_WEIGHT_0 0x8e
989#define WEIGHT_SQ_VSP(x) ((x) << 0)
990#define WEIGHT_SQ_VSP_MASK (0x3fff << 0)
991#define WEIGHT_SQ_VSP_SHIFT 0
992#define WEIGHT_SQ_VSP0(x) ((x) << 14)
993#define WEIGHT_SQ_VSP0_MASK (0x3fff << 14)
994#define WEIGHT_SQ_VSP0_SHIFT 14
995#define CG_CAC_REGION_4_OVERRIDE_4 0xab
996#define OVR_MODE_SPARE_0(x) ((x) << 16)
997#define OVR_MODE_SPARE_0_MASK (0x1 << 16)
998#define OVR_MODE_SPARE_0_SHIFT 16
999#define OVR_VAL_SPARE_0(x) ((x) << 17)
1000#define OVR_VAL_SPARE_0_MASK (0x1 << 17)
1001#define OVR_VAL_SPARE_0_SHIFT 17
1002#define OVR_MODE_SPARE_1(x) ((x) << 18)
1003#define OVR_MODE_SPARE_1_MASK (0x1 << 18)
1004#define OVR_MODE_SPARE_1_SHIFT 18
1005#define OVR_VAL_SPARE_1(x) ((x) << 19)
1006#define OVR_VAL_SPARE_1_MASK (0x1 << 19)
1007#define OVR_VAL_SPARE_1_SHIFT 19
1008#define CG_CAC_REGION_5_WEIGHT_1 0xb7
1009#define WEIGHT_SQ_GPR(x) ((x) << 0)
1010#define WEIGHT_SQ_GPR_MASK (0x3fff << 0)
1011#define WEIGHT_SQ_GPR_SHIFT 0
1012#define WEIGHT_SQ_LDS(x) ((x) << 14)
1013#define WEIGHT_SQ_LDS_MASK (0x3fff << 14)
1014#define WEIGHT_SQ_LDS_SHIFT 14
1015
1016/* PCIE link stuff */
1017#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
1018#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
1019# define LC_LINK_WIDTH_SHIFT 0
1020# define LC_LINK_WIDTH_MASK 0x7
1021# define LC_LINK_WIDTH_X0 0
1022# define LC_LINK_WIDTH_X1 1
1023# define LC_LINK_WIDTH_X2 2
1024# define LC_LINK_WIDTH_X4 3
1025# define LC_LINK_WIDTH_X8 4
1026# define LC_LINK_WIDTH_X16 6
1027# define LC_LINK_WIDTH_RD_SHIFT 4
1028# define LC_LINK_WIDTH_RD_MASK 0x70
1029# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
1030# define LC_RECONFIG_NOW (1 << 8)
1031# define LC_RENEGOTIATION_SUPPORT (1 << 9)
1032# define LC_RENEGOTIATE_EN (1 << 10)
1033# define LC_SHORT_RECONFIG_EN (1 << 11)
1034# define LC_UPCONFIGURE_SUPPORT (1 << 12)
1035# define LC_UPCONFIGURE_DIS (1 << 13)
1036#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
1037# define LC_GEN2_EN_STRAP (1 << 0)
1038# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
1039# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
1040# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
1041# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
1042# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 8
1043# define LC_CURRENT_DATA_RATE (1 << 11)
1044# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
1045# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
1046# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
1047# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
1048# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
1049# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
1050# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
1051#define MM_CFGREGS_CNTL 0x544c
1052# define MM_WR_TO_CFG_EN (1 << 3)
1053#define LINK_CNTL2 0x88 /* F0 */
1054# define TARGET_LINK_SPEED_MASK (0xf << 0)
1055# define SELECTABLE_DEEMPHASIS (1 << 6)
1056
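The link-width field is both requested (the low bits) and read back (the RD field). A hedged decode sketch, assuming the driver's RREG32_PCIE_PORT() indirect accessor for the PCIE_P register space:

/* Sketch: report the negotiated PCIE link width in lanes. */
static int example_pcie_link_width(struct radeon_device *rdev)
{
	u32 v = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);

	switch ((v & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
	case LC_LINK_WIDTH_X1:  return 1;
	case LC_LINK_WIDTH_X2:  return 2;
	case LC_LINK_WIDTH_X4:  return 4;
	case LC_LINK_WIDTH_X8:  return 8;
	case LC_LINK_WIDTH_X16: return 16;
	default:                return 0;
	}
}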
492/* 1057/*
493 * UVD 1058 * UVD
494 */ 1059 */
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
new file mode 100644
index 000000000000..3cf8fc0d83f4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -0,0 +1,329 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __NISLANDS_SMC_H__
24#define __NISLANDS_SMC_H__
25
26#pragma pack(push, 1)
27
28#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
29
30struct PP_NIslands_Dpm2PerfLevel
31{
32 uint8_t MaxPS;
33 uint8_t TgtAct;
34 uint8_t MaxPS_StepInc;
35 uint8_t MaxPS_StepDec;
36 uint8_t PSST;
37 uint8_t NearTDPDec;
38 uint8_t AboveSafeInc;
39 uint8_t BelowSafeInc;
40 uint8_t PSDeltaLimit;
41 uint8_t PSDeltaWin;
42 uint8_t Reserved[6];
43};
44
45typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
46
47struct PP_NIslands_DPM2Parameters
48{
49 uint32_t TDPLimit;
50 uint32_t NearTDPLimit;
51 uint32_t SafePowerLimit;
52 uint32_t PowerBoostLimit;
53};
54typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
55
56struct NISLANDS_SMC_SCLK_VALUE
57{
58 uint32_t vCG_SPLL_FUNC_CNTL;
59 uint32_t vCG_SPLL_FUNC_CNTL_2;
60 uint32_t vCG_SPLL_FUNC_CNTL_3;
61 uint32_t vCG_SPLL_FUNC_CNTL_4;
62 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
63 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
64 uint32_t sclk_value;
65};
66
67typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
68
69struct NISLANDS_SMC_MCLK_VALUE
70{
71 uint32_t vMPLL_FUNC_CNTL;
72 uint32_t vMPLL_FUNC_CNTL_1;
73 uint32_t vMPLL_FUNC_CNTL_2;
74 uint32_t vMPLL_AD_FUNC_CNTL;
75 uint32_t vMPLL_AD_FUNC_CNTL_2;
76 uint32_t vMPLL_DQ_FUNC_CNTL;
77 uint32_t vMPLL_DQ_FUNC_CNTL_2;
78 uint32_t vMCLK_PWRMGT_CNTL;
79 uint32_t vDLL_CNTL;
80 uint32_t vMPLL_SS;
81 uint32_t vMPLL_SS2;
82 uint32_t mclk_value;
83};
84
85typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
86
87struct NISLANDS_SMC_VOLTAGE_VALUE
88{
89 uint16_t value;
90 uint8_t index;
91 uint8_t padding;
92};
93
94typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
95
96struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
97{
98 uint8_t arbValue;
99 uint8_t ACIndex;
100 uint8_t displayWatermark;
101 uint8_t gen2PCIE;
102 uint8_t reserved1;
103 uint8_t reserved2;
104 uint8_t strobeMode;
105 uint8_t mcFlags;
106 uint32_t aT;
107 uint32_t bSP;
108 NISLANDS_SMC_SCLK_VALUE sclk;
109 NISLANDS_SMC_MCLK_VALUE mclk;
110 NISLANDS_SMC_VOLTAGE_VALUE vddc;
111 NISLANDS_SMC_VOLTAGE_VALUE mvdd;
112 NISLANDS_SMC_VOLTAGE_VALUE vddci;
113 NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
114 uint32_t powergate_en;
115 uint8_t hUp;
116 uint8_t hDown;
117 uint8_t stateFlags;
118 uint8_t arbRefreshState;
119 uint32_t SQPowerThrottle;
120 uint32_t SQPowerThrottle_2;
121 uint32_t reserved[2];
122 PP_NIslands_Dpm2PerfLevel dpm2;
123};
124
125#define NISLANDS_SMC_STROBE_RATIO 0x0F
126#define NISLANDS_SMC_STROBE_ENABLE 0x10
127
128#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
129#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
130#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
131#define NISLANDS_SMC_MC_STUTTER_EN 0x08
132
133typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
134
135struct NISLANDS_SMC_SWSTATE
136{
137 uint8_t flags;
138 uint8_t levelCount;
139 uint8_t padding2;
140 uint8_t padding3;
141 NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
142};
143
144typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
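NISLANDS_SMC_SWSTATE closes with a one-element levels[] array, the classic trailing-array idiom: levelCount entries actually follow in memory, which is also why NISLANDS_SMC_STATETABLE below tacks NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1 extra dpmLevels after driverState. A sizing sketch, assuming n has been validated to be at least 1 and at most that maximum:

/* Sketch: bytes occupied by a swstate carrying n performance levels;
 * sizeof() already accounts for the first level. */
static size_t example_swstate_size(unsigned int n)
{
	return sizeof(NISLANDS_SMC_SWSTATE) +
	       (n - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL);
}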
145
146#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
147#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
148#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
149#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
150
151struct NISLANDS_SMC_VOLTAGEMASKTABLE
152{
153 uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
154 uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
155};
156
157typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
158
159#define NISLANDS_MAX_NO_VREG_STEPS 32
160
161struct NISLANDS_SMC_STATETABLE
162{
163 uint8_t thermalProtectType;
164 uint8_t systemFlags;
165 uint8_t maxVDDCIndexInPPTable;
166 uint8_t extraFlags;
167 uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
168 uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
169 NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
170 PP_NIslands_DPM2Parameters dpm2Params;
171 NISLANDS_SMC_SWSTATE initialState;
172 NISLANDS_SMC_SWSTATE ACPIState;
173 NISLANDS_SMC_SWSTATE ULVState;
174 NISLANDS_SMC_SWSTATE driverState;
175 NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
176};
177
178typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
179
180#define NI_SMC_SOFT_REGISTERS_START 0x108
181
182#define NI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
183#define NI_SMC_SOFT_REGISTER_delay_bbias 0xC
184#define NI_SMC_SOFT_REGISTER_delay_vreg 0x10
185#define NI_SMC_SOFT_REGISTER_delay_acpi 0x2C
186#define NI_SMC_SOFT_REGISTER_seq_index 0x64
187#define NI_SMC_SOFT_REGISTER_mvdd_chg_time 0x68
188#define NI_SMC_SOFT_REGISTER_mclk_switch_lim 0x78
189#define NI_SMC_SOFT_REGISTER_watermark_threshold 0x80
190#define NI_SMC_SOFT_REGISTER_mc_block_delay 0x84
191#define NI_SMC_SOFT_REGISTER_uvd_enabled 0x98
192
193#define SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES 16
194#define SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
195#define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
196#define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4
197
198struct SMC_NISLANDS_MC_TPP_CAC_TABLE
199{
200 uint32_t tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
201 uint32_t cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
202};
203
204typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;
205
206
207struct PP_NIslands_CACTABLES
208{
209 uint32_t cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
210 uint32_t cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
211
212 uint32_t pwr_const;
213
214 uint32_t dc_cacValue;
215 uint32_t bif_cacValue;
216 uint32_t lkge_pwr;
217
218 uint8_t cac_width;
219 uint8_t window_size_p2;
220
221 uint8_t num_drop_lsb;
222 uint8_t padding_0;
223
224 uint32_t last_power;
225
226 uint8_t AllowOvrflw;
227 uint8_t MCWrWeight;
228 uint8_t MCRdWeight;
229 uint8_t padding_1[9];
230
231 uint8_t enableWinAvg;
232 uint8_t numWin_TDP;
233 uint8_t l2numWin_TDP;
234 uint8_t WinIndex;
235
236 uint32_t dynPwr_TDP[4];
237 uint32_t lkgePwr_TDP[4];
238 uint32_t power_TDP[4];
239 uint32_t avg_dynPwr_TDP;
240 uint32_t avg_lkgePwr_TDP;
241 uint32_t avg_power_TDP;
242 uint32_t lts_power_TDP;
243 uint8_t lts_truncate_n;
244 uint8_t padding_2[7];
245};
246
247typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES;
248
249#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
250#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
251
252struct SMC_NIslands_MCRegisterAddress
253{
254 uint16_t s0;
255 uint16_t s1;
256};
257
258typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
259
260
261struct SMC_NIslands_MCRegisterSet
262{
263 uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
264};
265
266typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
267
268struct SMC_NIslands_MCRegisters
269{
270 uint8_t last;
271 uint8_t reserved[3];
272 SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
273 SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
274};
275
276typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
277
278struct SMC_NIslands_MCArbDramTimingRegisterSet
279{
280 uint32_t mc_arb_dram_timing;
281 uint32_t mc_arb_dram_timing2;
282 uint8_t mc_arb_rfsh_rate;
283 uint8_t padding[3];
284};
285
286typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet;
287
288struct SMC_NIslands_MCArbDramTimingRegisters
289{
290 uint8_t arb_current;
291 uint8_t reserved[3];
292 SMC_NIslands_MCArbDramTimingRegisterSet data[20];
293};
294
295typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters;
296
297struct SMC_NISLANDS_SPLL_DIV_TABLE
298{
299 uint32_t freq[256];
300 uint32_t ss[256];
301};
302
303#define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
304#define SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
305#define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
306#define SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
307#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
308#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
309#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
310#define SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
311
312typedef struct SMC_NISLANDS_SPLL_DIV_TABLE SMC_NISLANDS_SPLL_DIV_TABLE;
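Per the masks above, each freq[] word packs a 25-bit feedback divider alongside a 7-bit post divider, and each ss[] word a 20-bit CLKV alongside a 12-bit CLKS. A hedged decode sketch for the freq side:

/* Sketch: unpack one freq[] entry of the SPLL divider table. */
static void example_unpack_spll_freq(uint32_t entry,
				     uint32_t *fbdiv, uint32_t *pdiv)
{
	*fbdiv = (entry & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) >>
		 SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT;
	*pdiv  = (entry & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK) >>
		 SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT;
}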
313
314#define NISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x100
315
316#define NISLANDS_SMC_FIRMWARE_HEADER_version 0x0
317#define NISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
318#define NISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0x8
319#define NISLANDS_SMC_FIRMWARE_HEADER_stateTable 0xC
320#define NISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x10
321#define NISLANDS_SMC_FIRMWARE_HEADER_cacTable 0x14
322#define NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
323#define NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x2C
324#define NISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x30
325
326#pragma pack(pop)
327
328#endif
329
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
new file mode 100644
index 000000000000..8fb1113a8fd7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef PP_SMC_H
24#define PP_SMC_H
25
26#pragma pack(push, 1)
27
28#define PPSMC_SWSTATE_FLAG_DC 0x01
29#define PPSMC_SWSTATE_FLAG_UVD 0x02
30#define PPSMC_SWSTATE_FLAG_VCE 0x04
31#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
32
33#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
34#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
35#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
36
37#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
38#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
39#define PPSMC_SYSTEMFLAG_GDDR5 0x04
40#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
41#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
42#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
43#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40
44
45#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
46#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
47#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
48#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
49#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02
50
51#define PPSMC_DISPLAY_WATERMARK_LOW 0
52#define PPSMC_DISPLAY_WATERMARK_HIGH 1
53
54#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
55#define PPSMC_STATEFLAG_POWERBOOST 0x02
56#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
57#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
58
59#define PPSMC_Result_OK ((uint8_t)0x01)
60#define PPSMC_Result_Failed ((uint8_t)0xFF)
61
62typedef uint8_t PPSMC_Result;
63
64#define PPSMC_MSG_Halt ((uint8_t)0x10)
65#define PPSMC_MSG_Resume ((uint8_t)0x11)
66#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13)
67#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14)
68#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15)
69#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16)
70#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17)
71#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20)
72#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40)
73#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41)
74#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51)
75#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52)
76#define PPSMC_MSG_EnableCac ((uint8_t)0x53)
77#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
78#define PPSMC_TDPClampingActive ((uint8_t)0x59)
79#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
80#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
81#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
82#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
83#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
84#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
85#define PPSMC_MSG_DisableULV ((uint8_t)0x63)
86#define PPSMC_MSG_EnterULV ((uint8_t)0x64)
87#define PPSMC_MSG_ExitULV ((uint8_t)0x65)
88#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E)
89#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F)
90#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A)
91#define PPSMC_FlushDataCache ((uint8_t)0x80)
92#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82)
93#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83)
94#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84)
95#define PPSMC_MSG_EnableDTE ((uint8_t)0x87)
96#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
97#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
98#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
99
100/* TN */
101#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
102#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
103#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
104#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
105#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
106#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
107
108
109typedef uint16_t PPSMC_Msg;
110
111#pragma pack(pop)
112
113#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6948eb88c2b7..2d3655f7f41e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,18 +38,7 @@
38#include "r600d.h" 38#include "r600d.h"
39#include "atom.h" 39#include "atom.h"
40#include "avivod.h" 40#include "avivod.h"
41 41#include "radeon_ucode.h"
42#define PFP_UCODE_SIZE 576
43#define PM4_UCODE_SIZE 1792
44#define RLC_UCODE_SIZE 768
45#define R700_PFP_UCODE_SIZE 848
46#define R700_PM4_UCODE_SIZE 1360
47#define R700_RLC_UCODE_SIZE 1024
48#define EVERGREEN_PFP_UCODE_SIZE 1120
49#define EVERGREEN_PM4_UCODE_SIZE 1376
50#define EVERGREEN_RLC_UCODE_SIZE 768
51#define CAYMAN_RLC_UCODE_SIZE 1024
52#define ARUBA_RLC_UCODE_SIZE 1536
53 42
54/* Firmware Names */ 43/* Firmware Names */
55MODULE_FIRMWARE("radeon/R600_pfp.bin"); 44MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,24 +57,32 @@ MODULE_FIRMWARE("radeon/RS780_pfp.bin");
68MODULE_FIRMWARE("radeon/RS780_me.bin"); 57MODULE_FIRMWARE("radeon/RS780_me.bin");
69MODULE_FIRMWARE("radeon/RV770_pfp.bin"); 58MODULE_FIRMWARE("radeon/RV770_pfp.bin");
70MODULE_FIRMWARE("radeon/RV770_me.bin"); 59MODULE_FIRMWARE("radeon/RV770_me.bin");
60MODULE_FIRMWARE("radeon/RV770_smc.bin");
71MODULE_FIRMWARE("radeon/RV730_pfp.bin"); 61MODULE_FIRMWARE("radeon/RV730_pfp.bin");
72MODULE_FIRMWARE("radeon/RV730_me.bin"); 62MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV730_smc.bin");
64MODULE_FIRMWARE("radeon/RV740_smc.bin");
73MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
74MODULE_FIRMWARE("radeon/RV710_me.bin"); 66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/RV710_smc.bin");
75MODULE_FIRMWARE("radeon/R600_rlc.bin"); 68MODULE_FIRMWARE("radeon/R600_rlc.bin");
76MODULE_FIRMWARE("radeon/R700_rlc.bin"); 69MODULE_FIRMWARE("radeon/R700_rlc.bin");
77MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); 70MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
78MODULE_FIRMWARE("radeon/CEDAR_me.bin"); 71MODULE_FIRMWARE("radeon/CEDAR_me.bin");
79MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); 72MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
73MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
80MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); 74MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
81MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); 75MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
82MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); 76MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
77MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
83MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); 78MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
84MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); 79MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
85MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); 80MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
81MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
86MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); 82MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
87MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); 83MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
88MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); 84MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
85MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
89MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 86MODULE_FIRMWARE("radeon/PALM_pfp.bin");
90MODULE_FIRMWARE("radeon/PALM_me.bin"); 87MODULE_FIRMWARE("radeon/PALM_me.bin");
91MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 88MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
@@ -108,6 +105,7 @@ static void r600_gpu_init(struct radeon_device *rdev);
108void r600_fini(struct radeon_device *rdev); 105void r600_fini(struct radeon_device *rdev);
109void r600_irq_disable(struct radeon_device *rdev); 106void r600_irq_disable(struct radeon_device *rdev);
110static void r600_pcie_gen2_enable(struct radeon_device *rdev); 107static void r600_pcie_gen2_enable(struct radeon_device *rdev);
108extern int evergreen_rlc_resume(struct radeon_device *rdev);
111 109
112/** 110/**
113 * r600_get_xclk - get the xclk 111 * r600_get_xclk - get the xclk
@@ -2149,7 +2147,8 @@ int r600_init_microcode(struct radeon_device *rdev)
2149 struct platform_device *pdev; 2147 struct platform_device *pdev;
2150 const char *chip_name; 2148 const char *chip_name;
2151 const char *rlc_chip_name; 2149 const char *rlc_chip_name;
2152 size_t pfp_req_size, me_req_size, rlc_req_size; 2150 const char *smc_chip_name = "RV770";
2151 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2153 char fw_name[30]; 2152 char fw_name[30];
2154 int err; 2153 int err;
2155 2154
@@ -2195,32 +2194,51 @@ int r600_init_microcode(struct radeon_device *rdev)
2195 case CHIP_RV770: 2194 case CHIP_RV770:
2196 chip_name = "RV770"; 2195 chip_name = "RV770";
2197 rlc_chip_name = "R700"; 2196 rlc_chip_name = "R700";
2197 smc_chip_name = "RV770";
2198 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2198 break; 2199 break;
2199 case CHIP_RV730: 2200 case CHIP_RV730:
2200 case CHIP_RV740:
2201 chip_name = "RV730"; 2201 chip_name = "RV730";
2202 rlc_chip_name = "R700"; 2202 rlc_chip_name = "R700";
2203 smc_chip_name = "RV730";
2204 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2203 break; 2205 break;
2204 case CHIP_RV710: 2206 case CHIP_RV710:
2205 chip_name = "RV710"; 2207 chip_name = "RV710";
2206 rlc_chip_name = "R700"; 2208 rlc_chip_name = "R700";
2209 smc_chip_name = "RV710";
2210 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2211 break;
2212 case CHIP_RV740:
2213 chip_name = "RV730";
2214 rlc_chip_name = "R700";
2215 smc_chip_name = "RV740";
2216 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2207 break; 2217 break;
2208 case CHIP_CEDAR: 2218 case CHIP_CEDAR:
2209 chip_name = "CEDAR"; 2219 chip_name = "CEDAR";
2210 rlc_chip_name = "CEDAR"; 2220 rlc_chip_name = "CEDAR";
2221 smc_chip_name = "CEDAR";
2222 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2211 break; 2223 break;
2212 case CHIP_REDWOOD: 2224 case CHIP_REDWOOD:
2213 chip_name = "REDWOOD"; 2225 chip_name = "REDWOOD";
2214 rlc_chip_name = "REDWOOD"; 2226 rlc_chip_name = "REDWOOD";
2227 smc_chip_name = "REDWOOD";
2228 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2215 break; 2229 break;
2216 case CHIP_JUNIPER: 2230 case CHIP_JUNIPER:
2217 chip_name = "JUNIPER"; 2231 chip_name = "JUNIPER";
2218 rlc_chip_name = "JUNIPER"; 2232 rlc_chip_name = "JUNIPER";
2233 smc_chip_name = "JUNIPER";
2234 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2219 break; 2235 break;
2220 case CHIP_CYPRESS: 2236 case CHIP_CYPRESS:
2221 case CHIP_HEMLOCK: 2237 case CHIP_HEMLOCK:
2222 chip_name = "CYPRESS"; 2238 chip_name = "CYPRESS";
2223 rlc_chip_name = "CYPRESS"; 2239 rlc_chip_name = "CYPRESS";
2240 smc_chip_name = "CYPRESS";
2241 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2224 break; 2242 break;
2225 case CHIP_PALM: 2243 case CHIP_PALM:
2226 chip_name = "PALM"; 2244 chip_name = "PALM";
@@ -2246,9 +2264,9 @@ int r600_init_microcode(struct radeon_device *rdev)
2246 me_req_size = R700_PM4_UCODE_SIZE * 4; 2264 me_req_size = R700_PM4_UCODE_SIZE * 4;
2247 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2265 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2248 } else { 2266 } else {
2249 pfp_req_size = PFP_UCODE_SIZE * 4; 2267 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2250 me_req_size = PM4_UCODE_SIZE * 12; 2268 me_req_size = R600_PM4_UCODE_SIZE * 12;
2251 rlc_req_size = RLC_UCODE_SIZE * 4; 2269 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2252 } 2270 }
2253 2271
2254 DRM_INFO("Loading %s Microcode\n", chip_name); 2272 DRM_INFO("Loading %s Microcode\n", chip_name);
@@ -2287,6 +2305,19 @@ int r600_init_microcode(struct radeon_device *rdev)
2287 err = -EINVAL; 2305 err = -EINVAL;
2288 } 2306 }
2289 2307
2308 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2309 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2310 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
2311 if (err)
2312 goto out;
2313 if (rdev->smc_fw->size != smc_req_size) {
2314 printk(KERN_ERR
2315 "smc: Bogus length %zu in firmware \"%s\"\n",
2316 rdev->smc_fw->size, fw_name);
2317 err = -EINVAL;
2318 }
2319 }
2320
2290out: 2321out:
2291 platform_device_unregister(pdev); 2322 platform_device_unregister(pdev);
2292 2323
@@ -2301,6 +2332,8 @@ out:
2301 rdev->me_fw = NULL; 2332 rdev->me_fw = NULL;
2302 release_firmware(rdev->rlc_fw); 2333 release_firmware(rdev->rlc_fw);
2303 rdev->rlc_fw = NULL; 2334 rdev->rlc_fw = NULL;
2335 release_firmware(rdev->smc_fw);
2336 rdev->smc_fw = NULL;
2304 } 2337 }
2305 return err; 2338 return err;
2306} 2339}
@@ -2331,13 +2364,13 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
2331 2364
2332 fw_data = (const __be32 *)rdev->me_fw->data; 2365 fw_data = (const __be32 *)rdev->me_fw->data;
2333 WREG32(CP_ME_RAM_WADDR, 0); 2366 WREG32(CP_ME_RAM_WADDR, 0);
2334 for (i = 0; i < PM4_UCODE_SIZE * 3; i++) 2367 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2335 WREG32(CP_ME_RAM_DATA, 2368 WREG32(CP_ME_RAM_DATA,
2336 be32_to_cpup(fw_data++)); 2369 be32_to_cpup(fw_data++));
2337 2370
2338 fw_data = (const __be32 *)rdev->pfp_fw->data; 2371 fw_data = (const __be32 *)rdev->pfp_fw->data;
2339 WREG32(CP_PFP_UCODE_ADDR, 0); 2372 WREG32(CP_PFP_UCODE_ADDR, 0);
2340 for (i = 0; i < PFP_UCODE_SIZE; i++) 2373 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2341 WREG32(CP_PFP_UCODE_DATA, 2374 WREG32(CP_PFP_UCODE_DATA,
2342 be32_to_cpup(fw_data++)); 2375 be32_to_cpup(fw_data++));
2343 2376
@@ -3789,7 +3822,7 @@ static void r600_rlc_start(struct radeon_device *rdev)
3789 WREG32(RLC_CNTL, RLC_ENABLE); 3822 WREG32(RLC_CNTL, RLC_ENABLE);
3790} 3823}
3791 3824
3792static int r600_rlc_init(struct radeon_device *rdev) 3825static int r600_rlc_resume(struct radeon_device *rdev)
3793{ 3826{
3794 u32 i; 3827 u32 i;
3795 const __be32 *fw_data; 3828 const __be32 *fw_data;
@@ -3801,45 +3834,22 @@ static int r600_rlc_init(struct radeon_device *rdev)
3801 3834
3802 WREG32(RLC_HB_CNTL, 0); 3835 WREG32(RLC_HB_CNTL, 0);
3803 3836
3804 if (rdev->family == CHIP_ARUBA) { 3837 WREG32(RLC_HB_BASE, 0);
3805 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 3838 WREG32(RLC_HB_RPTR, 0);
3806 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 3839 WREG32(RLC_HB_WPTR, 0);
3807 } 3840 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3808 if (rdev->family <= CHIP_CAYMAN) { 3841 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3809 WREG32(RLC_HB_BASE, 0);
3810 WREG32(RLC_HB_RPTR, 0);
3811 WREG32(RLC_HB_WPTR, 0);
3812 }
3813 if (rdev->family <= CHIP_CAICOS) {
3814 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3815 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3816 }
3817 WREG32(RLC_MC_CNTL, 0); 3842 WREG32(RLC_MC_CNTL, 0);
3818 WREG32(RLC_UCODE_CNTL, 0); 3843 WREG32(RLC_UCODE_CNTL, 0);
3819 3844
3820 fw_data = (const __be32 *)rdev->rlc_fw->data; 3845 fw_data = (const __be32 *)rdev->rlc_fw->data;
3821 if (rdev->family >= CHIP_ARUBA) { 3846 if (rdev->family >= CHIP_RV770) {
3822 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
3823 WREG32(RLC_UCODE_ADDR, i);
3824 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3825 }
3826 } else if (rdev->family >= CHIP_CAYMAN) {
3827 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
3828 WREG32(RLC_UCODE_ADDR, i);
3829 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3830 }
3831 } else if (rdev->family >= CHIP_CEDAR) {
3832 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
3833 WREG32(RLC_UCODE_ADDR, i);
3834 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3835 }
3836 } else if (rdev->family >= CHIP_RV770) {
3837 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3847 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3838 WREG32(RLC_UCODE_ADDR, i); 3848 WREG32(RLC_UCODE_ADDR, i);
3839 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3849 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3840 } 3850 }
3841 } else { 3851 } else {
3842 for (i = 0; i < RLC_UCODE_SIZE; i++) { 3852 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3843 WREG32(RLC_UCODE_ADDR, i); 3853 WREG32(RLC_UCODE_ADDR, i);
3844 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3854 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3845 } 3855 }
@@ -3947,7 +3957,10 @@ int r600_irq_init(struct radeon_device *rdev)
3947 r600_disable_interrupts(rdev); 3957 r600_disable_interrupts(rdev);
3948 3958
3949 /* init rlc */ 3959 /* init rlc */
3950 ret = r600_rlc_init(rdev); 3960 if (rdev->family >= CHIP_CEDAR)
3961 ret = evergreen_rlc_resume(rdev);
3962 else
3963 ret = r600_rlc_resume(rdev);
3951 if (ret) { 3964 if (ret) {
3952 r600_ih_ring_fini(rdev); 3965 r600_ih_ring_fini(rdev);
3953 return ret; 3966 return ret;
@@ -4028,6 +4041,7 @@ int r600_irq_set(struct radeon_device *rdev)
4028 u32 hdmi0, hdmi1; 4041 u32 hdmi0, hdmi1;
4029 u32 d1grph = 0, d2grph = 0; 4042 u32 d1grph = 0, d2grph = 0;
4030 u32 dma_cntl; 4043 u32 dma_cntl;
4044 u32 thermal_int = 0;
4031 4045
4032 if (!rdev->irq.installed) { 4046 if (!rdev->irq.installed) {
4033 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 4047 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -4062,8 +4076,21 @@ int r600_irq_set(struct radeon_device *rdev)
4062 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4076 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
4063 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4077 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
4064 } 4078 }
4079
4065 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 4080 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
4066 4081
4082 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
4083 thermal_int = RREG32(CG_THERMAL_INT) &
4084 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4085 } else if (rdev->family >= CHIP_RV770) {
4086 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
4087 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4088 }
4089 if (rdev->irq.dpm_thermal) {
4090 DRM_DEBUG("dpm thermal\n");
4091 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
4092 }
4093
4067 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 4094 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
4068 DRM_DEBUG("r600_irq_set: sw int\n"); 4095 DRM_DEBUG("r600_irq_set: sw int\n");
4069 cp_int_cntl |= RB_INT_ENABLE; 4096 cp_int_cntl |= RB_INT_ENABLE;
@@ -4145,6 +4172,11 @@ int r600_irq_set(struct radeon_device *rdev)
4145 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 4172 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
4146 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 4173 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
4147 } 4174 }
4175 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
4176 WREG32(CG_THERMAL_INT, thermal_int);
4177 } else if (rdev->family >= CHIP_RV770) {
4178 WREG32(RV770_CG_THERMAL_INT, thermal_int);
4179 }
4148 4180
4149 return 0; 4181 return 0;
4150} 4182}
@@ -4336,6 +4368,7 @@ int r600_irq_process(struct radeon_device *rdev)
4336 u32 ring_index; 4368 u32 ring_index;
4337 bool queue_hotplug = false; 4369 bool queue_hotplug = false;
4338 bool queue_hdmi = false; 4370 bool queue_hdmi = false;
4371 bool queue_thermal = false;
4339 4372
4340 if (!rdev->ih.enabled || rdev->shutdown) 4373 if (!rdev->ih.enabled || rdev->shutdown)
4341 return IRQ_NONE; 4374 return IRQ_NONE;
@@ -4503,6 +4536,16 @@ restart_ih:
4503 DRM_DEBUG("IH: DMA trap\n"); 4536 DRM_DEBUG("IH: DMA trap\n");
4504 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 4537 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4505 break; 4538 break;
4539 case 230: /* thermal low to high */
4540 DRM_DEBUG("IH: thermal low to high\n");
4541 rdev->pm.dpm.thermal.high_to_low = false;
4542 queue_thermal = true;
4543 break;
4544 case 231: /* thermal high to low */
4545 DRM_DEBUG("IH: thermal high to low\n");
4546 rdev->pm.dpm.thermal.high_to_low = true;
4547 queue_thermal = true;
4548 break;
4506 case 233: /* GUI IDLE */ 4549 case 233: /* GUI IDLE */
4507 DRM_DEBUG("IH: GUI idle\n"); 4550 DRM_DEBUG("IH: GUI idle\n");
4508 break; 4551 break;
@@ -4519,6 +4562,8 @@ restart_ih:
4519 schedule_work(&rdev->hotplug_work); 4562 schedule_work(&rdev->hotplug_work);
4520 if (queue_hdmi) 4563 if (queue_hdmi)
4521 schedule_work(&rdev->audio_work); 4564 schedule_work(&rdev->audio_work);
4565 if (queue_thermal && rdev->pm.dpm_enabled)
4566 schedule_work(&rdev->pm.dpm.thermal.work);
4522 rdev->ih.rptr = rptr; 4567 rdev->ih.rptr = rptr;
4523 WREG32(IH_RB_RPTR, rdev->ih.rptr); 4568 WREG32(IH_RB_RPTR, rdev->ih.rptr);
4524 atomic_set(&rdev->ih.lock, 0); 4569 atomic_set(&rdev->ih.lock, 0);
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
new file mode 100644
index 000000000000..76368c04f809
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -0,0 +1,1024 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "r600d.h"
28#include "r600_dpm.h"
29#include "atom.h"
30
31const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
32{
33 R600_UTC_DFLT_00,
34 R600_UTC_DFLT_01,
35 R600_UTC_DFLT_02,
36 R600_UTC_DFLT_03,
37 R600_UTC_DFLT_04,
38 R600_UTC_DFLT_05,
39 R600_UTC_DFLT_06,
40 R600_UTC_DFLT_07,
41 R600_UTC_DFLT_08,
42 R600_UTC_DFLT_09,
43 R600_UTC_DFLT_10,
44 R600_UTC_DFLT_11,
45 R600_UTC_DFLT_12,
46 R600_UTC_DFLT_13,
47 R600_UTC_DFLT_14,
48};
49
50const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
51{
52 R600_DTC_DFLT_00,
53 R600_DTC_DFLT_01,
54 R600_DTC_DFLT_02,
55 R600_DTC_DFLT_03,
56 R600_DTC_DFLT_04,
57 R600_DTC_DFLT_05,
58 R600_DTC_DFLT_06,
59 R600_DTC_DFLT_07,
60 R600_DTC_DFLT_08,
61 R600_DTC_DFLT_09,
62 R600_DTC_DFLT_10,
63 R600_DTC_DFLT_11,
64 R600_DTC_DFLT_12,
65 R600_DTC_DFLT_13,
66 R600_DTC_DFLT_14,
67};
68
69void r600_dpm_print_class_info(u32 class, u32 class2)
70{
71 printk("\tui class: ");
72 switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
73 case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
74 default:
75 printk("none\n");
76 break;
77 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
78 printk("battery\n");
79 break;
80 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
81 printk("balanced\n");
82 break;
83 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
84 printk("performance\n");
85 break;
86 }
87 printk("\tinternal class: ");
88 if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
89 (class2 == 0))
90 printk("none");
91 else {
92 if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
93 printk("boot ");
94 if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
95 printk("thermal ");
96 if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
97 printk("limited_pwr ");
98 if (class & ATOM_PPLIB_CLASSIFICATION_REST)
99 printk("rest ");
100 if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
101 printk("forced ");
102 if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
103 printk("3d_perf ");
104 if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
105 printk("ovrdrv ");
106 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
107 printk("uvd ");
108 if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
109 printk("3d_low ");
110 if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
111 printk("acpi ");
112 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
113 printk("uvd_hd2 ");
114 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
115 printk("uvd_hd ");
116 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
117 printk("uvd_sd ");
118 if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
119 printk("limited_pwr2 ");
120 if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
121 printk("ulv ");
122 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
123 printk("uvd_mvc ");
124 }
125 printk("\n");
126}
127
128void r600_dpm_print_cap_info(u32 caps)
129{
130 printk("\tcaps: ");
131 if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
132 printk("single_disp ");
133 if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
134 printk("video ");
135 if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
136 printk("no_dc ");
137 printk("\n");
138}
139
140void r600_dpm_print_ps_status(struct radeon_device *rdev,
141 struct radeon_ps *rps)
142{
143 printk("\tstatus: ");
144 if (rps == rdev->pm.dpm.current_ps)
145 printk("c ");
146 if (rps == rdev->pm.dpm.requested_ps)
147 printk("r ");
148 if (rps == rdev->pm.dpm.boot_ps)
149 printk("b ");
150 printk("\n");
151}
152
153void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
154 u32 *p, u32 *u)
155{
156 u32 b_c = 0;
157 u32 i_c;
158 u32 tmp;
159
160 i_c = (i * r_c) / 100;
161 tmp = i_c >> p_b;
162
163 while (tmp) {
164 b_c++;
165 tmp >>= 1;
166 }
167
168 *u = (b_c + 1) / 2;
169 *p = i_c / (1 << (2 * (*u)));
170}
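/*
 * Worked example (values illustrative, not from the source): the helper
 * factors i_c = (i * r_c) / 100 into p * 4^u, choosing u = ceil(b_c / 2)
 * where b_c is the bit length of i_c >> p_b, so p stays within roughly
 * p_b bits.  E.g. i = 1000, r_c = 10000, p_b = 16: i_c = 100000,
 * 100000 >> 16 = 1, so b_c = 1, u = 1, p = 100000 / 4 = 25000, and
 * p * 4^u recovers i_c exactly (in general the division truncates).
 */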
171
172int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
173{
174 u32 k, a, ah, al;
175 u32 t1;
176
177 if ((fl == 0) || (fh == 0) || (fl > fh))
178 return -EINVAL;
179
180 k = (100 * fh) / fl;
181 t1 = (t * (k - 100));
182 a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
183 a = (a + 5) / 10;
184 ah = ((a * t) + 5000) / 10000;
185 al = a - ah;
186
187 *th = t - ah;
188 *tl = t + al;
189
190 return 0;
191}
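/*
 * Worked example for the hysteresis split above (inputs illustrative):
 * t = 1000, h = 5, fh = 800, fl = 400 gives k = 200 and t1 = 100000,
 * so a = (1000 * 100500) / 11000 = 9136, rounded to a = 914; then
 * ah = 91, al = 823, *th = 909, *tl = 1823.  The window around t is
 * skewed by the fh/fl clock ratio rather than split evenly.
 */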
192
193void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
194{
195 int i;
196
197 if (enable) {
198 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
199 } else {
200 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
201
202 WREG32(CG_RLC_REQ_AND_RSP, 0x2);
203
204 for (i = 0; i < rdev->usec_timeout; i++) {
205 if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
206 break;
207 udelay(1);
208 }
209
210 WREG32(CG_RLC_REQ_AND_RSP, 0x0);
211
212 WREG32(GRBM_PWR_CNTL, 0x1);
213 RREG32(GRBM_PWR_CNTL);
214 }
215}
216
217void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
218{
219 if (enable)
220 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
221 else
222 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
223}
224
225void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
226{
227 if (enable)
228 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
229 else
230 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
231}
232
233void r600_enable_acpi_pm(struct radeon_device *rdev)
234{
235 WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
236}
237
238void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
239{
240 if (enable)
241 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
242 else
243 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
244}
245
246bool r600_dynamicpm_enabled(struct radeon_device *rdev)
247{
248 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
249 return true;
250 else
251 return false;
252}
253
254void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
255{
256 if (enable)
257 WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
258 else
259 WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
260}
261
262void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
263{
264 if (enable)
265 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
266 else
267 WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
268}
269
270void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
271{
272 if (enable)
273 WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
274 else
275 WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
276}
277
278void r600_wait_for_spll_change(struct radeon_device *rdev)
279{
280 int i;
281
282 for (i = 0; i < rdev->usec_timeout; i++) {
283 if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
284 break;
285 udelay(1);
286 }
287}
288
289void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
290{
291 WREG32(CG_BSP, BSP(p) | BSU(u));
292}
293
294void r600_set_at(struct radeon_device *rdev,
295 u32 l_to_m, u32 m_to_h,
296 u32 h_to_m, u32 m_to_l)
297{
298 WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
299 WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
300}
301
302void r600_set_tc(struct radeon_device *rdev,
303 u32 index, u32 u_t, u32 d_t)
304{
305 WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
306}
307
308void r600_select_td(struct radeon_device *rdev,
309 enum r600_td td)
310{
311 if (td == R600_TD_AUTO)
312 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
313 else
314 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
315 if (td == R600_TD_UP)
316 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
317 if (td == R600_TD_DOWN)
318 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
319}
320
321void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
322{
323 WREG32(CG_FTV, vrv);
324}
325
326void r600_set_tpu(struct radeon_device *rdev, u32 u)
327{
328 WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
329}
330
331void r600_set_tpc(struct radeon_device *rdev, u32 c)
332{
333 WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
334}
335
336void r600_set_sstu(struct radeon_device *rdev, u32 u)
337{
338 WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
339}
340
341void r600_set_sst(struct radeon_device *rdev, u32 t)
342{
343 WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
344}
345
346void r600_set_git(struct radeon_device *rdev, u32 t)
347{
348 WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
349}
350
351void r600_set_fctu(struct radeon_device *rdev, u32 u)
352{
353 WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
354}
355
356void r600_set_fct(struct radeon_device *rdev, u32 t)
357{
358 WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
359}
360
361void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
362{
363 WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
364}
365
366void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
367{
368 WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
369}
370
371void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
372{
373 WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
374}
375
376void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
377{
378 WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
379}
380
381void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
382{
383 WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
384}
385
386void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
387{
388 WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
389}
390
391void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
392{
393 WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
394}
395
396void r600_engine_clock_entry_enable(struct radeon_device *rdev,
397 u32 index, bool enable)
398{
399 if (enable)
400 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
401 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
402 else
403 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
404 0, ~STEP_0_SPLL_ENTRY_VALID);
405}
406
407void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
408 u32 index, bool enable)
409{
410 if (enable)
411 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
412 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
413 else
414 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
415 0, ~STEP_0_SPLL_STEP_ENABLE);
416}
417
418void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
419 u32 index, bool enable)
420{
421 if (enable)
422 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
423 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
424 else
425 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
426 0, ~STEP_0_POST_DIV_EN);
427}
428
429void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
430 u32 index, u32 divider)
431{
432 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
433 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
434}
435
436void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
437 u32 index, u32 divider)
438{
439 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
440 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
441}
442
443void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
444 u32 index, u32 divider)
445{
446 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
447 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
448}
449
450void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
451 u32 index, u32 step_time)
452{
453 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
454 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
455}
456
457void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
458{
459 WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
460}
461
462void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
463{
464 WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
465}
466
467void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
468{
469 WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
470}
471
472void r600_voltage_control_enable_pins(struct radeon_device *rdev,
473 u64 mask)
474{
475 WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
476 WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
477}
478
479
480void r600_voltage_control_program_voltages(struct radeon_device *rdev,
481 enum r600_power_level index, u64 pins)
482{
483 u32 tmp, mask;
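	/* The four profile slots appear to be laid out in reverse: this maps
	 * power level 0 to slot 3 and level 3 to slot 0 (inferred from the
	 * arithmetic; the same mapping recurs in the helpers below). */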
484 u32 ix = 3 - (3 & index);
485
486 WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);
487
488 mask = 7 << (3 * ix);
489 tmp = RREG32(VID_UPPER_GPIO_CNTL);
490 tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
491 WREG32(VID_UPPER_GPIO_CNTL, tmp);
492}
493
494void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
495 u64 mask)
496{
497 u32 gpio;
498
499 gpio = RREG32(GPIOPAD_MASK);
500 gpio &= ~mask;
501 WREG32(GPIOPAD_MASK, gpio);
502
503 gpio = RREG32(GPIOPAD_EN);
504 gpio &= ~mask;
505 WREG32(GPIOPAD_EN, gpio);
506
507 gpio = RREG32(GPIOPAD_A);
508 gpio &= ~mask;
509 WREG32(GPIOPAD_A, gpio);
510}
511
512void r600_power_level_enable(struct radeon_device *rdev,
513 enum r600_power_level index, bool enable)
514{
515 u32 ix = 3 - (3 & index);
516
517 if (enable)
518 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
519 ~CTXSW_FREQ_STATE_ENABLE);
520 else
521 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
522 ~CTXSW_FREQ_STATE_ENABLE);
523}
524
525void r600_power_level_set_voltage_index(struct radeon_device *rdev,
526 enum r600_power_level index, u32 voltage_index)
527{
528 u32 ix = 3 - (3 & index);
529
530 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
531 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
532}
533
534void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
535 enum r600_power_level index, u32 mem_clock_index)
536{
537 u32 ix = 3 - (3 & index);
538
539 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
540 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
541}
542
543void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
544 enum r600_power_level index, u32 eng_clock_index)
545{
546 u32 ix = 3 - (3 & index);
547
548 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
549 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
550}
551
552void r600_power_level_set_watermark_id(struct radeon_device *rdev,
553 enum r600_power_level index,
554 enum r600_display_watermark watermark_id)
555{
556 u32 ix = 3 - (3 & index);
557 u32 tmp = 0;
558
559 if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
560 tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
561 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
562}
563
564void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
565 enum r600_power_level index, bool compatible)
566{
567 u32 ix = 3 - (3 & index);
568 u32 tmp = 0;
569
570 if (compatible)
571 tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
572 WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
573}
574
575enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
576{
577 u32 tmp;
578
579 tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
580 tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
581 return tmp;
582}
583
584enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
585{
586 u32 tmp;
587
588 tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
589 tmp >>= TARGET_PROFILE_INDEX_SHIFT;
590 return tmp;
591}
592
593void r600_power_level_set_enter_index(struct radeon_device *rdev,
594 enum r600_power_level index)
595{
596 WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
597 ~DYN_PWR_ENTER_INDEX_MASK);
598}
599
600void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
601 enum r600_power_level index)
602{
603 int i;
604
605 for (i = 0; i < rdev->usec_timeout; i++) {
606 if (r600_power_level_get_target_index(rdev) != index)
607 break;
608 udelay(1);
609 }
610
611 for (i = 0; i < rdev->usec_timeout; i++) {
612 if (r600_power_level_get_current_index(rdev) != index)
613 break;
614 udelay(1);
615 }
616}
617
618void r600_wait_for_power_level(struct radeon_device *rdev,
619 enum r600_power_level index)
620{
621 int i;
622
623 for (i = 0; i < rdev->usec_timeout; i++) {
624 if (r600_power_level_get_target_index(rdev) == index)
625 break;
626 udelay(1);
627 }
628
629 for (i = 0; i < rdev->usec_timeout; i++) {
630 if (r600_power_level_get_current_index(rdev) == index)
631 break;
632 udelay(1);
633 }
634}
635
636void r600_start_dpm(struct radeon_device *rdev)
637{
638 r600_enable_sclk_control(rdev, false);
639 r600_enable_mclk_control(rdev, false);
640
641 r600_dynamicpm_enable(rdev, true);
642
643 radeon_wait_for_vblank(rdev, 0);
644 radeon_wait_for_vblank(rdev, 1);
645
646 r600_enable_spll_bypass(rdev, true);
647 r600_wait_for_spll_change(rdev);
648 r600_enable_spll_bypass(rdev, false);
649 r600_wait_for_spll_change(rdev);
650
651 r600_enable_spll_bypass(rdev, true);
652 r600_wait_for_spll_change(rdev);
653 r600_enable_spll_bypass(rdev, false);
654 r600_wait_for_spll_change(rdev);
655
656 r600_enable_sclk_control(rdev, true);
657 r600_enable_mclk_control(rdev, true);
658}
659
660void r600_stop_dpm(struct radeon_device *rdev)
661{
662 r600_dynamicpm_enable(rdev, false);
663}
664
665int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
666{
667 return 0;
668}
669
670void r600_dpm_post_set_power_state(struct radeon_device *rdev)
671{
672
673}
674
675bool r600_is_uvd_state(u32 class, u32 class2)
676{
677 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
678 return true;
679 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
680 return true;
681 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
682 return true;
683 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
684 return true;
685 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
686 return true;
687 return false;
688}
689
690int r600_set_thermal_temperature_range(struct radeon_device *rdev,
691 int min_temp, int max_temp)
692{
693 int low_temp = 0 * 1000;
694 int high_temp = 255 * 1000;
695
696 if (low_temp < min_temp)
697 low_temp = min_temp;
698 if (high_temp > max_temp)
699 high_temp = max_temp;
700 if (high_temp < low_temp) {
701 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
702 return -EINVAL;
703 }
704
705 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
706 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
707 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
708
709 rdev->pm.dpm.thermal.min_temp = low_temp;
710 rdev->pm.dpm.thermal.max_temp = high_temp;
711
712 return 0;
713}
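/*
 * Worked example: with the defaults defined in r600_dpm.h below,
 * R600_TEMP_RANGE_MIN (90 * 1000) and R600_TEMP_RANGE_MAX (120 * 1000),
 * the 0..255 degree hardware range is clamped to 90..120 and the thresholds
 * are programmed in whole degrees: DIG_THERM_INTH = 120, DIG_THERM_INTL = 90.
 * A caller would simply do (sketch):
 *
 *	ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN,
 *						 R600_TEMP_RANGE_MAX);
 */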
714
715bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
716{
717 switch (sensor) {
718 case THERMAL_TYPE_RV6XX:
719 case THERMAL_TYPE_RV770:
720 case THERMAL_TYPE_EVERGREEN:
721 case THERMAL_TYPE_SUMO:
722 case THERMAL_TYPE_NI:
723 case THERMAL_TYPE_SI:
724 return true;
725 case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
726 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
727 return false; /* need special handling */
728 case THERMAL_TYPE_NONE:
729 case THERMAL_TYPE_EXTERNAL:
730 case THERMAL_TYPE_EXTERNAL_GPIO:
731 default:
732 return false;
733 }
734}
735
736union power_info {
737 struct _ATOM_POWERPLAY_INFO info;
738 struct _ATOM_POWERPLAY_INFO_V2 info_2;
739 struct _ATOM_POWERPLAY_INFO_V3 info_3;
740 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
741 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
742 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
743 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
744 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
745};
746
747union fan_info {
748 struct _ATOM_PPLIB_FANTABLE fan;
749 struct _ATOM_PPLIB_FANTABLE2 fan2;
750};
751
752static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
753 ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
754{
755 u32 size = atom_table->ucNumEntries *
756 sizeof(struct radeon_clock_voltage_dependency_entry);
757 int i;
758
759 radeon_table->entries = kzalloc(size, GFP_KERNEL);
760 if (!radeon_table->entries)
761 return -ENOMEM;
762
763 for (i = 0; i < atom_table->ucNumEntries; i++) {
764 radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
765 (atom_table->entries[i].ucClockHigh << 16);
766 radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
767 }
768 radeon_table->count = atom_table->ucNumEntries;
769
770 return 0;
771}
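/*
 * Each AtomBIOS entry splits a 24-bit clock (in 10 kHz units, as elsewhere
 * in the driver) into a little-endian 16-bit low word and an 8-bit high
 * byte, reassembled above. A sketch of the decode step in isolation (the
 * helper name is illustrative):
 */
static inline u32 atom_clk_from_parts(__le16 lo, u8 hi)
{
	return le16_to_cpu(lo) | ((u32)hi << 16);
}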
772
773/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
774#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
775#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
776#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
777#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
778#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
779#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
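/*
 * These sizes gate access to version-specific trailing fields: before
 * dereferencing a member that only exists from a given extended-header
 * revision, the parser checks the header's usSize, e.g. (mirroring the
 * ppm-table code below):
 *
 *	if (le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 &&
 *	    ext_hdr->usPPMTableOffset) { ... }
 */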
780
781int r600_parse_extended_power_table(struct radeon_device *rdev)
782{
783 struct radeon_mode_info *mode_info = &rdev->mode_info;
784 union power_info *power_info;
785 union fan_info *fan_info;
786 ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
787 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
788 u16 data_offset;
789 u8 frev, crev;
790 int ret, i;
791
792 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
793 &frev, &crev, &data_offset))
794 return -EINVAL;
795 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
796
797 /* fan table */
798 if (le16_to_cpu(power_info->pplib.usTableSize) >=
799 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
800 if (power_info->pplib3.usFanTableOffset) {
801 fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
802 le16_to_cpu(power_info->pplib3.usFanTableOffset));
803 rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
804 rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
805 rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
806 rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
807 rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
808 rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
809 rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
810 if (fan_info->fan.ucFanTableFormat >= 2)
811 rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
812 else
813 rdev->pm.dpm.fan.t_max = 10900;
814 rdev->pm.dpm.fan.cycle_delay = 100000;
815 rdev->pm.dpm.fan.ucode_fan_control = true;
816 }
817 }
818
819 /* clock dependency tables, phase shedding tables */
820 if (le16_to_cpu(power_info->pplib.usTableSize) >=
821 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
822 if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
823 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
824 (mode_info->atom_context->bios + data_offset +
825 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
826 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
827 dep_table);
828 if (ret)
829 return ret;
830 }
831 if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
832 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
833 (mode_info->atom_context->bios + data_offset +
834 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
835 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
836 dep_table);
837 if (ret) {
838 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
839 return ret;
840 }
841 }
842 if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
843 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
844 (mode_info->atom_context->bios + data_offset +
845 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
846 ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
847 dep_table);
848 if (ret) {
849 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
850 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
851 return ret;
852 }
853 }
854 if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
855 ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
856 (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
857 (mode_info->atom_context->bios + data_offset +
858 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
859 if (clk_v->ucNumEntries) {
860 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
861 le16_to_cpu(clk_v->entries[0].usSclkLow) |
862 (clk_v->entries[0].ucSclkHigh << 16);
863 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
864 le16_to_cpu(clk_v->entries[0].usMclkLow) |
865 (clk_v->entries[0].ucMclkHigh << 16);
866 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
867 le16_to_cpu(clk_v->entries[0].usVddc);
868 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
869 le16_to_cpu(clk_v->entries[0].usVddci);
870 }
871 }
872 if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
873 ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
874 (ATOM_PPLIB_PhaseSheddingLimits_Table *)
875 (mode_info->atom_context->bios + data_offset +
876 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
877
878 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
879 kzalloc(psl->ucNumEntries *
880 sizeof(struct radeon_phase_shedding_limits_entry),
881 GFP_KERNEL);
882 if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
883 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
884 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
885 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
886 return -ENOMEM;
887 }
888
889 for (i = 0; i < psl->ucNumEntries; i++) {
890 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
891 le16_to_cpu(psl->entries[i].usSclkLow) |
892 (psl->entries[i].ucSclkHigh << 16);
893 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
894 le16_to_cpu(psl->entries[i].usMclkLow) |
895 (psl->entries[i].ucMclkHigh << 16);
896 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
897 le16_to_cpu(psl->entries[i].usVoltage);
898 }
899 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
900 psl->ucNumEntries;
901 }
902 }
903
904 /* cac data */
905 if (le16_to_cpu(power_info->pplib.usTableSize) >=
906 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
907 rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
908 rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
909 rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
910 rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
911 if (rdev->pm.dpm.tdp_od_limit)
912 rdev->pm.dpm.power_control = true;
913 else
914 rdev->pm.dpm.power_control = false;
915 rdev->pm.dpm.tdp_adjustment = 0;
916 rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
917 rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
918 rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
919 if (power_info->pplib5.usCACLeakageTableOffset) {
920 ATOM_PPLIB_CAC_Leakage_Table *cac_table =
921 (ATOM_PPLIB_CAC_Leakage_Table *)
922 (mode_info->atom_context->bios + data_offset +
923 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
924 u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
925 rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
926 if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
927 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
928 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
929 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
930 return -ENOMEM;
931 }
932 for (i = 0; i < cac_table->ucNumEntries; i++) {
933 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
934 le16_to_cpu(cac_table->entries[i].usVddc);
935 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
936 le32_to_cpu(cac_table->entries[i].ulLeakageValue);
937 }
938 rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
939 }
940 }
941
942 /* ppm table */
943 if (le16_to_cpu(power_info->pplib.usTableSize) >=
944 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
945 ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
946 (mode_info->atom_context->bios + data_offset +
947 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
948 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
949 ext_hdr->usPPMTableOffset) {
950 ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
951 (mode_info->atom_context->bios + data_offset +
952 le16_to_cpu(ext_hdr->usPPMTableOffset));
953 rdev->pm.dpm.dyn_state.ppm_table =
954 kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
955 if (!rdev->pm.dpm.dyn_state.ppm_table) {
956 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
957 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
958 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
959 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
960 return -ENOMEM;
961 }
962 rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
963 rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
964 le16_to_cpu(ppm->usCpuCoreNumber);
965 rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
966 le32_to_cpu(ppm->ulPlatformTDP);
967 rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
968 le32_to_cpu(ppm->ulSmallACPlatformTDP);
969 rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
970 le32_to_cpu(ppm->ulPlatformTDC);
971 rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
972 le32_to_cpu(ppm->ulSmallACPlatformTDC);
973 rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
974 le32_to_cpu(ppm->ulApuTDP);
975 rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
976 le32_to_cpu(ppm->ulDGpuTDP);
977 rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
978 le32_to_cpu(ppm->ulDGpuUlvPower);
979 rdev->pm.dpm.dyn_state.ppm_table->tj_max =
980 le32_to_cpu(ppm->ulTjmax);
981 }
982 }
983
984 return 0;
985}
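/*
 * The error paths above free every previously allocated table by hand; an
 * equivalent goto-based unwind (a sketch of the common kernel idiom, not
 * what this patch does) would be:
 *
 *	ret = r600_parse_clk_voltage_dep_table(..., dep_table);
 *	if (ret)
 *		goto err_free_vddci;
 *	...
 * err_free_vddci:
 *	kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
 * err_free_vddc_sclk:
 *	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
 *	return ret;
 */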
986
987void r600_free_extended_power_table(struct radeon_device *rdev)
988{
989 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
990 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
991 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
992 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
993 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
994 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
995 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
996 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
997 if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
998 kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
999 if (rdev->pm.dpm.dyn_state.ppm_table)
1000 kfree(rdev->pm.dpm.dyn_state.ppm_table);
1001}
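/*
 * kfree(NULL) is a no-op, so the NULL checks above are redundant; each
 * pair could be collapsed to a bare kfree() call.
 */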
1002
1003enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
1004 u32 sys_mask,
1005 enum radeon_pcie_gen asic_gen,
1006 enum radeon_pcie_gen default_gen)
1007{
1008 switch (asic_gen) {
1009 case RADEON_PCIE_GEN1:
1010 return RADEON_PCIE_GEN1;
1011 case RADEON_PCIE_GEN2:
1012 return RADEON_PCIE_GEN2;
1013 case RADEON_PCIE_GEN3:
1014 return RADEON_PCIE_GEN3;
1015 default:
1016 if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
1017 return RADEON_PCIE_GEN3;
1018 else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
1019 return RADEON_PCIE_GEN2;
1020 else
1021 return RADEON_PCIE_GEN1;
1022 }
1023 return RADEON_PCIE_GEN1;
1024}
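/*
 * Usage sketch (the wrapper name is illustrative): a caller would normally
 * feed in the platform link-speed capability mask reported by the DRM core
 * and let the default_gen argument cap the result.
 */
static enum radeon_pcie_gen r600_pick_pcie_gen_example(struct radeon_device *rdev)
{
	u32 mask = 0;

	if (drm_pcie_get_speed_cap_mask(rdev->ddev, &mask))
		mask = 0;
	/* GEN3 if the platform reports 8.0 GT/s support, otherwise GEN1 */
	return r600_get_pcie_gen_support(rdev, mask,
					 RADEON_PCIE_GEN_INVALID,
					 RADEON_PCIE_GEN3);
}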
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
new file mode 100644
index 000000000000..a95ab214289b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -0,0 +1,226 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __R600_DPM_H__
24#define __R600_DPM_H__
25
26#define R600_ASI_DFLT 10000
27#define R600_BSP_DFLT 0x41EB
28#define R600_BSU_DFLT 0x2
29#define R600_AH_DFLT 5
30#define R600_RLP_DFLT 25
31#define R600_RMP_DFLT 65
32#define R600_LHP_DFLT 40
33#define R600_LMP_DFLT 15
34#define R600_TD_DFLT 0
35#define R600_UTC_DFLT_00 0x24
36#define R600_UTC_DFLT_01 0x22
37#define R600_UTC_DFLT_02 0x22
38#define R600_UTC_DFLT_03 0x22
39#define R600_UTC_DFLT_04 0x22
40#define R600_UTC_DFLT_05 0x22
41#define R600_UTC_DFLT_06 0x22
42#define R600_UTC_DFLT_07 0x22
43#define R600_UTC_DFLT_08 0x22
44#define R600_UTC_DFLT_09 0x22
45#define R600_UTC_DFLT_10 0x22
46#define R600_UTC_DFLT_11 0x22
47#define R600_UTC_DFLT_12 0x22
48#define R600_UTC_DFLT_13 0x22
49#define R600_UTC_DFLT_14 0x22
50#define R600_DTC_DFLT_00 0x24
51#define R600_DTC_DFLT_01 0x22
52#define R600_DTC_DFLT_02 0x22
53#define R600_DTC_DFLT_03 0x22
54#define R600_DTC_DFLT_04 0x22
55#define R600_DTC_DFLT_05 0x22
56#define R600_DTC_DFLT_06 0x22
57#define R600_DTC_DFLT_07 0x22
58#define R600_DTC_DFLT_08 0x22
59#define R600_DTC_DFLT_09 0x22
60#define R600_DTC_DFLT_10 0x22
61#define R600_DTC_DFLT_11 0x22
62#define R600_DTC_DFLT_12 0x22
63#define R600_DTC_DFLT_13 0x22
64#define R600_DTC_DFLT_14 0x22
65#define R600_VRC_DFLT 0x0000C003
66#define R600_VOLTAGERESPONSETIME_DFLT 1000
67#define R600_BACKBIASRESPONSETIME_DFLT 1000
68#define R600_VRU_DFLT 0x3
69#define R600_SPLLSTEPTIME_DFLT 0x1000
70#define R600_SPLLSTEPUNIT_DFLT 0x3
71#define R600_TPU_DFLT 0
72#define R600_TPC_DFLT 0x200
73#define R600_SSTU_DFLT 0
74#define R600_SST_DFLT 0x00C8
75#define R600_GICST_DFLT 0x200
76#define R600_FCT_DFLT 0x0400
77#define R600_FCTU_DFLT 0
78#define R600_CTXCGTT3DRPHC_DFLT 0x20
79#define R600_CTXCGTT3DRSDC_DFLT 0x40
80#define R600_VDDC3DOORPHC_DFLT 0x100
81#define R600_VDDC3DOORSDC_DFLT 0x7
82#define R600_VDDC3DOORSU_DFLT 0
83#define R600_MPLLLOCKTIME_DFLT 100
84#define R600_MPLLRESETTIME_DFLT 150
85#define R600_VCOSTEPPCT_DFLT 20
86#define R600_ENDINGVCOSTEPPCT_DFLT 5
87#define R600_REFERENCEDIVIDER_DFLT 4
88
89#define R600_PM_NUMBER_OF_TC 15
90#define R600_PM_NUMBER_OF_SCLKS 20
91#define R600_PM_NUMBER_OF_MCLKS 4
92#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
93#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
94
95/* XXX are these ok? */
96#define R600_TEMP_RANGE_MIN (90 * 1000)
97#define R600_TEMP_RANGE_MAX (120 * 1000)
98
99enum r600_power_level {
100 R600_POWER_LEVEL_LOW = 0,
101 R600_POWER_LEVEL_MEDIUM = 1,
102 R600_POWER_LEVEL_HIGH = 2,
103 R600_POWER_LEVEL_CTXSW = 3,
104};
105
106enum r600_td {
107 R600_TD_AUTO,
108 R600_TD_UP,
109 R600_TD_DOWN,
110};
111
112enum r600_display_watermark {
113 R600_DISPLAY_WATERMARK_LOW = 0,
114 R600_DISPLAY_WATERMARK_HIGH = 1,
115};
116
117enum r600_display_gap
118{
119 R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
120 R600_PM_DISPLAY_GAP_VBLANK = 1,
121 R600_PM_DISPLAY_GAP_WATERMARK = 2,
122 R600_PM_DISPLAY_GAP_IGNORE = 3,
123};
124
125extern const u32 r600_utc[R600_PM_NUMBER_OF_TC];
126extern const u32 r600_dtc[R600_PM_NUMBER_OF_TC];
127
128void r600_dpm_print_class_info(u32 class, u32 class2);
129void r600_dpm_print_cap_info(u32 caps);
130void r600_dpm_print_ps_status(struct radeon_device *rdev,
131 struct radeon_ps *rps);
132bool r600_is_uvd_state(u32 class, u32 class2);
133void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
134 u32 *p, u32 *u);
135int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
136void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable);
137void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable);
138void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable);
139void r600_enable_acpi_pm(struct radeon_device *rdev);
140void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable);
141bool r600_dynamicpm_enabled(struct radeon_device *rdev);
142void r600_enable_sclk_control(struct radeon_device *rdev, bool enable);
143void r600_enable_mclk_control(struct radeon_device *rdev, bool enable);
144void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable);
145void r600_wait_for_spll_change(struct radeon_device *rdev);
146void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p);
147void r600_set_at(struct radeon_device *rdev,
148 u32 l_to_m, u32 m_to_h,
149 u32 h_to_m, u32 m_to_l);
150void r600_set_tc(struct radeon_device *rdev, u32 index, u32 u_t, u32 d_t);
151void r600_select_td(struct radeon_device *rdev, enum r600_td td);
152void r600_set_vrc(struct radeon_device *rdev, u32 vrv);
153void r600_set_tpu(struct radeon_device *rdev, u32 u);
154void r600_set_tpc(struct radeon_device *rdev, u32 c);
155void r600_set_sstu(struct radeon_device *rdev, u32 u);
156void r600_set_sst(struct radeon_device *rdev, u32 t);
157void r600_set_git(struct radeon_device *rdev, u32 t);
158void r600_set_fctu(struct radeon_device *rdev, u32 u);
159void r600_set_fct(struct radeon_device *rdev, u32 t);
160void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p);
161void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s);
162void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u);
163void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p);
164void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s);
165void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time);
166void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time);
167void r600_engine_clock_entry_enable(struct radeon_device *rdev,
168 u32 index, bool enable);
169void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
170 u32 index, bool enable);
171void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
172 u32 index, bool enable);
173void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
174 u32 index, u32 divider);
175void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
176 u32 index, u32 divider);
177void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
178 u32 index, u32 divider);
179void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
180 u32 index, u32 step_time);
181void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u);
182void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u);
183void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt);
184void r600_voltage_control_enable_pins(struct radeon_device *rdev,
185 u64 mask);
186void r600_voltage_control_program_voltages(struct radeon_device *rdev,
187 enum r600_power_level index, u64 pins);
188void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
189 u64 mask);
190void r600_power_level_enable(struct radeon_device *rdev,
191 enum r600_power_level index, bool enable);
192void r600_power_level_set_voltage_index(struct radeon_device *rdev,
193 enum r600_power_level index, u32 voltage_index);
194void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
195 enum r600_power_level index, u32 mem_clock_index);
196void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
197 enum r600_power_level index, u32 eng_clock_index);
198void r600_power_level_set_watermark_id(struct radeon_device *rdev,
199 enum r600_power_level index,
200 enum r600_display_watermark watermark_id);
201void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
202 enum r600_power_level index, bool compatible);
203enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev);
204enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev);
205void r600_power_level_set_enter_index(struct radeon_device *rdev,
206 enum r600_power_level index);
207void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
208 enum r600_power_level index);
209void r600_wait_for_power_level(struct radeon_device *rdev,
210 enum r600_power_level index);
211void r600_start_dpm(struct radeon_device *rdev);
212void r600_stop_dpm(struct radeon_device *rdev);
213
214int r600_set_thermal_temperature_range(struct radeon_device *rdev,
215 int min_temp, int max_temp);
216bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
217
218int r600_parse_extended_power_table(struct radeon_device *rdev);
219void r600_free_extended_power_table(struct radeon_device *rdev);
220
221enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
222 u32 sys_mask,
223 enum radeon_pcie_gen asic_gen,
224 enum radeon_pcie_gen default_gen);
225
226#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 456750a0daa5..e73b2a73494a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,14 +133,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
133	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
134	uint32_t offset = dig->afmt->offset;
135	uint8_t *frame = buffer + 3;
136	uint8_t *header = buffer;
137 /* Our header values (type, version, length) should be alright, Intel
138 * is using the same. Checksum function also seems to be OK, it works
139 * fine for audio infoframe. However calculated value is always lower
140 * by 2 in comparison to fglrx. It breaks displaying anything in case
141 * of TVs that strictly check the checksum. Hack it manually here to
142 * workaround this issue. */
143 frame[0x0] += 2;
137
138	WREG32(HDMI0_AVI_INFO0 + offset,
139		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -149,7 +142,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
142	WREG32(HDMI0_AVI_INFO2 + offset,
143		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
144	WREG32(HDMI0_AVI_INFO3 + offset,
152		frame[0xC] | (frame[0xD] << 8));
145		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
146}
147
148/*
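The hunk above drops the manual checksum fixup. A plausible reading (an inference, not taken from the patch itself): the packed infoframe checksum covers the header bytes as well as the payload, and since the version byte (0x02) was never programmed into the hardware, the transmitted checksum came out 2 lower than fglrx's. Writing header[1] into the top byte of HDMI0_AVI_INFO3 sends the version byte, so the helper-computed checksum is valid without the "+2" hack.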
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 909219b1bf80..3ef202629e7e 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -31,6 +31,12 @@
31#define R600_PCIE_PORT_INDEX                0x0038
32#define R600_PCIE_PORT_DATA                 0x003c
33
34#define R600_RCU_INDEX 0x0100
35#define R600_RCU_DATA 0x0104
36
37#define R600_UVD_CTX_INDEX 0xf4a0
38#define R600_UVD_CTX_DATA 0xf4a4
39
40#define R600_MC_VM_FB_LOCATION			0x2180
41#define R600_MC_FB_BASE_MASK			0x0000FFFF
42#define R600_MC_FB_BASE_SHIFT			0
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 79df558f8c40..f1b3084d8f51 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -302,10 +302,25 @@
302#define	GRBM_SOFT_RESET				0x8020
303#define		SOFT_RESET_CP				(1<<0)
304
305#define CG_THERMAL_CTRL 0x7F0
306#define DIG_THERM_DPM(x) ((x) << 12)
307#define DIG_THERM_DPM_MASK 0x000FF000
308#define DIG_THERM_DPM_SHIFT 12
309#define	CG_THERMAL_STATUS			0x7F4
310#define		ASIC_T(x)				((x) << 0)
311#define		ASIC_T_MASK				0x1FF
312#define		ASIC_T_SHIFT				0
313#define CG_THERMAL_INT 0x7F8
314#define DIG_THERM_INTH(x) ((x) << 8)
315#define DIG_THERM_INTH_MASK 0x0000FF00
316#define DIG_THERM_INTH_SHIFT 8
317#define DIG_THERM_INTL(x) ((x) << 16)
318#define DIG_THERM_INTL_MASK 0x00FF0000
319#define DIG_THERM_INTL_SHIFT 16
320#define THERM_INT_MASK_HIGH (1 << 24)
321#define THERM_INT_MASK_LOW (1 << 25)
322
323#define RV770_CG_THERMAL_INT 0x734
324
325#define	HDP_HOST_PATH_CNTL			0x2C00
326#define	HDP_NONSURFACE_BASE			0x2C04
@@ -684,10 +699,6 @@
699#define RLC_UCODE_ADDR                                    0x3f2c
700#define RLC_UCODE_DATA                                    0x3f30
701
687/* new for TN */
688#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
689#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
690
702#define SRBM_SOFT_RESET                                   0xe60
703#	define SOFT_RESET_DMA                             (1 << 12)
704#	define SOFT_RESET_RLC                             (1 << 13)
@@ -1148,6 +1159,219 @@
1159#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
1160#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
1161
1162/* Power management */
1163#define CG_SPLL_FUNC_CNTL 0x600
1164# define SPLL_RESET (1 << 0)
1165# define SPLL_SLEEP (1 << 1)
1166# define SPLL_REF_DIV(x) ((x) << 2)
1167# define SPLL_REF_DIV_MASK (7 << 2)
1168# define SPLL_FB_DIV(x) ((x) << 5)
1169# define SPLL_FB_DIV_MASK (0xff << 5)
1170# define SPLL_PULSEEN (1 << 13)
1171# define SPLL_PULSENUM(x) ((x) << 14)
1172# define SPLL_PULSENUM_MASK (3 << 14)
1173# define SPLL_SW_HILEN(x) ((x) << 16)
1174# define SPLL_SW_HILEN_MASK (0xf << 16)
1175# define SPLL_SW_LOLEN(x) ((x) << 20)
1176# define SPLL_SW_LOLEN_MASK (0xf << 20)
1177# define SPLL_DIVEN (1 << 24)
1178# define SPLL_BYPASS_EN (1 << 25)
1179# define SPLL_CHG_STATUS (1 << 29)
1180# define SPLL_CTLREQ (1 << 30)
1181# define SPLL_CTLACK (1 << 31)
1182
1183#define GENERAL_PWRMGT 0x618
1184# define GLOBAL_PWRMGT_EN (1 << 0)
1185# define STATIC_PM_EN (1 << 1)
1186# define MOBILE_SU (1 << 2)
1187# define THERMAL_PROTECTION_DIS (1 << 3)
1188# define THERMAL_PROTECTION_TYPE (1 << 4)
1189# define ENABLE_GEN2PCIE (1 << 5)
1190# define SW_GPIO_INDEX(x) ((x) << 6)
1191# define SW_GPIO_INDEX_MASK (3 << 6)
1192# define LOW_VOLT_D2_ACPI (1 << 8)
1193# define LOW_VOLT_D3_ACPI (1 << 9)
1194# define VOLT_PWRMGT_EN (1 << 10)
1195#define CG_TPC 0x61c
1196# define TPCC(x) ((x) << 0)
1197# define TPCC_MASK (0x7fffff << 0)
1198# define TPU(x) ((x) << 23)
1199# define TPU_MASK (0x1f << 23)
1200#define SCLK_PWRMGT_CNTL 0x620
1201# define SCLK_PWRMGT_OFF (1 << 0)
1202# define SCLK_TURNOFF (1 << 1)
1203# define SPLL_TURNOFF (1 << 2)
1204# define SU_SCLK_USE_BCLK (1 << 3)
1205# define DYNAMIC_GFX_ISLAND_PWR_DOWN (1 << 4)
1206# define DYNAMIC_GFX_ISLAND_PWR_LP (1 << 5)
1207# define CLK_TURN_ON_STAGGER (1 << 6)
1208# define CLK_TURN_OFF_STAGGER (1 << 7)
1209# define FIR_FORCE_TREND_SEL (1 << 8)
1210# define FIR_TREND_MODE (1 << 9)
1211# define DYN_GFX_CLK_OFF_EN (1 << 10)
1212# define VDDC3D_TURNOFF_D1 (1 << 11)
1213# define VDDC3D_TURNOFF_D2 (1 << 12)
1214# define VDDC3D_TURNOFF_D3 (1 << 13)
1215# define SPLL_TURNOFF_D2 (1 << 14)
1216# define SCLK_LOW_D1 (1 << 15)
1217# define DYN_GFX_CLK_OFF_MC_EN (1 << 16)
1218#define MCLK_PWRMGT_CNTL 0x624
1219# define MPLL_PWRMGT_OFF (1 << 0)
1220# define YCLK_TURNOFF (1 << 1)
1221# define MPLL_TURNOFF (1 << 2)
1222# define SU_MCLK_USE_BCLK (1 << 3)
1223# define DLL_READY (1 << 4)
1224# define MC_BUSY (1 << 5)
1225# define MC_INT_CNTL (1 << 7)
1226# define MRDCKA_SLEEP (1 << 8)
1227# define MRDCKB_SLEEP (1 << 9)
1228# define MRDCKC_SLEEP (1 << 10)
1229# define MRDCKD_SLEEP (1 << 11)
1230# define MRDCKE_SLEEP (1 << 12)
1231# define MRDCKF_SLEEP (1 << 13)
1232# define MRDCKG_SLEEP (1 << 14)
1233# define MRDCKH_SLEEP (1 << 15)
1234# define MRDCKA_RESET (1 << 16)
1235# define MRDCKB_RESET (1 << 17)
1236# define MRDCKC_RESET (1 << 18)
1237# define MRDCKD_RESET (1 << 19)
1238# define MRDCKE_RESET (1 << 20)
1239# define MRDCKF_RESET (1 << 21)
1240# define MRDCKG_RESET (1 << 22)
1241# define MRDCKH_RESET (1 << 23)
1242# define DLL_READY_READ (1 << 24)
1243# define USE_DISPLAY_GAP (1 << 25)
1244# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
1245# define USE_DISPLAY_GAP_CTXSW (1 << 27)
1246# define MPLL_TURNOFF_D2 (1 << 28)
1247# define USE_DISPLAY_URGENT_CTXSW (1 << 29)
1248
1249#define MPLL_TIME 0x634
1250# define MPLL_LOCK_TIME(x) ((x) << 0)
1251# define MPLL_LOCK_TIME_MASK (0xffff << 0)
1252# define MPLL_RESET_TIME(x) ((x) << 16)
1253# define MPLL_RESET_TIME_MASK (0xffff << 16)
1254
1255#define SCLK_FREQ_SETTING_STEP_0_PART1 0x648
1256# define STEP_0_SPLL_POST_DIV(x) ((x) << 0)
1257# define STEP_0_SPLL_POST_DIV_MASK (0xff << 0)
1258# define STEP_0_SPLL_FB_DIV(x) ((x) << 8)
1259# define STEP_0_SPLL_FB_DIV_MASK (0xff << 8)
1260# define STEP_0_SPLL_REF_DIV(x) ((x) << 16)
1261# define STEP_0_SPLL_REF_DIV_MASK (7 << 16)
1262# define STEP_0_SPLL_STEP_TIME(x) ((x) << 19)
1263# define STEP_0_SPLL_STEP_TIME_MASK (0x1fff << 19)
1264#define SCLK_FREQ_SETTING_STEP_0_PART2 0x64c
1265# define STEP_0_PULSE_HIGH_CNT(x) ((x) << 0)
1266# define STEP_0_PULSE_HIGH_CNT_MASK (0x1ff << 0)
1267# define STEP_0_POST_DIV_EN (1 << 9)
1268# define STEP_0_SPLL_STEP_ENABLE (1 << 30)
1269# define STEP_0_SPLL_ENTRY_VALID (1 << 31)
1270
1271#define VID_RT 0x6f8
1272# define VID_CRT(x) ((x) << 0)
1273# define VID_CRT_MASK (0x1fff << 0)
1274# define VID_CRTU(x) ((x) << 13)
1275# define VID_CRTU_MASK (7 << 13)
1276# define SSTU(x) ((x) << 16)
1277# define SSTU_MASK (7 << 16)
1278#define CTXSW_PROFILE_INDEX 0x6fc
1279# define CTXSW_FREQ_VIDS_CFG_INDEX(x) ((x) << 0)
1280# define CTXSW_FREQ_VIDS_CFG_INDEX_MASK (3 << 0)
1281# define CTXSW_FREQ_VIDS_CFG_INDEX_SHIFT 0
1282# define CTXSW_FREQ_MCLK_CFG_INDEX(x) ((x) << 2)
1283# define CTXSW_FREQ_MCLK_CFG_INDEX_MASK (3 << 2)
1284# define CTXSW_FREQ_MCLK_CFG_INDEX_SHIFT 2
1285# define CTXSW_FREQ_SCLK_CFG_INDEX(x) ((x) << 4)
1286# define CTXSW_FREQ_SCLK_CFG_INDEX_MASK (0x1f << 4)
1287# define CTXSW_FREQ_SCLK_CFG_INDEX_SHIFT 4
1288# define CTXSW_FREQ_STATE_SPLL_RESET_EN (1 << 9)
1289# define CTXSW_FREQ_STATE_ENABLE (1 << 10)
1290# define CTXSW_FREQ_DISPLAY_WATERMARK (1 << 11)
1291# define CTXSW_FREQ_GEN2PCIE_VOLT (1 << 12)
1292
1293#define TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
1294# define TARGET_PROFILE_INDEX_MASK (3 << 0)
1295# define TARGET_PROFILE_INDEX_SHIFT 0
1296# define CURRENT_PROFILE_INDEX_MASK (3 << 2)
1297# define CURRENT_PROFILE_INDEX_SHIFT 2
1298# define DYN_PWR_ENTER_INDEX(x) ((x) << 4)
1299# define DYN_PWR_ENTER_INDEX_MASK (3 << 4)
1300# define DYN_PWR_ENTER_INDEX_SHIFT 4
1301# define CURR_MCLK_INDEX_MASK (3 << 6)
1302# define CURR_MCLK_INDEX_SHIFT 6
1303# define CURR_SCLK_INDEX_MASK (0x1f << 8)
1304# define CURR_SCLK_INDEX_SHIFT 8
1305# define CURR_VID_INDEX_MASK (3 << 13)
1306# define CURR_VID_INDEX_SHIFT 13
1307
1308#define LOWER_GPIO_ENABLE 0x710
1309#define UPPER_GPIO_ENABLE 0x714
1310#define CTXSW_VID_LOWER_GPIO_CNTL 0x718
1311
1312#define VID_UPPER_GPIO_CNTL 0x740
1313#define CG_CTX_CGTT3D_R 0x744
1314# define PHC(x) ((x) << 0)
1315# define PHC_MASK (0x1ff << 0)
1316# define SDC(x) ((x) << 9)
1317# define SDC_MASK (0x3fff << 9)
1318#define CG_VDDC3D_OOR 0x748
1319# define SU(x) ((x) << 23)
1320# define SU_MASK (0xf << 23)
1321#define CG_FTV 0x74c
1322#define CG_FFCT_0 0x750
1323# define UTC_0(x) ((x) << 0)
1324# define UTC_0_MASK (0x3ff << 0)
1325# define DTC_0(x) ((x) << 10)
1326# define DTC_0_MASK (0x3ff << 10)
1327
1328#define CG_BSP 0x78c
1329# define BSP(x) ((x) << 0)
1330# define BSP_MASK (0xffff << 0)
1331# define BSU(x) ((x) << 16)
1332# define BSU_MASK (0xf << 16)
1333#define CG_RT 0x790
1334# define FLS(x) ((x) << 0)
1335# define FLS_MASK (0xffff << 0)
1336# define FMS(x) ((x) << 16)
1337# define FMS_MASK (0xffff << 16)
1338#define CG_LT 0x794
1339# define FHS(x) ((x) << 0)
1340# define FHS_MASK (0xffff << 0)
1341#define CG_GIT 0x798
1342# define CG_GICST(x) ((x) << 0)
1343# define CG_GICST_MASK (0xffff << 0)
1344# define CG_GIPOT(x) ((x) << 16)
1345# define CG_GIPOT_MASK (0xffff << 16)
1346
1347#define CG_SSP 0x7a8
1348# define CG_SST(x) ((x) << 0)
1349# define CG_SST_MASK (0xffff << 0)
1350# define CG_SSTU(x) ((x) << 16)
1351# define CG_SSTU_MASK (0xf << 16)
1352
1353#define CG_RLC_REQ_AND_RSP 0x7c4
1354# define RLC_CG_REQ_TYPE_MASK 0xf
1355# define RLC_CG_REQ_TYPE_SHIFT 0
1356# define CG_RLC_RSP_TYPE_MASK 0xf0
1357# define CG_RLC_RSP_TYPE_SHIFT 4
1358
1359#define CG_FC_T 0x7cc
1360# define FC_T(x) ((x) << 0)
1361# define FC_T_MASK (0xffff << 0)
1362# define FC_TU(x) ((x) << 16)
1363# define FC_TU_MASK (0x1f << 16)
1364
1365#define GPIOPAD_MASK 0x1798
1366#define GPIOPAD_A 0x179c
1367#define GPIOPAD_EN 0x17a0
1368
1369#define GRBM_PWR_CNTL 0x800c
1370# define REQ_TYPE_MASK 0xf
1371# define REQ_TYPE_SHIFT 0
1372# define RSP_TYPE_MASK 0xf0
1373# define RSP_TYPE_SHIFT 4
1374
1375/*
1376 * UVD
1377 */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 142ce6cc69f5..f51807f04f65 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_pcie_gen2;
96extern int radeon_msi;
97extern int radeon_lockup_timeout;
98extern int radeon_fastfb;
99extern int radeon_dpm;
100
101/*
102 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -150,6 +151,13 @@ extern int radeon_fastfb;
151#define RADEON_RESET_MC                         (1 << 10)
152#define RADEON_RESET_DISPLAY                    (1 << 11)
153
154/* max cursor sizes (in pixels) */
155#define CURSOR_WIDTH 64
156#define CURSOR_HEIGHT 64
157
158#define CIK_CURSOR_WIDTH 128
159#define CIK_CURSOR_HEIGHT 128
160
161/*
162 * Errata workarounds.
163 */
@@ -192,6 +200,7 @@ struct radeon_clock {
200	uint32_t default_mclk;
201	uint32_t default_sclk;
202	uint32_t default_dispclk;
203	uint32_t current_dispclk;
204	uint32_t dp_extclk;
205	uint32_t max_pixel_clock;
206};
@@ -211,13 +220,51 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
220				   u32 clock,
221				   bool strobe_mode,
222				   struct atom_clock_dividers *dividers);
223int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
224					u32 clock,
225					bool strobe_mode,
226					struct atom_mpll_param *mpll_param);
227void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
228int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
229 u16 voltage_level, u8 voltage_type,
230 u32 *gpio_value, u32 *gpio_mask);
231void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
232 u32 eng_clock, u32 mem_clock);
233int radeon_atom_get_voltage_step(struct radeon_device *rdev,
234 u8 voltage_type, u16 *voltage_step);
235int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
236 u16 voltage_id, u16 *voltage);
237int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
238 u16 *voltage,
239 u16 leakage_idx);
240int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
241 u8 voltage_type,
242 u16 nominal_voltage,
243 u16 *true_voltage);
244int radeon_atom_get_min_voltage(struct radeon_device *rdev,
245 u8 voltage_type, u16 *min_voltage);
246int radeon_atom_get_max_voltage(struct radeon_device *rdev,
247 u8 voltage_type, u16 *max_voltage);
248int radeon_atom_get_voltage_table(struct radeon_device *rdev,
249 u8 voltage_type, u8 voltage_mode,
250 struct atom_voltage_table *voltage_table);
251bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
252 u8 voltage_type, u8 voltage_mode);
253void radeon_atom_update_memory_dll(struct radeon_device *rdev,
254 u32 mem_clock);
255void radeon_atom_set_ac_timing(struct radeon_device *rdev,
256 u32 mem_clock);
257int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
258 u8 module_index,
259 struct atom_mc_reg_table *reg_table);
260int radeon_atom_get_memory_info(struct radeon_device *rdev,
261 u8 module_index, struct atom_memory_info *mem_info);
262int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
263 bool gddr5, u8 module_index,
264 struct atom_memory_clock_range_table *mclk_range_table);
265int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
266 u16 voltage_id, u16 *voltage);
267void rs690_pm_info(struct radeon_device *rdev);
216extern int rv6xx_get_temp(struct radeon_device *rdev);
217extern int rv770_get_temp(struct radeon_device *rdev);
218extern int evergreen_get_temp(struct radeon_device *rdev);
219extern int sumo_get_temp(struct radeon_device *rdev);
220extern int si_get_temp(struct radeon_device *rdev);
268extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
269				    unsigned *bankh, unsigned *mtaspect,
270				    unsigned *tile_split);
@@ -549,6 +596,20 @@ struct radeon_scratch {
596int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
597void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
598
599/*
600 * GPU doorbell structures, functions & helpers
601 */
602struct radeon_doorbell {
603 u32 num_pages;
604 bool free[1024];
605 /* doorbell mmio */
606 resource_size_t base;
607 resource_size_t size;
608 void __iomem *ptr;
609};
610
611int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
612void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
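/*
 * Usage sketch for the allocator declared above (only the prototypes are
 * part of this patch; the semantics are inferred from struct radeon_doorbell,
 * and offset_for() is purely illustrative):
 *
 *	u32 page;
 *
 *	if (radeon_doorbell_get(rdev, &page) == 0) {
 *		cik_mm_wdoorbell(rdev, offset_for(page), wptr);
 *		...
 *		radeon_doorbell_free(rdev, page);
 *	}
 */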
613
614/*
615 * IRQS.
@@ -600,10 +661,21 @@ struct evergreen_irq_stat_regs {
661	u32 afmt_status6;
662};
663
664struct cik_irq_stat_regs {
665 u32 disp_int;
666 u32 disp_int_cont;
667 u32 disp_int_cont2;
668 u32 disp_int_cont3;
669 u32 disp_int_cont4;
670 u32 disp_int_cont5;
671 u32 disp_int_cont6;
672};
673
674union radeon_irq_stat_regs {
675	struct r500_irq_stat_regs r500;
676	struct r600_irq_stat_regs r600;
677	struct evergreen_irq_stat_regs evergreen;
678	struct cik_irq_stat_regs cik;
679};
680
681#define RADEON_MAX_HPD_PINS 6
@@ -620,6 +692,7 @@ struct radeon_irq {
692	bool hpd[RADEON_MAX_HPD_PINS];
693	bool afmt[RADEON_MAX_AFMT_BLOCKS];
694	union radeon_irq_stat_regs stat_regs;
695	bool dpm_thermal;
696};
697
698int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -677,6 +750,22 @@ struct radeon_ring {
750	u32 idx;
751	u64 last_semaphore_signal_addr;
752	u64 last_semaphore_wait_addr;
753 /* for CIK queues */
754 u32 me;
755 u32 pipe;
756 u32 queue;
757 struct radeon_bo *mqd_obj;
758 u32 doorbell_page_num;
759 u32 doorbell_offset;
760 unsigned wptr_offs;
761};
762
763struct radeon_mec {
764 struct radeon_bo *hpd_eop_obj;
765 u64 hpd_eop_gpu_addr;
766 u32 num_pipe;
767 u32 num_mec;
768 u32 num_queue;
769};
770
771/*
@@ -778,15 +867,22 @@ struct r600_blit {
867};
868
869/*
781 * SI RLC stuff
870 * RLC stuff
871 */
783struct si_rlc {
872#include "clearstate_defs.h"
873
874struct radeon_rlc {
875	/* for power gating */
876	struct radeon_bo *save_restore_obj;
877	uint64_t save_restore_gpu_addr;
878	volatile uint32_t *sr_ptr;
879	u32 *reg_list;
880	u32 reg_list_size;
881	/* for clear state */
882	struct radeon_bo *clear_state_obj;
883	uint64_t clear_state_gpu_addr;
884	volatile uint32_t *cs_ptr;
885	struct cs_section_def *cs_data;
886};
887
888int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -883,6 +979,7 @@ struct radeon_cs_parser {
979	u32 cs_flags;
980	u32 ring;
981	s32 priority;
982	struct ww_acquire_ctx ticket;
983};
984
985extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
@@ -934,6 +1031,8 @@ struct radeon_wb {
1031#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
1032#define R600_WB_UVD_RPTR_OFFSET  2560
1033#define R600_WB_EVENT_OFFSET     3072
1034#define CIK_WB_CP1_WPTR_OFFSET     3328
1035#define CIK_WB_CP2_WPTR_OFFSET     3584
1036
1037/**
1038 * struct radeon_pm - power management data
@@ -958,6 +1057,7 @@ struct radeon_wb {
1057enum radeon_pm_method {
1058	PM_METHOD_PROFILE,
1059	PM_METHOD_DYNPM,
1060	PM_METHOD_DPM,
1061};
1062
1063enum radeon_dynpm_state {
@@ -983,11 +1083,23 @@ enum radeon_voltage_type {
1083};
1084
1085enum radeon_pm_state_type {
1086	/* not used for dpm */
1087	POWER_STATE_TYPE_DEFAULT,
1088	POWER_STATE_TYPE_POWERSAVE,
1089	/* user selectable states */
1090	POWER_STATE_TYPE_BATTERY,
1091	POWER_STATE_TYPE_BALANCED,
1092	POWER_STATE_TYPE_PERFORMANCE,
1093	/* internal states */
1094	POWER_STATE_TYPE_INTERNAL_UVD,
1095	POWER_STATE_TYPE_INTERNAL_UVD_SD,
1096	POWER_STATE_TYPE_INTERNAL_UVD_HD,
1097	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1098	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1099	POWER_STATE_TYPE_INTERNAL_BOOT,
1100	POWER_STATE_TYPE_INTERNAL_THERMAL,
1101	POWER_STATE_TYPE_INTERNAL_ACPI,
1102	POWER_STATE_TYPE_INTERNAL_ULV,
1103};
1104
1105enum radeon_pm_profile_type {
@@ -1016,12 +1128,17 @@ struct radeon_pm_profile {
1128
1129enum radeon_int_thermal_type {
1130	THERMAL_TYPE_NONE,
1131	THERMAL_TYPE_EXTERNAL,
1132	THERMAL_TYPE_EXTERNAL_GPIO,
1133	THERMAL_TYPE_RV6XX,
1134	THERMAL_TYPE_RV770,
1135	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1136	THERMAL_TYPE_EVERGREEN,
1137	THERMAL_TYPE_SUMO,
1138	THERMAL_TYPE_NI,
1139	THERMAL_TYPE_SI,
1140	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1141	THERMAL_TYPE_CI,
1142};
1143
1144struct radeon_voltage {
@@ -1075,6 +1192,193 @@ struct radeon_power_state {
1192 */
1193#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
1194
1195enum radeon_dpm_auto_throttle_src {
1196 RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
1197 RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1198};
1199
1200enum radeon_dpm_event_src {
1201 RADEON_DPM_EVENT_SRC_ANALOG = 0,
1202 RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
1203 RADEON_DPM_EVENT_SRC_DIGITAL = 2,
1204 RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1205 RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1206};
1207
1208struct radeon_ps {
1209 u32 caps; /* vbios flags */
1210 u32 class; /* vbios flags */
1211 u32 class2; /* vbios flags */
1212 /* UVD clocks */
1213 u32 vclk;
1214 u32 dclk;
1215 /* asic priv */
1216 void *ps_priv;
1217};
1218
1219struct radeon_dpm_thermal {
1220 /* thermal interrupt work */
1221 struct work_struct work;
1222 /* low temperature threshold */
1223 int min_temp;
1224 /* high temperature threshold */
1225 int max_temp;
1226 /* was interrupt low to high or high to low */
1227 bool high_to_low;
1228};
1229
1230enum radeon_clk_action
1231{
1232 RADEON_SCLK_UP = 1,
1233 RADEON_SCLK_DOWN
1234};
1235
1236struct radeon_blacklist_clocks
1237{
1238 u32 sclk;
1239 u32 mclk;
1240 enum radeon_clk_action action;
1241};
1242
1243struct radeon_clock_and_voltage_limits {
1244 u32 sclk;
1245 u32 mclk;
1246 u32 vddc;
1247 u32 vddci;
1248};
1249
1250struct radeon_clock_array {
1251 u32 count;
1252 u32 *values;
1253};
1254
1255struct radeon_clock_voltage_dependency_entry {
1256 u32 clk;
1257 u16 v;
1258};
1259
1260struct radeon_clock_voltage_dependency_table {
1261 u32 count;
1262 struct radeon_clock_voltage_dependency_entry *entries;
1263};
1264
1265struct radeon_cac_leakage_entry {
1266 u16 vddc;
1267 u32 leakage;
1268};
1269
1270struct radeon_cac_leakage_table {
1271 u32 count;
1272 struct radeon_cac_leakage_entry *entries;
1273};
1274
1275struct radeon_phase_shedding_limits_entry {
1276 u16 voltage;
1277 u32 sclk;
1278 u32 mclk;
1279};
1280
1281struct radeon_phase_shedding_limits_table {
1282 u32 count;
1283 struct radeon_phase_shedding_limits_entry *entries;
1284};
1285
1286struct radeon_ppm_table {
1287 u8 ppm_design;
1288 u16 cpu_core_number;
1289 u32 platform_tdp;
1290 u32 small_ac_platform_tdp;
1291 u32 platform_tdc;
1292 u32 small_ac_platform_tdc;
1293 u32 apu_tdp;
1294 u32 dgpu_tdp;
1295 u32 dgpu_ulv_power;
1296 u32 tj_max;
1297};
1298
1299struct radeon_dpm_dynamic_state {
1300 struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
1301 struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
1302 struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
1303 struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1304 struct radeon_clock_array valid_sclk_values;
1305 struct radeon_clock_array valid_mclk_values;
1306 struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
1307 struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
1308 u32 mclk_sclk_ratio;
1309 u32 sclk_mclk_delta;
1310 u16 vddc_vddci_delta;
1311 u16 min_vddc_for_pcie_gen2;
1312 struct radeon_cac_leakage_table cac_leakage_table;
1313 struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
1314 struct radeon_ppm_table *ppm_table;
1315};
1316
1317struct radeon_dpm_fan {
1318 u16 t_min;
1319 u16 t_med;
1320 u16 t_high;
1321 u16 pwm_min;
1322 u16 pwm_med;
1323 u16 pwm_high;
1324 u8 t_hyst;
1325 u32 cycle_delay;
1326 u16 t_max;
1327 bool ucode_fan_control;
1328};
1329
1330enum radeon_pcie_gen {
1331 RADEON_PCIE_GEN1 = 0,
1332 RADEON_PCIE_GEN2 = 1,
1333 RADEON_PCIE_GEN3 = 2,
1334 RADEON_PCIE_GEN_INVALID = 0xffff
1335};
1336
1337struct radeon_dpm {
1338 struct radeon_ps *ps;
1339 /* number of valid power states */
1340 int num_ps;
1341 /* current power state that is active */
1342 struct radeon_ps *current_ps;
1343 /* requested power state */
1344 struct radeon_ps *requested_ps;
1345 /* boot up power state */
1346 struct radeon_ps *boot_ps;
1347 /* default uvd power state */
1348 struct radeon_ps *uvd_ps;
1349 enum radeon_pm_state_type state;
1350 enum radeon_pm_state_type user_state;
1351 u32 platform_caps;
1352 u32 voltage_response_time;
1353 u32 backbias_response_time;
1354 void *priv;
1355 u32 new_active_crtcs;
1356 int new_active_crtc_count;
1357 u32 current_active_crtcs;
1358 int current_active_crtc_count;
1359 struct radeon_dpm_dynamic_state dyn_state;
1360 struct radeon_dpm_fan fan;
1361 u32 tdp_limit;
1362 u32 near_tdp_limit;
1363 u32 near_tdp_limit_adjusted;
1364 u32 sq_ramping_threshold;
1365 u32 cac_leakage;
1366 u16 tdp_od_limit;
1367 u32 tdp_adjustment;
1368 u16 load_line_slope;
1369 bool power_control;
1370 bool ac_power;
1371 /* special states active */
1372 bool thermal_active;
1373 bool uvd_active;
1374 /* thermal handling */
1375 struct radeon_dpm_thermal thermal;
1376};
1377
1378void radeon_dpm_enable_power_state(struct radeon_device *rdev,
1379 enum radeon_pm_state_type dpm_state);
1380
1381
1382struct radeon_pm {
1383	struct mutex mutex;
1384	/* write locked while reprogramming mclk */
@@ -1128,6 +1432,9 @@ struct radeon_pm {
1432	/* internal thermal controller on rv6xx+ */
1433	enum radeon_int_thermal_type int_thermal_type;
1434	struct device *int_hwmon_dev;
1435	/* dpm */
1436	bool dpm_enabled;
1437	struct radeon_dpm dpm;
1438};
1439
1440int radeon_pm_get_type_index(struct radeon_device *rdev,
@@ -1266,6 +1573,10 @@ struct radeon_asic {
1573	int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1574	bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1575	void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1576
1577	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1578	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1579	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1580	} ring[RADEON_NUM_RINGS];
1581	/* irqs */
1582	struct {
@@ -1325,7 +1636,7 @@ struct radeon_asic {
1636	bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1637	void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1638	} hpd;
1328	/* power management */
1639	/* static power management */
1640	struct {
1641	void (*misc)(struct radeon_device *rdev);
1642	void (*prepare)(struct radeon_device *rdev);
@@ -1340,7 +1651,24 @@ struct radeon_asic {
1651	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1652	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
1653	int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
1654	int (*get_temperature)(struct radeon_device *rdev);
1655	} pm;
1656 /* dynamic power management */
1657 struct {
1658 int (*init)(struct radeon_device *rdev);
1659 void (*setup_asic)(struct radeon_device *rdev);
1660 int (*enable)(struct radeon_device *rdev);
1661 void (*disable)(struct radeon_device *rdev);
1662 int (*pre_set_power_state)(struct radeon_device *rdev);
1663 int (*set_power_state)(struct radeon_device *rdev);
1664 void (*post_set_power_state)(struct radeon_device *rdev);
1665 void (*display_configuration_changed)(struct radeon_device *rdev);
1666 void (*fini)(struct radeon_device *rdev);
1667 u32 (*get_sclk)(struct radeon_device *rdev, bool low);
1668 u32 (*get_mclk)(struct radeon_device *rdev, bool low);
1669 void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
1670 void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
1671 } dpm;
1344 /* pageflipping */ 1672 /* pageflipping */
1345 struct { 1673 struct {
1346 void (*pre_page_flip)(struct radeon_device *rdev, int crtc); 1674 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
@@ -1505,6 +1833,36 @@ struct si_asic {
1505 uint32_t tile_mode_array[32]; 1833 uint32_t tile_mode_array[32];
1506}; 1834};
1507 1835
1836struct cik_asic {
1837 unsigned max_shader_engines;
1838 unsigned max_tile_pipes;
1839 unsigned max_cu_per_sh;
1840 unsigned max_sh_per_se;
1841 unsigned max_backends_per_se;
1842 unsigned max_texture_channel_caches;
1843 unsigned max_gprs;
1844 unsigned max_gs_threads;
1845 unsigned max_hw_contexts;
1846 unsigned sc_prim_fifo_size_frontend;
1847 unsigned sc_prim_fifo_size_backend;
1848 unsigned sc_hiz_tile_fifo_size;
1849 unsigned sc_earlyz_tile_fifo_size;
1850
1851 unsigned num_tile_pipes;
1852 unsigned num_backends_per_se;
1853 unsigned backend_disable_mask_per_asic;
1854 unsigned backend_map;
1855 unsigned num_texture_channel_caches;
1856 unsigned mem_max_burst_length_bytes;
1857 unsigned mem_row_size_in_kb;
1858 unsigned shader_engine_tile_size;
1859 unsigned num_gpus;
1860 unsigned multi_gpu_tile_size;
1861
1862 unsigned tile_config;
1863 uint32_t tile_mode_array[32];
1864};
1865
1508union radeon_asic_config { 1866union radeon_asic_config {
1509 struct r300_asic r300; 1867 struct r300_asic r300;
1510 struct r100_asic r100; 1868 struct r100_asic r100;
@@ -1513,6 +1871,7 @@ union radeon_asic_config {
1513 struct evergreen_asic evergreen; 1871 struct evergreen_asic evergreen;
1514 struct cayman_asic cayman; 1872 struct cayman_asic cayman;
1515 struct si_asic si; 1873 struct si_asic si;
1874 struct cik_asic cik;
1516}; 1875};
1517 1876
1518/* 1877/*
@@ -1657,6 +2016,7 @@ struct radeon_device {
1657 struct radeon_gart gart; 2016 struct radeon_gart gart;
1658 struct radeon_mode_info mode_info; 2017 struct radeon_mode_info mode_info;
1659 struct radeon_scratch scratch; 2018 struct radeon_scratch scratch;
2019 struct radeon_doorbell doorbell;
1660 struct radeon_mman mman; 2020 struct radeon_mman mman;
1661 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2021 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
1662 wait_queue_head_t fence_queue; 2022 wait_queue_head_t fence_queue;
@@ -1684,13 +2044,18 @@ struct radeon_device {
1684 const struct firmware *mc_fw; /* NI MC firmware */ 2044 const struct firmware *mc_fw; /* NI MC firmware */
1685 const struct firmware *ce_fw; /* SI CE firmware */ 2045 const struct firmware *ce_fw; /* SI CE firmware */
1686 const struct firmware *uvd_fw; /* UVD firmware */ 2046 const struct firmware *uvd_fw; /* UVD firmware */
2047 const struct firmware *mec_fw; /* CIK MEC firmware */
2048 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2049 const struct firmware *smc_fw; /* SMC firmware */
1687 struct r600_blit r600_blit; 2050 struct r600_blit r600_blit;
1688 struct r600_vram_scratch vram_scratch; 2051 struct r600_vram_scratch vram_scratch;
1689 int msi_enabled; /* msi enabled */ 2052 int msi_enabled; /* msi enabled */
1690 struct r600_ih ih; /* r6/700 interrupt ring */ 2053 struct r600_ih ih; /* r6/700 interrupt ring */
1691 struct si_rlc rlc; 2054 struct radeon_rlc rlc;
2055 struct radeon_mec mec;
1692 struct work_struct hotplug_work; 2056 struct work_struct hotplug_work;
1693 struct work_struct audio_work; 2057 struct work_struct audio_work;
2058 struct work_struct reset_work;
1694 int num_crtc; /* number of crtcs */ 2059 int num_crtc; /* number of crtcs */
1695 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 2060 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
1696 bool audio_enabled; 2061 bool audio_enabled;
@@ -1727,6 +2092,9 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
1727u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 2092u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1728void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2093void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1729 2094
2095u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
2096void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2097
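[Editor's note] The two doorbell accessors declared above back the RDOORBELL32/WDOORBELL32 macros added further down in this header. A minimal sketch of what such accessors typically look like, assuming the doorbell aperture is ioremapped and that the new radeon_doorbell member exposes ptr/size fields (field names are assumptions; only the struct member itself appears in this diff):

	static u32 example_rdoorbell(struct radeon_device *rdev, u32 offset)
	{
		/* Sketch: read a 32-bit doorbell from the mapped aperture. */
		if (offset < rdev->doorbell.size)
			return readl((void __iomem *)rdev->doorbell.ptr + offset);
		return 0;	/* out-of-range reads return 0 in this sketch */
	}

	static void example_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
	{
		/* Sketch: ring a doorbell by writing the new value. */
		if (offset < rdev->doorbell.size)
			writel(v, (void __iomem *)rdev->doorbell.ptr + offset);
	}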
1730/* 2098/*
1731 * Cast helper 2099 * Cast helper
1732 */ 2100 */
@@ -1754,6 +2122,18 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1754#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 2122#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1755#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg)) 2123#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
1756#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 2124#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
2125#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
2126#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
2127#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
2128#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
2129#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
2130#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
2131#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
2132#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
2133#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
2134#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
2135#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
2136#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
1757#define WREG32_P(reg, val, mask) \ 2137#define WREG32_P(reg, val, mask) \
1758 do { \ 2138 do { \
1759 uint32_t tmp_ = RREG32(reg); \ 2139 uint32_t tmp_ = RREG32(reg); \
@@ -1774,6 +2154,9 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1774#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 2154#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1775#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 2155#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1776 2156
2157#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
2158#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
2159
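[Editor's note] As a usage sketch, a CIK compute ring's set_wptr hook can kick the hardware through the doorbell instead of an MMIO register; ring->doorbell_offset is an assumed field name here:

	/* Sketch: publish the new write pointer via the ring's doorbell. */
	WDOORBELL32(ring->doorbell_offset, ring->wptr);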
1777/* 2160/*
1778 * Indirect registers accessor 2161 * Indirect registers accessor
1779 */ 2162 */
@@ -1792,6 +2175,96 @@ static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uin
1792 WREG32(RADEON_PCIE_DATA, (v)); 2175 WREG32(RADEON_PCIE_DATA, (v));
1793} 2176}
1794 2177
2178static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
2179{
2180 u32 r;
2181
2182 WREG32(TN_SMC_IND_INDEX_0, (reg));
2183 r = RREG32(TN_SMC_IND_DATA_0);
2184 return r;
2185}
2186
2187static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2188{
2189 WREG32(TN_SMC_IND_INDEX_0, (reg));
2190 WREG32(TN_SMC_IND_DATA_0, (v));
2191}
2192
2193static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
2194{
2195 u32 r;
2196
2197 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2198 r = RREG32(R600_RCU_DATA);
2199 return r;
2200}
2201
2202static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2203{
2204 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2205 WREG32(R600_RCU_DATA, (v));
2206}
2207
2208static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
2209{
2210 u32 r;
2211
2212 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2213 r = RREG32(EVERGREEN_CG_IND_DATA);
2214 return r;
2215}
2216
2217static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2218{
2219 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2220 WREG32(EVERGREEN_CG_IND_DATA, (v));
2221}
2222
2223static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
2224{
2225 u32 r;
2226
2227 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2228 r = RREG32(EVERGREEN_PIF_PHY0_DATA);
2229 return r;
2230}
2231
2232static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2233{
2234 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2235 WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
2236}
2237
2238static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
2239{
2240 u32 r;
2241
2242 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2243 r = RREG32(EVERGREEN_PIF_PHY1_DATA);
2244 return r;
2245}
2246
2247static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2248{
2249 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2250 WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
2251}
2252
2253static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
2254{
2255 u32 r;
2256
2257 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2258 r = RREG32(R600_UVD_CTX_DATA);
2259 return r;
2260}
2261
2262static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2263{
2264 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2265 WREG32(R600_UVD_CTX_DATA, (v));
2266}
2267
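[Editor's note] All of the inline helpers above follow the same index/data idiom: write the register offset (masked to the block's index width) into an INDEX register, then read or write the paired DATA register. A generic sketch of the pattern (helper and parameter names hypothetical); note the two-step sequence is not atomic, so callers that can race on the same INDEX/DATA pair need external locking:

	static inline u32 indirect_rreg(struct radeon_device *rdev,
					u32 index_reg, u32 data_reg,
					u32 reg, u32 mask)
	{
		WREG32(index_reg, reg & mask);	/* select the indirect register */
		return RREG32(data_reg);	/* latch and return its value */
	}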
1795void r100_pll_errata_after_index(struct radeon_device *rdev); 2268void r100_pll_errata_after_index(struct radeon_device *rdev);
1796 2269
1797 2270
@@ -1840,6 +2313,16 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1840 (rdev->flags & RADEON_IS_IGP)) 2313 (rdev->flags & RADEON_IS_IGP))
1841#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) 2314#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
1842#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) 2315#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
2316#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
2317
2318#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
2319 (rdev->ddev->pdev->device == 0x6850) || \
2320 (rdev->ddev->pdev->device == 0x6858) || \
2321 (rdev->ddev->pdev->device == 0x6859) || \
2322 (rdev->ddev->pdev->device == 0x6840) || \
2323 (rdev->ddev->pdev->device == 0x6841) || \
2324 (rdev->ddev->pdev->device == 0x6842) || \
2325 (rdev->ddev->pdev->device == 0x6843))
1843 2326
1844/* 2327/*
1845 * BIOS helpers. 2328 * BIOS helpers.
@@ -1892,6 +2375,9 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1892#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 2375#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
1893#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) 2376#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
1894#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) 2377#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
2378#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r))
2379#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r))
2380#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r))
1895#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 2381#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1896#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 2382#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
1897#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 2383#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
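[Editor's note] The three new per-ring accessors let common ring code fetch pointers without knowing whether they live in MMIO registers, writeback memory, or per-queue state (as with the CIK compute rings introduced below). A sketch of how free space can be derived from them (simplified relative to radeon's ring_free_dw bookkeeping; assumes ptr_mask covers the ring size in dwords):

	/* Sketch: dwords free in a circular ring buffer. */
	u32 rptr = radeon_ring_get_rptr(rdev, ring);
	u32 wptr = radeon_ring_get_wptr(rdev, ring);
	u32 free_dw = (rptr - wptr - 1) & ring->ptr_mask;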
@@ -1915,6 +2401,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1915#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) 2401#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
1916#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) 2402#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
1917#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d)) 2403#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
2404#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
1918#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) 2405#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
1919#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) 2406#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
1920#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) 2407#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1935,6 +2422,19 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1935#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) 2422#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
1936#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) 2423#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
1937#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev)) 2424#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
2425#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
2426#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
2427#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
2428#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
2429#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
2430#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
2431#define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
2432#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
2433#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
2434#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
2435#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
2436#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
2437#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
1938 2438
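[Editor's note] These macros mirror the dpm callback table added to struct radeon_asic earlier in this header. A hedged sketch of the three-phase transition they enable (locking and target-state selection omitted):

	int ret;

	ret = radeon_dpm_pre_set_power_state(rdev);	/* pick/validate target */
	if (ret)
		return ret;
	radeon_dpm_set_power_state(rdev);		/* program the hardware */
	radeon_dpm_post_set_power_state(rdev);		/* commit bookkeeping */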
1939/* Common functions */ 2439/* Common functions */
1940/* AGP */ 2440/* AGP */
@@ -2054,6 +2554,10 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev);
2054#if defined(CONFIG_ACPI) 2554#if defined(CONFIG_ACPI)
2055extern int radeon_acpi_init(struct radeon_device *rdev); 2555extern int radeon_acpi_init(struct radeon_device *rdev);
2056extern void radeon_acpi_fini(struct radeon_device *rdev); 2556extern void radeon_acpi_fini(struct radeon_device *rdev);
2557extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
2558extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
2559 u8 perf_req, bool advertise);
2560extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
2057#else 2561#else
2058static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } 2562static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
2059static inline void radeon_acpi_fini(struct radeon_device *rdev) { } 2563static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 196d28d99570..10f98c7742d8 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -78,6 +78,22 @@ struct atcs_verify_interface {
78 u32 function_bits; /* supported functions bit vector */ 78 u32 function_bits; /* supported functions bit vector */
79} __packed; 79} __packed;
80 80
81#define ATCS_VALID_FLAGS_MASK 0x3
82
83struct atcs_pref_req_input {
84 u16 size; /* structure size in bytes (includes size field) */
85 u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
86 u16 valid_flags_mask; /* valid flags mask */
87 u16 flags; /* flags */
88 u8 req_type; /* request type */
89 u8 perf_req; /* performance request */
90} __packed;
91
92struct atcs_pref_req_output {
93 u16 size; /* structure size in bytes (includes size field) */
94 u8 ret_val; /* return value */
95} __packed;
96
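[Editor's note] The client_id field packs the requester's PCI address exactly as the inline comment describes, and radeon_acpi_pcie_performance_request below builds it the same way:

	/* Worked example (values illustrative): bus 0x01, dev 0x02, func 0x3
	 * gives devfn = (0x02 << 3) | 0x3 = 0x13, so client_id = 0x0113. */
	u16 client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8);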
81/* Call the ATIF method 97/* Call the ATIF method
82 */ 98 */
83/** 99/**
@@ -506,6 +522,135 @@ out:
506} 522}
507 523
508/** 524/**
525 * radeon_acpi_is_pcie_performance_request_supported
526 *
527 * @rdev: radeon_device pointer
528 *
529 * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
530 * are supported (all asics).
531 * Returns true if supported, false if not.
532 */
533bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev)
534{
535 struct radeon_atcs *atcs = &rdev->atcs;
536
537 if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
538 return true;
539
540 return false;
541}
542
543/**
544 * radeon_acpi_pcie_notify_device_ready
545 *
546 * @rdev: radeon_device pointer
547 *
548 * Executes the PCIE_DEVICE_READY_NOTIFICATION method
549 * (all asics).
550 * Returns 0 on success, error on failure.
551 */
552int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
553{
554 acpi_handle handle;
555 union acpi_object *info;
556 struct radeon_atcs *atcs = &rdev->atcs;
557
558 /* Get the device handle */
559 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
560 if (!handle)
561 return -EINVAL;
562
563 if (!atcs->functions.pcie_dev_rdy)
564 return -EINVAL;
565
566 info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
567 if (!info)
568 return -EIO;
569
570 kfree(info);
571
572 return 0;
573}
574
575/**
576 * radeon_acpi_pcie_performance_request
577 *
578 * @rdev: radeon_device pointer
579 * @perf_req: requested perf level (pcie gen speed)
580 * @advertise: if true, also set the advertise caps flag
581 *
582 * Executes the PCIE_PERFORMANCE_REQUEST method to
583 * change the pcie gen speed (all asics).
584 * Returns 0 on success, error on failure.
585 */
586int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
587 u8 perf_req, bool advertise)
588{
589 acpi_handle handle;
590 union acpi_object *info;
591 struct radeon_atcs *atcs = &rdev->atcs;
592 struct atcs_pref_req_input atcs_input;
593 struct atcs_pref_req_output atcs_output;
594 struct acpi_buffer params;
595 size_t size;
596 u32 retry = 3;
597
598 /* Get the device handle */
599 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
600 if (!handle)
601 return -EINVAL;
602
603 if (!atcs->functions.pcie_perf_req)
604 return -EINVAL;
605
606 atcs_input.size = sizeof(struct atcs_pref_req_input);
607 /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
608 atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8);
609 atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
610 atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
611 if (advertise)
612 atcs_input.flags |= ATCS_ADVERTISE_CAPS;
613 atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
614 atcs_input.perf_req = perf_req;
615
616 params.length = sizeof(struct atcs_pref_req_input);
617 params.pointer = &atcs_input;
618
619 while (retry--) {
620 info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
621 if (!info)
622 return -EIO;
623
624 memset(&atcs_output, 0, sizeof(atcs_output));
625
626 size = *(u16 *) info->buffer.pointer;
627 if (size < 3) {
628 DRM_INFO("ATCS buffer is too small: %zu\n", size);
629 kfree(info);
630 return -EINVAL;
631 }
632 size = min(sizeof(atcs_output), size);
633
634 memcpy(&atcs_output, info->buffer.pointer, size);
635
636 kfree(info);
637
638 switch (atcs_output.ret_val) {
639 case ATCS_REQUEST_REFUSED:
640 default:
641 return -EINVAL;
642 case ATCS_REQUEST_COMPLETE:
643 return 0;
644 case ATCS_REQUEST_IN_PROGRESS:
645 udelay(10);
646 break;
647 }
648 }
649
650 return 0;
651}
652
653/**
509 * radeon_acpi_event - handle notify events 654 * radeon_acpi_event - handle notify events
510 * 655 *
511 * @nb: notifier block 656 * @nb: notifier block
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2802b47ee95..a5b244dc50ca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -126,7 +126,11 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
126 rdev->mc_rreg = &rs780_mc_rreg; 126 rdev->mc_rreg = &rs780_mc_rreg;
127 rdev->mc_wreg = &rs780_mc_wreg; 127 rdev->mc_wreg = &rs780_mc_wreg;
128 } 128 }
129 if (rdev->family >= CHIP_R600) { 129
130 if (rdev->family >= CHIP_BONAIRE) {
131 rdev->pciep_rreg = &cik_pciep_rreg;
132 rdev->pciep_wreg = &cik_pciep_wreg;
133 } else if (rdev->family >= CHIP_R600) {
130 rdev->pciep_rreg = &r600_pciep_rreg; 134 rdev->pciep_rreg = &r600_pciep_rreg;
131 rdev->pciep_wreg = &r600_pciep_wreg; 135 rdev->pciep_wreg = &r600_pciep_wreg;
132 } 136 }
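[Editor's note] With this change the PCIe-port accessors become family-gated like the MC accessors above it; callers stay family-agnostic because the RREG32_PCIE_PORT/WREG32_PCIE_PORT macros in radeon.h dispatch through these pointers:

	/* Sketch (register offset illustrative): resolves to cik_pciep_rreg
	 * on Bonaire and newer, r600_pciep_rreg on older parts. */
	u32 val = RREG32_PCIE_PORT(0x38);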
@@ -192,6 +196,9 @@ static struct radeon_asic r100_asic = {
192 .ring_test = &r100_ring_test, 196 .ring_test = &r100_ring_test,
193 .ib_test = &r100_ib_test, 197 .ib_test = &r100_ib_test,
194 .is_lockup = &r100_gpu_is_lockup, 198 .is_lockup = &r100_gpu_is_lockup,
199 .get_rptr = &radeon_ring_generic_get_rptr,
200 .get_wptr = &radeon_ring_generic_get_wptr,
201 .set_wptr = &radeon_ring_generic_set_wptr,
195 } 202 }
196 }, 203 },
197 .irq = { 204 .irq = {
@@ -268,6 +275,9 @@ static struct radeon_asic r200_asic = {
268 .ring_test = &r100_ring_test, 275 .ring_test = &r100_ring_test,
269 .ib_test = &r100_ib_test, 276 .ib_test = &r100_ib_test,
270 .is_lockup = &r100_gpu_is_lockup, 277 .is_lockup = &r100_gpu_is_lockup,
278 .get_rptr = &radeon_ring_generic_get_rptr,
279 .get_wptr = &radeon_ring_generic_get_wptr,
280 .set_wptr = &radeon_ring_generic_set_wptr,
271 } 281 }
272 }, 282 },
273 .irq = { 283 .irq = {
@@ -344,6 +354,9 @@ static struct radeon_asic r300_asic = {
344 .ring_test = &r100_ring_test, 354 .ring_test = &r100_ring_test,
345 .ib_test = &r100_ib_test, 355 .ib_test = &r100_ib_test,
346 .is_lockup = &r100_gpu_is_lockup, 356 .is_lockup = &r100_gpu_is_lockup,
357 .get_rptr = &radeon_ring_generic_get_rptr,
358 .get_wptr = &radeon_ring_generic_get_wptr,
359 .set_wptr = &radeon_ring_generic_set_wptr,
347 } 360 }
348 }, 361 },
349 .irq = { 362 .irq = {
@@ -420,6 +433,9 @@ static struct radeon_asic r300_asic_pcie = {
420 .ring_test = &r100_ring_test, 433 .ring_test = &r100_ring_test,
421 .ib_test = &r100_ib_test, 434 .ib_test = &r100_ib_test,
422 .is_lockup = &r100_gpu_is_lockup, 435 .is_lockup = &r100_gpu_is_lockup,
436 .get_rptr = &radeon_ring_generic_get_rptr,
437 .get_wptr = &radeon_ring_generic_get_wptr,
438 .set_wptr = &radeon_ring_generic_set_wptr,
423 } 439 }
424 }, 440 },
425 .irq = { 441 .irq = {
@@ -496,6 +512,9 @@ static struct radeon_asic r420_asic = {
496 .ring_test = &r100_ring_test, 512 .ring_test = &r100_ring_test,
497 .ib_test = &r100_ib_test, 513 .ib_test = &r100_ib_test,
498 .is_lockup = &r100_gpu_is_lockup, 514 .is_lockup = &r100_gpu_is_lockup,
515 .get_rptr = &radeon_ring_generic_get_rptr,
516 .get_wptr = &radeon_ring_generic_get_wptr,
517 .set_wptr = &radeon_ring_generic_set_wptr,
499 } 518 }
500 }, 519 },
501 .irq = { 520 .irq = {
@@ -572,6 +591,9 @@ static struct radeon_asic rs400_asic = {
572 .ring_test = &r100_ring_test, 591 .ring_test = &r100_ring_test,
573 .ib_test = &r100_ib_test, 592 .ib_test = &r100_ib_test,
574 .is_lockup = &r100_gpu_is_lockup, 593 .is_lockup = &r100_gpu_is_lockup,
594 .get_rptr = &radeon_ring_generic_get_rptr,
595 .get_wptr = &radeon_ring_generic_get_wptr,
596 .set_wptr = &radeon_ring_generic_set_wptr,
575 } 597 }
576 }, 598 },
577 .irq = { 599 .irq = {
@@ -648,6 +670,9 @@ static struct radeon_asic rs600_asic = {
648 .ring_test = &r100_ring_test, 670 .ring_test = &r100_ring_test,
649 .ib_test = &r100_ib_test, 671 .ib_test = &r100_ib_test,
650 .is_lockup = &r100_gpu_is_lockup, 672 .is_lockup = &r100_gpu_is_lockup,
673 .get_rptr = &radeon_ring_generic_get_rptr,
674 .get_wptr = &radeon_ring_generic_get_wptr,
675 .set_wptr = &radeon_ring_generic_set_wptr,
651 } 676 }
652 }, 677 },
653 .irq = { 678 .irq = {
@@ -726,6 +751,9 @@ static struct radeon_asic rs690_asic = {
726 .ring_test = &r100_ring_test, 751 .ring_test = &r100_ring_test,
727 .ib_test = &r100_ib_test, 752 .ib_test = &r100_ib_test,
728 .is_lockup = &r100_gpu_is_lockup, 753 .is_lockup = &r100_gpu_is_lockup,
754 .get_rptr = &radeon_ring_generic_get_rptr,
755 .get_wptr = &radeon_ring_generic_get_wptr,
756 .set_wptr = &radeon_ring_generic_set_wptr,
729 } 757 }
730 }, 758 },
731 .irq = { 759 .irq = {
@@ -804,6 +832,9 @@ static struct radeon_asic rv515_asic = {
804 .ring_test = &r100_ring_test, 832 .ring_test = &r100_ring_test,
805 .ib_test = &r100_ib_test, 833 .ib_test = &r100_ib_test,
806 .is_lockup = &r100_gpu_is_lockup, 834 .is_lockup = &r100_gpu_is_lockup,
835 .get_rptr = &radeon_ring_generic_get_rptr,
836 .get_wptr = &radeon_ring_generic_get_wptr,
837 .set_wptr = &radeon_ring_generic_set_wptr,
807 } 838 }
808 }, 839 },
809 .irq = { 840 .irq = {
@@ -880,6 +911,9 @@ static struct radeon_asic r520_asic = {
880 .ring_test = &r100_ring_test, 911 .ring_test = &r100_ring_test,
881 .ib_test = &r100_ib_test, 912 .ib_test = &r100_ib_test,
882 .is_lockup = &r100_gpu_is_lockup, 913 .is_lockup = &r100_gpu_is_lockup,
914 .get_rptr = &radeon_ring_generic_get_rptr,
915 .get_wptr = &radeon_ring_generic_get_wptr,
916 .set_wptr = &radeon_ring_generic_set_wptr,
883 } 917 }
884 }, 918 },
885 .irq = { 919 .irq = {
@@ -957,6 +991,9 @@ static struct radeon_asic r600_asic = {
957 .ring_test = &r600_ring_test, 991 .ring_test = &r600_ring_test,
958 .ib_test = &r600_ib_test, 992 .ib_test = &r600_ib_test,
959 .is_lockup = &r600_gfx_is_lockup, 993 .is_lockup = &r600_gfx_is_lockup,
994 .get_rptr = &radeon_ring_generic_get_rptr,
995 .get_wptr = &radeon_ring_generic_get_wptr,
996 .set_wptr = &radeon_ring_generic_set_wptr,
960 }, 997 },
961 [R600_RING_TYPE_DMA_INDEX] = { 998 [R600_RING_TYPE_DMA_INDEX] = {
962 .ib_execute = &r600_dma_ring_ib_execute, 999 .ib_execute = &r600_dma_ring_ib_execute,
@@ -966,6 +1003,9 @@ static struct radeon_asic r600_asic = {
966 .ring_test = &r600_dma_ring_test, 1003 .ring_test = &r600_dma_ring_test,
967 .ib_test = &r600_dma_ib_test, 1004 .ib_test = &r600_dma_ib_test,
968 .is_lockup = &r600_dma_is_lockup, 1005 .is_lockup = &r600_dma_is_lockup,
1006 .get_rptr = &radeon_ring_generic_get_rptr,
1007 .get_wptr = &radeon_ring_generic_get_wptr,
1008 .set_wptr = &radeon_ring_generic_set_wptr,
969 } 1009 }
970 }, 1010 },
971 .irq = { 1011 .irq = {
@@ -1012,6 +1052,115 @@ static struct radeon_asic r600_asic = {
1012 .get_pcie_lanes = &r600_get_pcie_lanes, 1052 .get_pcie_lanes = &r600_get_pcie_lanes,
1013 .set_pcie_lanes = &r600_set_pcie_lanes, 1053 .set_pcie_lanes = &r600_set_pcie_lanes,
1014 .set_clock_gating = NULL, 1054 .set_clock_gating = NULL,
1055 .get_temperature = &rv6xx_get_temp,
1056 },
1057 .pflip = {
1058 .pre_page_flip = &rs600_pre_page_flip,
1059 .page_flip = &rs600_page_flip,
1060 .post_page_flip = &rs600_post_page_flip,
1061 },
1062};
1063
1064static struct radeon_asic rv6xx_asic = {
1065 .init = &r600_init,
1066 .fini = &r600_fini,
1067 .suspend = &r600_suspend,
1068 .resume = &r600_resume,
1069 .vga_set_state = &r600_vga_set_state,
1070 .asic_reset = &r600_asic_reset,
1071 .ioctl_wait_idle = r600_ioctl_wait_idle,
1072 .gui_idle = &r600_gui_idle,
1073 .mc_wait_for_idle = &r600_mc_wait_for_idle,
1074 .get_xclk = &r600_get_xclk,
1075 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1076 .gart = {
1077 .tlb_flush = &r600_pcie_gart_tlb_flush,
1078 .set_page = &rs600_gart_set_page,
1079 },
1080 .ring = {
1081 [RADEON_RING_TYPE_GFX_INDEX] = {
1082 .ib_execute = &r600_ring_ib_execute,
1083 .emit_fence = &r600_fence_ring_emit,
1084 .emit_semaphore = &r600_semaphore_ring_emit,
1085 .cs_parse = &r600_cs_parse,
1086 .ring_test = &r600_ring_test,
1087 .ib_test = &r600_ib_test,
1088 .is_lockup = &r600_gfx_is_lockup,
1089 .get_rptr = &radeon_ring_generic_get_rptr,
1090 .get_wptr = &radeon_ring_generic_get_wptr,
1091 .set_wptr = &radeon_ring_generic_set_wptr,
1092 },
1093 [R600_RING_TYPE_DMA_INDEX] = {
1094 .ib_execute = &r600_dma_ring_ib_execute,
1095 .emit_fence = &r600_dma_fence_ring_emit,
1096 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1097 .cs_parse = &r600_dma_cs_parse,
1098 .ring_test = &r600_dma_ring_test,
1099 .ib_test = &r600_dma_ib_test,
1100 .is_lockup = &r600_dma_is_lockup,
1101 .get_rptr = &radeon_ring_generic_get_rptr,
1102 .get_wptr = &radeon_ring_generic_get_wptr,
1103 .set_wptr = &radeon_ring_generic_set_wptr,
1104 }
1105 },
1106 .irq = {
1107 .set = &r600_irq_set,
1108 .process = &r600_irq_process,
1109 },
1110 .display = {
1111 .bandwidth_update = &rv515_bandwidth_update,
1112 .get_vblank_counter = &rs600_get_vblank_counter,
1113 .wait_for_vblank = &avivo_wait_for_vblank,
1114 .set_backlight_level = &atombios_set_backlight_level,
1115 .get_backlight_level = &atombios_get_backlight_level,
1116 },
1117 .copy = {
1118 .blit = &r600_copy_blit,
1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1120 .dma = &r600_copy_dma,
1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1122 .copy = &r600_copy_dma,
1123 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1124 },
1125 .surface = {
1126 .set_reg = r600_set_surface_reg,
1127 .clear_reg = r600_clear_surface_reg,
1128 },
1129 .hpd = {
1130 .init = &r600_hpd_init,
1131 .fini = &r600_hpd_fini,
1132 .sense = &r600_hpd_sense,
1133 .set_polarity = &r600_hpd_set_polarity,
1134 },
1135 .pm = {
1136 .misc = &r600_pm_misc,
1137 .prepare = &rs600_pm_prepare,
1138 .finish = &rs600_pm_finish,
1139 .init_profile = &r600_pm_init_profile,
1140 .get_dynpm_state = &r600_pm_get_dynpm_state,
1141 .get_engine_clock = &radeon_atom_get_engine_clock,
1142 .set_engine_clock = &radeon_atom_set_engine_clock,
1143 .get_memory_clock = &radeon_atom_get_memory_clock,
1144 .set_memory_clock = &radeon_atom_set_memory_clock,
1145 .get_pcie_lanes = &r600_get_pcie_lanes,
1146 .set_pcie_lanes = &r600_set_pcie_lanes,
1147 .set_clock_gating = NULL,
1148 .get_temperature = &rv6xx_get_temp,
1149 },
1150 .dpm = {
1151 .init = &rv6xx_dpm_init,
1152 .setup_asic = &rv6xx_setup_asic,
1153 .enable = &rv6xx_dpm_enable,
1154 .disable = &rv6xx_dpm_disable,
1155 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1156 .set_power_state = &rv6xx_dpm_set_power_state,
1157 .post_set_power_state = &r600_dpm_post_set_power_state,
1158 .display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
1159 .fini = &rv6xx_dpm_fini,
1160 .get_sclk = &rv6xx_dpm_get_sclk,
1161 .get_mclk = &rv6xx_dpm_get_mclk,
1162 .print_power_state = &rv6xx_dpm_print_power_state,
1163 .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
1015 }, 1164 },
1016 .pflip = { 1165 .pflip = {
1017 .pre_page_flip = &rs600_pre_page_flip, 1166 .pre_page_flip = &rs600_pre_page_flip,
@@ -1045,6 +1194,9 @@ static struct radeon_asic rs780_asic = {
1045 .ring_test = &r600_ring_test, 1194 .ring_test = &r600_ring_test,
1046 .ib_test = &r600_ib_test, 1195 .ib_test = &r600_ib_test,
1047 .is_lockup = &r600_gfx_is_lockup, 1196 .is_lockup = &r600_gfx_is_lockup,
1197 .get_rptr = &radeon_ring_generic_get_rptr,
1198 .get_wptr = &radeon_ring_generic_get_wptr,
1199 .set_wptr = &radeon_ring_generic_set_wptr,
1048 }, 1200 },
1049 [R600_RING_TYPE_DMA_INDEX] = { 1201 [R600_RING_TYPE_DMA_INDEX] = {
1050 .ib_execute = &r600_dma_ring_ib_execute, 1202 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1054,6 +1206,9 @@ static struct radeon_asic rs780_asic = {
1054 .ring_test = &r600_dma_ring_test, 1206 .ring_test = &r600_dma_ring_test,
1055 .ib_test = &r600_dma_ib_test, 1207 .ib_test = &r600_dma_ib_test,
1056 .is_lockup = &r600_dma_is_lockup, 1208 .is_lockup = &r600_dma_is_lockup,
1209 .get_rptr = &radeon_ring_generic_get_rptr,
1210 .get_wptr = &radeon_ring_generic_get_wptr,
1211 .set_wptr = &radeon_ring_generic_set_wptr,
1057 } 1212 }
1058 }, 1213 },
1059 .irq = { 1214 .irq = {
@@ -1100,6 +1255,21 @@ static struct radeon_asic rs780_asic = {
1100 .get_pcie_lanes = NULL, 1255 .get_pcie_lanes = NULL,
1101 .set_pcie_lanes = NULL, 1256 .set_pcie_lanes = NULL,
1102 .set_clock_gating = NULL, 1257 .set_clock_gating = NULL,
1258 .get_temperature = &rv6xx_get_temp,
1259 },
1260 .dpm = {
1261 .init = &rs780_dpm_init,
1262 .setup_asic = &rs780_dpm_setup_asic,
1263 .enable = &rs780_dpm_enable,
1264 .disable = &rs780_dpm_disable,
1265 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1266 .set_power_state = &rs780_dpm_set_power_state,
1267 .post_set_power_state = &r600_dpm_post_set_power_state,
1268 .display_configuration_changed = &rs780_dpm_display_configuration_changed,
1269 .fini = &rs780_dpm_fini,
1270 .get_sclk = &rs780_dpm_get_sclk,
1271 .get_mclk = &rs780_dpm_get_mclk,
1272 .print_power_state = &rs780_dpm_print_power_state,
1103 }, 1273 },
1104 .pflip = { 1274 .pflip = {
1105 .pre_page_flip = &rs600_pre_page_flip, 1275 .pre_page_flip = &rs600_pre_page_flip,
@@ -1133,6 +1303,9 @@ static struct radeon_asic rv770_asic = {
1133 .ring_test = &r600_ring_test, 1303 .ring_test = &r600_ring_test,
1134 .ib_test = &r600_ib_test, 1304 .ib_test = &r600_ib_test,
1135 .is_lockup = &r600_gfx_is_lockup, 1305 .is_lockup = &r600_gfx_is_lockup,
1306 .get_rptr = &radeon_ring_generic_get_rptr,
1307 .get_wptr = &radeon_ring_generic_get_wptr,
1308 .set_wptr = &radeon_ring_generic_set_wptr,
1136 }, 1309 },
1137 [R600_RING_TYPE_DMA_INDEX] = { 1310 [R600_RING_TYPE_DMA_INDEX] = {
1138 .ib_execute = &r600_dma_ring_ib_execute, 1311 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1142,6 +1315,9 @@ static struct radeon_asic rv770_asic = {
1142 .ring_test = &r600_dma_ring_test, 1315 .ring_test = &r600_dma_ring_test,
1143 .ib_test = &r600_dma_ib_test, 1316 .ib_test = &r600_dma_ib_test,
1144 .is_lockup = &r600_dma_is_lockup, 1317 .is_lockup = &r600_dma_is_lockup,
1318 .get_rptr = &radeon_ring_generic_get_rptr,
1319 .get_wptr = &radeon_ring_generic_get_wptr,
1320 .set_wptr = &radeon_ring_generic_set_wptr,
1145 }, 1321 },
1146 [R600_RING_TYPE_UVD_INDEX] = { 1322 [R600_RING_TYPE_UVD_INDEX] = {
1147 .ib_execute = &r600_uvd_ib_execute, 1323 .ib_execute = &r600_uvd_ib_execute,
@@ -1151,6 +1327,9 @@ static struct radeon_asic rv770_asic = {
1151 .ring_test = &r600_uvd_ring_test, 1327 .ring_test = &r600_uvd_ring_test,
1152 .ib_test = &r600_uvd_ib_test, 1328 .ib_test = &r600_uvd_ib_test,
1153 .is_lockup = &radeon_ring_test_lockup, 1329 .is_lockup = &radeon_ring_test_lockup,
1330 .get_rptr = &radeon_ring_generic_get_rptr,
1331 .get_wptr = &radeon_ring_generic_get_wptr,
1332 .set_wptr = &radeon_ring_generic_set_wptr,
1154 } 1333 }
1155 }, 1334 },
1156 .irq = { 1335 .irq = {
@@ -1198,6 +1377,22 @@ static struct radeon_asic rv770_asic = {
1198 .set_pcie_lanes = &r600_set_pcie_lanes, 1377 .set_pcie_lanes = &r600_set_pcie_lanes,
1199 .set_clock_gating = &radeon_atom_set_clock_gating, 1378 .set_clock_gating = &radeon_atom_set_clock_gating,
1200 .set_uvd_clocks = &rv770_set_uvd_clocks, 1379 .set_uvd_clocks = &rv770_set_uvd_clocks,
1380 .get_temperature = &rv770_get_temp,
1381 },
1382 .dpm = {
1383 .init = &rv770_dpm_init,
1384 .setup_asic = &rv770_dpm_setup_asic,
1385 .enable = &rv770_dpm_enable,
1386 .disable = &rv770_dpm_disable,
1387 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1388 .set_power_state = &rv770_dpm_set_power_state,
1389 .post_set_power_state = &r600_dpm_post_set_power_state,
1390 .display_configuration_changed = &rv770_dpm_display_configuration_changed,
1391 .fini = &rv770_dpm_fini,
1392 .get_sclk = &rv770_dpm_get_sclk,
1393 .get_mclk = &rv770_dpm_get_mclk,
1394 .print_power_state = &rv770_dpm_print_power_state,
1395 .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
1201 }, 1396 },
1202 .pflip = { 1397 .pflip = {
1203 .pre_page_flip = &rs600_pre_page_flip, 1398 .pre_page_flip = &rs600_pre_page_flip,
@@ -1231,6 +1426,9 @@ static struct radeon_asic evergreen_asic = {
1231 .ring_test = &r600_ring_test, 1426 .ring_test = &r600_ring_test,
1232 .ib_test = &r600_ib_test, 1427 .ib_test = &r600_ib_test,
1233 .is_lockup = &evergreen_gfx_is_lockup, 1428 .is_lockup = &evergreen_gfx_is_lockup,
1429 .get_rptr = &radeon_ring_generic_get_rptr,
1430 .get_wptr = &radeon_ring_generic_get_wptr,
1431 .set_wptr = &radeon_ring_generic_set_wptr,
1234 }, 1432 },
1235 [R600_RING_TYPE_DMA_INDEX] = { 1433 [R600_RING_TYPE_DMA_INDEX] = {
1236 .ib_execute = &evergreen_dma_ring_ib_execute, 1434 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1240,6 +1438,9 @@ static struct radeon_asic evergreen_asic = {
1240 .ring_test = &r600_dma_ring_test, 1438 .ring_test = &r600_dma_ring_test,
1241 .ib_test = &r600_dma_ib_test, 1439 .ib_test = &r600_dma_ib_test,
1242 .is_lockup = &evergreen_dma_is_lockup, 1440 .is_lockup = &evergreen_dma_is_lockup,
1441 .get_rptr = &radeon_ring_generic_get_rptr,
1442 .get_wptr = &radeon_ring_generic_get_wptr,
1443 .set_wptr = &radeon_ring_generic_set_wptr,
1243 }, 1444 },
1244 [R600_RING_TYPE_UVD_INDEX] = { 1445 [R600_RING_TYPE_UVD_INDEX] = {
1245 .ib_execute = &r600_uvd_ib_execute, 1446 .ib_execute = &r600_uvd_ib_execute,
@@ -1249,6 +1450,9 @@ static struct radeon_asic evergreen_asic = {
1249 .ring_test = &r600_uvd_ring_test, 1450 .ring_test = &r600_uvd_ring_test,
1250 .ib_test = &r600_uvd_ib_test, 1451 .ib_test = &r600_uvd_ib_test,
1251 .is_lockup = &radeon_ring_test_lockup, 1452 .is_lockup = &radeon_ring_test_lockup,
1453 .get_rptr = &radeon_ring_generic_get_rptr,
1454 .get_wptr = &radeon_ring_generic_get_wptr,
1455 .set_wptr = &radeon_ring_generic_set_wptr,
1252 } 1456 }
1253 }, 1457 },
1254 .irq = { 1458 .irq = {
@@ -1296,6 +1500,22 @@ static struct radeon_asic evergreen_asic = {
1296 .set_pcie_lanes = &r600_set_pcie_lanes, 1500 .set_pcie_lanes = &r600_set_pcie_lanes,
1297 .set_clock_gating = NULL, 1501 .set_clock_gating = NULL,
1298 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1502 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1503 .get_temperature = &evergreen_get_temp,
1504 },
1505 .dpm = {
1506 .init = &cypress_dpm_init,
1507 .setup_asic = &cypress_dpm_setup_asic,
1508 .enable = &cypress_dpm_enable,
1509 .disable = &cypress_dpm_disable,
1510 .pre_set_power_state = &r600_dpm_pre_set_power_state,
1511 .set_power_state = &cypress_dpm_set_power_state,
1512 .post_set_power_state = &r600_dpm_post_set_power_state,
1513 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1514 .fini = &cypress_dpm_fini,
1515 .get_sclk = &rv770_dpm_get_sclk,
1516 .get_mclk = &rv770_dpm_get_mclk,
1517 .print_power_state = &rv770_dpm_print_power_state,
1518 .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
1299 }, 1519 },
1300 .pflip = { 1520 .pflip = {
1301 .pre_page_flip = &evergreen_pre_page_flip, 1521 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1329,6 +1549,9 @@ static struct radeon_asic sumo_asic = {
1329 .ring_test = &r600_ring_test, 1549 .ring_test = &r600_ring_test,
1330 .ib_test = &r600_ib_test, 1550 .ib_test = &r600_ib_test,
1331 .is_lockup = &evergreen_gfx_is_lockup, 1551 .is_lockup = &evergreen_gfx_is_lockup,
1552 .get_rptr = &radeon_ring_generic_get_rptr,
1553 .get_wptr = &radeon_ring_generic_get_wptr,
1554 .set_wptr = &radeon_ring_generic_set_wptr,
1332 }, 1555 },
1333 [R600_RING_TYPE_DMA_INDEX] = { 1556 [R600_RING_TYPE_DMA_INDEX] = {
1334 .ib_execute = &evergreen_dma_ring_ib_execute, 1557 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1338,6 +1561,9 @@ static struct radeon_asic sumo_asic = {
1338 .ring_test = &r600_dma_ring_test, 1561 .ring_test = &r600_dma_ring_test,
1339 .ib_test = &r600_dma_ib_test, 1562 .ib_test = &r600_dma_ib_test,
1340 .is_lockup = &evergreen_dma_is_lockup, 1563 .is_lockup = &evergreen_dma_is_lockup,
1564 .get_rptr = &radeon_ring_generic_get_rptr,
1565 .get_wptr = &radeon_ring_generic_get_wptr,
1566 .set_wptr = &radeon_ring_generic_set_wptr,
1341 }, 1567 },
1342 [R600_RING_TYPE_UVD_INDEX] = { 1568 [R600_RING_TYPE_UVD_INDEX] = {
1343 .ib_execute = &r600_uvd_ib_execute, 1569 .ib_execute = &r600_uvd_ib_execute,
@@ -1347,6 +1573,9 @@ static struct radeon_asic sumo_asic = {
1347 .ring_test = &r600_uvd_ring_test, 1573 .ring_test = &r600_uvd_ring_test,
1348 .ib_test = &r600_uvd_ib_test, 1574 .ib_test = &r600_uvd_ib_test,
1349 .is_lockup = &radeon_ring_test_lockup, 1575 .is_lockup = &radeon_ring_test_lockup,
1576 .get_rptr = &radeon_ring_generic_get_rptr,
1577 .get_wptr = &radeon_ring_generic_get_wptr,
1578 .set_wptr = &radeon_ring_generic_set_wptr,
1350 } 1579 }
1351 }, 1580 },
1352 .irq = { 1581 .irq = {
@@ -1394,6 +1623,22 @@ static struct radeon_asic sumo_asic = {
1394 .set_pcie_lanes = NULL, 1623 .set_pcie_lanes = NULL,
1395 .set_clock_gating = NULL, 1624 .set_clock_gating = NULL,
1396 .set_uvd_clocks = &sumo_set_uvd_clocks, 1625 .set_uvd_clocks = &sumo_set_uvd_clocks,
1626 .get_temperature = &sumo_get_temp,
1627 },
1628 .dpm = {
1629 .init = &sumo_dpm_init,
1630 .setup_asic = &sumo_dpm_setup_asic,
1631 .enable = &sumo_dpm_enable,
1632 .disable = &sumo_dpm_disable,
1633 .pre_set_power_state = &sumo_dpm_pre_set_power_state,
1634 .set_power_state = &sumo_dpm_set_power_state,
1635 .post_set_power_state = &sumo_dpm_post_set_power_state,
1636 .display_configuration_changed = &sumo_dpm_display_configuration_changed,
1637 .fini = &sumo_dpm_fini,
1638 .get_sclk = &sumo_dpm_get_sclk,
1639 .get_mclk = &sumo_dpm_get_mclk,
1640 .print_power_state = &sumo_dpm_print_power_state,
1641 .debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
1397 }, 1642 },
1398 .pflip = { 1643 .pflip = {
1399 .pre_page_flip = &evergreen_pre_page_flip, 1644 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1427,6 +1672,9 @@ static struct radeon_asic btc_asic = {
1427 .ring_test = &r600_ring_test, 1672 .ring_test = &r600_ring_test,
1428 .ib_test = &r600_ib_test, 1673 .ib_test = &r600_ib_test,
1429 .is_lockup = &evergreen_gfx_is_lockup, 1674 .is_lockup = &evergreen_gfx_is_lockup,
1675 .get_rptr = &radeon_ring_generic_get_rptr,
1676 .get_wptr = &radeon_ring_generic_get_wptr,
1677 .set_wptr = &radeon_ring_generic_set_wptr,
1430 }, 1678 },
1431 [R600_RING_TYPE_DMA_INDEX] = { 1679 [R600_RING_TYPE_DMA_INDEX] = {
1432 .ib_execute = &evergreen_dma_ring_ib_execute, 1680 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1436,6 +1684,9 @@ static struct radeon_asic btc_asic = {
1436 .ring_test = &r600_dma_ring_test, 1684 .ring_test = &r600_dma_ring_test,
1437 .ib_test = &r600_dma_ib_test, 1685 .ib_test = &r600_dma_ib_test,
1438 .is_lockup = &evergreen_dma_is_lockup, 1686 .is_lockup = &evergreen_dma_is_lockup,
1687 .get_rptr = &radeon_ring_generic_get_rptr,
1688 .get_wptr = &radeon_ring_generic_get_wptr,
1689 .set_wptr = &radeon_ring_generic_set_wptr,
1439 }, 1690 },
1440 [R600_RING_TYPE_UVD_INDEX] = { 1691 [R600_RING_TYPE_UVD_INDEX] = {
1441 .ib_execute = &r600_uvd_ib_execute, 1692 .ib_execute = &r600_uvd_ib_execute,
@@ -1445,6 +1696,9 @@ static struct radeon_asic btc_asic = {
1445 .ring_test = &r600_uvd_ring_test, 1696 .ring_test = &r600_uvd_ring_test,
1446 .ib_test = &r600_uvd_ib_test, 1697 .ib_test = &r600_uvd_ib_test,
1447 .is_lockup = &radeon_ring_test_lockup, 1698 .is_lockup = &radeon_ring_test_lockup,
1699 .get_rptr = &radeon_ring_generic_get_rptr,
1700 .get_wptr = &radeon_ring_generic_get_wptr,
1701 .set_wptr = &radeon_ring_generic_set_wptr,
1448 } 1702 }
1449 }, 1703 },
1450 .irq = { 1704 .irq = {
@@ -1492,6 +1746,22 @@ static struct radeon_asic btc_asic = {
1492 .set_pcie_lanes = &r600_set_pcie_lanes, 1746 .set_pcie_lanes = &r600_set_pcie_lanes,
1493 .set_clock_gating = NULL, 1747 .set_clock_gating = NULL,
1494 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1748 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1749 .get_temperature = &evergreen_get_temp,
1750 },
1751 .dpm = {
1752 .init = &btc_dpm_init,
1753 .setup_asic = &btc_dpm_setup_asic,
1754 .enable = &btc_dpm_enable,
1755 .disable = &btc_dpm_disable,
1756 .pre_set_power_state = &btc_dpm_pre_set_power_state,
1757 .set_power_state = &btc_dpm_set_power_state,
1758 .post_set_power_state = &btc_dpm_post_set_power_state,
1759 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1760 .fini = &btc_dpm_fini,
1761 .get_sclk = &btc_dpm_get_sclk,
1762 .get_mclk = &btc_dpm_get_mclk,
1763 .print_power_state = &rv770_dpm_print_power_state,
1764 .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
1495 }, 1765 },
1496 .pflip = { 1766 .pflip = {
1497 .pre_page_flip = &evergreen_pre_page_flip, 1767 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1533,6 +1803,9 @@ static struct radeon_asic cayman_asic = {
1533 .ib_test = &r600_ib_test, 1803 .ib_test = &r600_ib_test,
1534 .is_lockup = &cayman_gfx_is_lockup, 1804 .is_lockup = &cayman_gfx_is_lockup,
1535 .vm_flush = &cayman_vm_flush, 1805 .vm_flush = &cayman_vm_flush,
1806 .get_rptr = &radeon_ring_generic_get_rptr,
1807 .get_wptr = &radeon_ring_generic_get_wptr,
1808 .set_wptr = &radeon_ring_generic_set_wptr,
1536 }, 1809 },
1537 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1810 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1538 .ib_execute = &cayman_ring_ib_execute, 1811 .ib_execute = &cayman_ring_ib_execute,
@@ -1544,6 +1817,9 @@ static struct radeon_asic cayman_asic = {
1544 .ib_test = &r600_ib_test, 1817 .ib_test = &r600_ib_test,
1545 .is_lockup = &cayman_gfx_is_lockup, 1818 .is_lockup = &cayman_gfx_is_lockup,
1546 .vm_flush = &cayman_vm_flush, 1819 .vm_flush = &cayman_vm_flush,
1820 .get_rptr = &radeon_ring_generic_get_rptr,
1821 .get_wptr = &radeon_ring_generic_get_wptr,
1822 .set_wptr = &radeon_ring_generic_set_wptr,
1547 }, 1823 },
1548 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1824 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1549 .ib_execute = &cayman_ring_ib_execute, 1825 .ib_execute = &cayman_ring_ib_execute,
@@ -1555,6 +1831,9 @@ static struct radeon_asic cayman_asic = {
1555 .ib_test = &r600_ib_test, 1831 .ib_test = &r600_ib_test,
1556 .is_lockup = &cayman_gfx_is_lockup, 1832 .is_lockup = &cayman_gfx_is_lockup,
1557 .vm_flush = &cayman_vm_flush, 1833 .vm_flush = &cayman_vm_flush,
1834 .get_rptr = &radeon_ring_generic_get_rptr,
1835 .get_wptr = &radeon_ring_generic_get_wptr,
1836 .set_wptr = &radeon_ring_generic_set_wptr,
1558 }, 1837 },
1559 [R600_RING_TYPE_DMA_INDEX] = { 1838 [R600_RING_TYPE_DMA_INDEX] = {
1560 .ib_execute = &cayman_dma_ring_ib_execute, 1839 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1566,6 +1845,9 @@ static struct radeon_asic cayman_asic = {
1566 .ib_test = &r600_dma_ib_test, 1845 .ib_test = &r600_dma_ib_test,
1567 .is_lockup = &cayman_dma_is_lockup, 1846 .is_lockup = &cayman_dma_is_lockup,
1568 .vm_flush = &cayman_dma_vm_flush, 1847 .vm_flush = &cayman_dma_vm_flush,
1848 .get_rptr = &radeon_ring_generic_get_rptr,
1849 .get_wptr = &radeon_ring_generic_get_wptr,
1850 .set_wptr = &radeon_ring_generic_set_wptr,
1569 }, 1851 },
1570 [CAYMAN_RING_TYPE_DMA1_INDEX] = { 1852 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1571 .ib_execute = &cayman_dma_ring_ib_execute, 1853 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1577,6 +1859,9 @@ static struct radeon_asic cayman_asic = {
1577 .ib_test = &r600_dma_ib_test, 1859 .ib_test = &r600_dma_ib_test,
1578 .is_lockup = &cayman_dma_is_lockup, 1860 .is_lockup = &cayman_dma_is_lockup,
1579 .vm_flush = &cayman_dma_vm_flush, 1861 .vm_flush = &cayman_dma_vm_flush,
1862 .get_rptr = &radeon_ring_generic_get_rptr,
1863 .get_wptr = &radeon_ring_generic_get_wptr,
1864 .set_wptr = &radeon_ring_generic_set_wptr,
1580 }, 1865 },
1581 [R600_RING_TYPE_UVD_INDEX] = { 1866 [R600_RING_TYPE_UVD_INDEX] = {
1582 .ib_execute = &r600_uvd_ib_execute, 1867 .ib_execute = &r600_uvd_ib_execute,
@@ -1586,6 +1871,9 @@ static struct radeon_asic cayman_asic = {
1586 .ring_test = &r600_uvd_ring_test, 1871 .ring_test = &r600_uvd_ring_test,
1587 .ib_test = &r600_uvd_ib_test, 1872 .ib_test = &r600_uvd_ib_test,
1588 .is_lockup = &radeon_ring_test_lockup, 1873 .is_lockup = &radeon_ring_test_lockup,
1874 .get_rptr = &radeon_ring_generic_get_rptr,
1875 .get_wptr = &radeon_ring_generic_get_wptr,
1876 .set_wptr = &radeon_ring_generic_set_wptr,
1589 } 1877 }
1590 }, 1878 },
1591 .irq = { 1879 .irq = {
@@ -1633,6 +1921,22 @@ static struct radeon_asic cayman_asic = {
1633 .set_pcie_lanes = &r600_set_pcie_lanes, 1921 .set_pcie_lanes = &r600_set_pcie_lanes,
1634 .set_clock_gating = NULL, 1922 .set_clock_gating = NULL,
1635 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1923 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1924 .get_temperature = &evergreen_get_temp,
1925 },
1926 .dpm = {
1927 .init = &ni_dpm_init,
1928 .setup_asic = &ni_dpm_setup_asic,
1929 .enable = &ni_dpm_enable,
1930 .disable = &ni_dpm_disable,
1931 .pre_set_power_state = &ni_dpm_pre_set_power_state,
1932 .set_power_state = &ni_dpm_set_power_state,
1933 .post_set_power_state = &ni_dpm_post_set_power_state,
1934 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1935 .fini = &ni_dpm_fini,
1936 .get_sclk = &ni_dpm_get_sclk,
1937 .get_mclk = &ni_dpm_get_mclk,
1938 .print_power_state = &ni_dpm_print_power_state,
1939 .debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
1636 }, 1940 },
1637 .pflip = { 1941 .pflip = {
1638 .pre_page_flip = &evergreen_pre_page_flip, 1942 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1674,6 +1978,9 @@ static struct radeon_asic trinity_asic = {
1674 .ib_test = &r600_ib_test, 1978 .ib_test = &r600_ib_test,
1675 .is_lockup = &cayman_gfx_is_lockup, 1979 .is_lockup = &cayman_gfx_is_lockup,
1676 .vm_flush = &cayman_vm_flush, 1980 .vm_flush = &cayman_vm_flush,
1981 .get_rptr = &radeon_ring_generic_get_rptr,
1982 .get_wptr = &radeon_ring_generic_get_wptr,
1983 .set_wptr = &radeon_ring_generic_set_wptr,
1677 }, 1984 },
1678 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1985 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1679 .ib_execute = &cayman_ring_ib_execute, 1986 .ib_execute = &cayman_ring_ib_execute,
@@ -1685,6 +1992,9 @@ static struct radeon_asic trinity_asic = {
1685 .ib_test = &r600_ib_test, 1992 .ib_test = &r600_ib_test,
1686 .is_lockup = &cayman_gfx_is_lockup, 1993 .is_lockup = &cayman_gfx_is_lockup,
1687 .vm_flush = &cayman_vm_flush, 1994 .vm_flush = &cayman_vm_flush,
1995 .get_rptr = &radeon_ring_generic_get_rptr,
1996 .get_wptr = &radeon_ring_generic_get_wptr,
1997 .set_wptr = &radeon_ring_generic_set_wptr,
1688 }, 1998 },
1689 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1999 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1690 .ib_execute = &cayman_ring_ib_execute, 2000 .ib_execute = &cayman_ring_ib_execute,
@@ -1696,6 +2006,9 @@ static struct radeon_asic trinity_asic = {
1696 .ib_test = &r600_ib_test, 2006 .ib_test = &r600_ib_test,
1697 .is_lockup = &cayman_gfx_is_lockup, 2007 .is_lockup = &cayman_gfx_is_lockup,
1698 .vm_flush = &cayman_vm_flush, 2008 .vm_flush = &cayman_vm_flush,
2009 .get_rptr = &radeon_ring_generic_get_rptr,
2010 .get_wptr = &radeon_ring_generic_get_wptr,
2011 .set_wptr = &radeon_ring_generic_set_wptr,
1699 }, 2012 },
1700 [R600_RING_TYPE_DMA_INDEX] = { 2013 [R600_RING_TYPE_DMA_INDEX] = {
1701 .ib_execute = &cayman_dma_ring_ib_execute, 2014 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1707,6 +2020,9 @@ static struct radeon_asic trinity_asic = {
1707 .ib_test = &r600_dma_ib_test, 2020 .ib_test = &r600_dma_ib_test,
1708 .is_lockup = &cayman_dma_is_lockup, 2021 .is_lockup = &cayman_dma_is_lockup,
1709 .vm_flush = &cayman_dma_vm_flush, 2022 .vm_flush = &cayman_dma_vm_flush,
2023 .get_rptr = &radeon_ring_generic_get_rptr,
2024 .get_wptr = &radeon_ring_generic_get_wptr,
2025 .set_wptr = &radeon_ring_generic_set_wptr,
1710 }, 2026 },
1711 [CAYMAN_RING_TYPE_DMA1_INDEX] = { 2027 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1712 .ib_execute = &cayman_dma_ring_ib_execute, 2028 .ib_execute = &cayman_dma_ring_ib_execute,
@@ -1718,6 +2034,9 @@ static struct radeon_asic trinity_asic = {
1718 .ib_test = &r600_dma_ib_test, 2034 .ib_test = &r600_dma_ib_test,
1719 .is_lockup = &cayman_dma_is_lockup, 2035 .is_lockup = &cayman_dma_is_lockup,
1720 .vm_flush = &cayman_dma_vm_flush, 2036 .vm_flush = &cayman_dma_vm_flush,
2037 .get_rptr = &radeon_ring_generic_get_rptr,
2038 .get_wptr = &radeon_ring_generic_get_wptr,
2039 .set_wptr = &radeon_ring_generic_set_wptr,
1721 }, 2040 },
1722 [R600_RING_TYPE_UVD_INDEX] = { 2041 [R600_RING_TYPE_UVD_INDEX] = {
1723 .ib_execute = &r600_uvd_ib_execute, 2042 .ib_execute = &r600_uvd_ib_execute,
@@ -1727,6 +2046,9 @@ static struct radeon_asic trinity_asic = {
1727 .ring_test = &r600_uvd_ring_test, 2046 .ring_test = &r600_uvd_ring_test,
1728 .ib_test = &r600_uvd_ib_test, 2047 .ib_test = &r600_uvd_ib_test,
1729 .is_lockup = &radeon_ring_test_lockup, 2048 .is_lockup = &radeon_ring_test_lockup,
2049 .get_rptr = &radeon_ring_generic_get_rptr,
2050 .get_wptr = &radeon_ring_generic_get_wptr,
2051 .set_wptr = &radeon_ring_generic_set_wptr,
1730 } 2052 }
1731 }, 2053 },
1732 .irq = { 2054 .irq = {
@@ -1772,6 +2094,22 @@ static struct radeon_asic trinity_asic = {
1772 .set_pcie_lanes = NULL, 2094 .set_pcie_lanes = NULL,
1773 .set_clock_gating = NULL, 2095 .set_clock_gating = NULL,
1774 .set_uvd_clocks = &sumo_set_uvd_clocks, 2096 .set_uvd_clocks = &sumo_set_uvd_clocks,
2097 .get_temperature = &tn_get_temp,
2098 },
2099 .dpm = {
2100 .init = &trinity_dpm_init,
2101 .setup_asic = &trinity_dpm_setup_asic,
2102 .enable = &trinity_dpm_enable,
2103 .disable = &trinity_dpm_disable,
2104 .pre_set_power_state = &trinity_dpm_pre_set_power_state,
2105 .set_power_state = &trinity_dpm_set_power_state,
2106 .post_set_power_state = &trinity_dpm_post_set_power_state,
2107 .display_configuration_changed = &trinity_dpm_display_configuration_changed,
2108 .fini = &trinity_dpm_fini,
2109 .get_sclk = &trinity_dpm_get_sclk,
2110 .get_mclk = &trinity_dpm_get_mclk,
2111 .print_power_state = &trinity_dpm_print_power_state,
2112 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
1775 }, 2113 },
1776 .pflip = { 2114 .pflip = {
1777 .pre_page_flip = &evergreen_pre_page_flip, 2115 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1813,6 +2151,9 @@ static struct radeon_asic si_asic = {
1813 .ib_test = &r600_ib_test, 2151 .ib_test = &r600_ib_test,
1814 .is_lockup = &si_gfx_is_lockup, 2152 .is_lockup = &si_gfx_is_lockup,
1815 .vm_flush = &si_vm_flush, 2153 .vm_flush = &si_vm_flush,
2154 .get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1824,6 +2165,9 @@ static struct radeon_asic si_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &si_gfx_is_lockup,
 			.vm_flush = &si_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1835,6 +2179,9 @@ static struct radeon_asic si_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &si_gfx_is_lockup,
 			.vm_flush = &si_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
 		},
 		[R600_RING_TYPE_DMA_INDEX] = {
 			.ib_execute = &cayman_dma_ring_ib_execute,
@@ -1846,6 +2193,9 @@ static struct radeon_asic si_asic = {
 			.ib_test = &r600_dma_ib_test,
 			.is_lockup = &si_dma_is_lockup,
 			.vm_flush = &si_dma_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
 		},
 		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
 			.ib_execute = &cayman_dma_ring_ib_execute,
@@ -1857,6 +2207,9 @@ static struct radeon_asic si_asic = {
 			.ib_test = &r600_dma_ib_test,
 			.is_lockup = &si_dma_is_lockup,
 			.vm_flush = &si_dma_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
 		},
 		[R600_RING_TYPE_UVD_INDEX] = {
 			.ib_execute = &r600_uvd_ib_execute,
@@ -1866,6 +2219,9 @@ static struct radeon_asic si_asic = {
 			.ring_test = &r600_uvd_ring_test,
 			.ib_test = &r600_uvd_ib_test,
 			.is_lockup = &radeon_ring_test_lockup,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
 		}
 	},
 	.irq = {
@@ -1911,6 +2267,332 @@ static struct radeon_asic si_asic = {
 		.set_pcie_lanes = &r600_set_pcie_lanes,
 		.set_clock_gating = NULL,
 		.set_uvd_clocks = &si_set_uvd_clocks,
+		.get_temperature = &si_get_temp,
+	},
+	.dpm = {
+		.init = &si_dpm_init,
+		.setup_asic = &si_dpm_setup_asic,
+		.enable = &si_dpm_enable,
+		.disable = &si_dpm_disable,
+		.pre_set_power_state = &si_dpm_pre_set_power_state,
+		.set_power_state = &si_dpm_set_power_state,
+		.post_set_power_state = &si_dpm_post_set_power_state,
+		.display_configuration_changed = &si_dpm_display_configuration_changed,
+		.fini = &si_dpm_fini,
+		.get_sclk = &ni_dpm_get_sclk,
+		.get_mclk = &ni_dpm_get_mclk,
+		.print_power_state = &ni_dpm_print_power_state,
+		.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic ci_asic = {
+	.init = &cik_init,
+	.fini = &cik_fini,
+	.suspend = &cik_suspend,
+	.resume = &cik_resume,
+	.asic_reset = &cik_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &cik_get_xclk,
+	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cik_vm_init,
+		.fini = &cik_vm_fini,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.set_page = &cik_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &cik_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_fence_gfx_ring_emit,
+			.emit_semaphore = &cik_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_ring_test,
+			.ib_test = &cik_ib_test,
+			.is_lockup = &cik_gfx_is_lockup,
+			.vm_flush = &cik_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &cik_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_fence_compute_ring_emit,
+			.emit_semaphore = &cik_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_ring_test,
+			.ib_test = &cik_ib_test,
+			.is_lockup = &cik_gfx_is_lockup,
+			.vm_flush = &cik_vm_flush,
+			.get_rptr = &cik_compute_ring_get_rptr,
+			.get_wptr = &cik_compute_ring_get_wptr,
+			.set_wptr = &cik_compute_ring_set_wptr,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &cik_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_fence_compute_ring_emit,
+			.emit_semaphore = &cik_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_ring_test,
+			.ib_test = &cik_ib_test,
+			.is_lockup = &cik_gfx_is_lockup,
+			.vm_flush = &cik_vm_flush,
+			.get_rptr = &cik_compute_ring_get_rptr,
+			.get_wptr = &cik_compute_ring_get_wptr,
+			.set_wptr = &cik_compute_ring_set_wptr,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cik_sdma_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_sdma_fence_ring_emit,
+			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_sdma_ring_test,
+			.ib_test = &cik_sdma_ib_test,
+			.is_lockup = &cik_sdma_is_lockup,
+			.vm_flush = &cik_dma_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cik_sdma_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_sdma_fence_ring_emit,
+			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_sdma_ring_test,
+			.ib_test = &cik_sdma_ib_test,
+			.is_lockup = &cik_sdma_is_lockup,
+			.vm_flush = &cik_dma_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &cayman_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		}
+	},
+	.irq = {
+		.set = &cik_irq_set,
+		.process = &cik_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce8_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+	},
+	.copy = {
+		.blit = NULL,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &cik_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &cik_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &cik_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic kv_asic = {
+	.init = &cik_init,
+	.fini = &cik_fini,
+	.suspend = &cik_suspend,
+	.resume = &cik_resume,
+	.asic_reset = &cik_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &cik_get_xclk,
+	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cik_vm_init,
+		.fini = &cik_vm_fini,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.set_page = &cik_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &cik_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_fence_gfx_ring_emit,
+			.emit_semaphore = &cik_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_ring_test,
+			.ib_test = &cik_ib_test,
+			.is_lockup = &cik_gfx_is_lockup,
+			.vm_flush = &cik_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &cik_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_fence_compute_ring_emit,
+			.emit_semaphore = &cik_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_ring_test,
+			.ib_test = &cik_ib_test,
+			.is_lockup = &cik_gfx_is_lockup,
+			.vm_flush = &cik_vm_flush,
+			.get_rptr = &cik_compute_ring_get_rptr,
+			.get_wptr = &cik_compute_ring_get_wptr,
+			.set_wptr = &cik_compute_ring_set_wptr,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &cik_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_fence_compute_ring_emit,
+			.emit_semaphore = &cik_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_ring_test,
+			.ib_test = &cik_ib_test,
+			.is_lockup = &cik_gfx_is_lockup,
+			.vm_flush = &cik_vm_flush,
+			.get_rptr = &cik_compute_ring_get_rptr,
+			.get_wptr = &cik_compute_ring_get_wptr,
+			.set_wptr = &cik_compute_ring_set_wptr,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cik_sdma_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_sdma_fence_ring_emit,
+			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_sdma_ring_test,
+			.ib_test = &cik_sdma_ib_test,
+			.is_lockup = &cik_sdma_is_lockup,
+			.vm_flush = &cik_dma_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cik_sdma_ring_ib_execute,
+			.ib_parse = &cik_ib_parse,
+			.emit_fence = &cik_sdma_fence_ring_emit,
+			.emit_semaphore = &cik_sdma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &cik_sdma_ring_test,
+			.ib_test = &cik_sdma_ib_test,
+			.is_lockup = &cik_sdma_is_lockup,
+			.vm_flush = &cik_dma_vm_flush,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &cayman_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+			.get_rptr = &radeon_ring_generic_get_rptr,
+			.get_wptr = &radeon_ring_generic_get_wptr,
+			.set_wptr = &radeon_ring_generic_set_wptr,
+		}
+	},
+	.irq = {
+		.set = &cik_irq_set,
+		.process = &cik_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce8_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+	},
+	.copy = {
+		.blit = NULL,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &cik_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &cik_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &cik_set_uvd_clocks,
+	},
 	.pflip = {
 		.pre_page_flip = &evergreen_pre_page_flip,
@@ -1999,16 +2681,15 @@ int radeon_asic_init(struct radeon_device *rdev)
 		rdev->asic = &r520_asic;
 		break;
 	case CHIP_R600:
+		rdev->asic = &r600_asic;
+		break;
 	case CHIP_RV610:
 	case CHIP_RV630:
 	case CHIP_RV620:
 	case CHIP_RV635:
 	case CHIP_RV670:
-		rdev->asic = &r600_asic;
-		if (rdev->family == CHIP_R600)
-			rdev->has_uvd = false;
-		else
-			rdev->has_uvd = true;
+		rdev->asic = &rv6xx_asic;
+		rdev->has_uvd = true;
 		break;
 	case CHIP_RS780:
 	case CHIP_RS880:
@@ -2082,6 +2763,19 @@ int radeon_asic_init(struct radeon_device *rdev)
 		else
 			rdev->has_uvd = true;
 		break;
+	case CHIP_BONAIRE:
+		rdev->asic = &ci_asic;
+		rdev->num_crtc = 6;
+		break;
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+		rdev->asic = &kv_asic;
+		/* set num crtcs */
+		if (rdev->family == CHIP_KAVERI)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 2;
+		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
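
[Editor's note] The si_asic/ci_asic/kv_asic tables above all follow the driver's per-ASIC dispatch pattern: radeon_asic_init() assigns one table of function pointers per chip family, and callers invoke hardware operations through the table rather than branching on the family. A minimal standalone sketch of that idiom (hypothetical names, not the driver's actual structs):

/* Minimal sketch of per-ASIC function-pointer dispatch (hypothetical names). */
#include <stdio.h>

struct chip_ops {
	const char *name;
	int (*ring_test)(void);	/* one slot per hardware operation */
};

static int old_ring_test(void) { return 0; }
static int new_ring_test(void) { return 1; }

static const struct chip_ops old_ops = { "old", old_ring_test };
static const struct chip_ops new_ops = { "new", new_ring_test };

int main(void)
{
	int family = 1;	/* stand-in for a chip-family id like rdev->family */
	const struct chip_ops *ops = family ? &new_ops : &old_ops;

	/* callers dispatch through the table, never on the family directly */
	printf("%s ring test: %d\n", ops->name, ops->ring_test());
	return 0;
}

New generations then only need to supply a new table (as ci_asic and kv_asic do here) instead of touching every call site.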
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a72759ede753..6822c7aeacaa 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -47,6 +47,12 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
 void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
 u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
 
+u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
+				 struct radeon_ring *ring);
+u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
+				 struct radeon_ring *ring);
+void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
+				  struct radeon_ring *ring);
 
 /*
  * r100,rv100,rs100,rv200,rs200
@@ -395,6 +401,35 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 u32 r600_get_xclk(struct radeon_device *rdev);
 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
+int rv6xx_get_temp(struct radeon_device *rdev);
+int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
+void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+/* rv6xx dpm */
+int rv6xx_dpm_init(struct radeon_device *rdev);
+int rv6xx_dpm_enable(struct radeon_device *rdev);
+void rv6xx_dpm_disable(struct radeon_device *rdev);
+int rv6xx_dpm_set_power_state(struct radeon_device *rdev);
+void rv6xx_setup_asic(struct radeon_device *rdev);
+void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev);
+void rv6xx_dpm_fini(struct radeon_device *rdev);
+u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *ps);
+void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m);
+/* rs780 dpm */
+int rs780_dpm_init(struct radeon_device *rdev);
+int rs780_dpm_enable(struct radeon_device *rdev);
+void rs780_dpm_disable(struct radeon_device *rdev);
+int rs780_dpm_set_power_state(struct radeon_device *rdev);
+void rs780_dpm_setup_asic(struct radeon_device *rdev);
+void rs780_dpm_display_configuration_changed(struct radeon_device *rdev);
+void rs780_dpm_fini(struct radeon_device *rdev);
+u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void rs780_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *ps);
 
 /* uvd */
 int r600_uvd_init(struct radeon_device *rdev);
@@ -428,6 +463,21 @@ int rv770_copy_dma(struct radeon_device *rdev,
 u32 rv770_get_xclk(struct radeon_device *rdev);
 int rv770_uvd_resume(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int rv770_get_temp(struct radeon_device *rdev);
+/* rv7xx pm */
+int rv770_dpm_init(struct radeon_device *rdev);
+int rv770_dpm_enable(struct radeon_device *rdev);
+void rv770_dpm_disable(struct radeon_device *rdev);
+int rv770_dpm_set_power_state(struct radeon_device *rdev);
+void rv770_dpm_setup_asic(struct radeon_device *rdev);
+void rv770_dpm_display_configuration_changed(struct radeon_device *rdev);
+void rv770_dpm_fini(struct radeon_device *rdev);
+u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void rv770_dpm_print_power_state(struct radeon_device *rdev,
+				 struct radeon_ps *ps);
+void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						       struct seq_file *m);
 
 /*
  * evergreen
@@ -482,6 +532,41 @@ int evergreen_copy_dma(struct radeon_device *rdev,
 		       struct radeon_fence **fence);
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
 void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+int evergreen_get_temp(struct radeon_device *rdev);
+int sumo_get_temp(struct radeon_device *rdev);
+int tn_get_temp(struct radeon_device *rdev);
+int cypress_dpm_init(struct radeon_device *rdev);
+void cypress_dpm_setup_asic(struct radeon_device *rdev);
+int cypress_dpm_enable(struct radeon_device *rdev);
+void cypress_dpm_disable(struct radeon_device *rdev);
+int cypress_dpm_set_power_state(struct radeon_device *rdev);
+void cypress_dpm_display_configuration_changed(struct radeon_device *rdev);
+void cypress_dpm_fini(struct radeon_device *rdev);
+int btc_dpm_init(struct radeon_device *rdev);
+void btc_dpm_setup_asic(struct radeon_device *rdev);
+int btc_dpm_enable(struct radeon_device *rdev);
+void btc_dpm_disable(struct radeon_device *rdev);
+int btc_dpm_pre_set_power_state(struct radeon_device *rdev);
+int btc_dpm_set_power_state(struct radeon_device *rdev);
+void btc_dpm_post_set_power_state(struct radeon_device *rdev);
+void btc_dpm_fini(struct radeon_device *rdev);
+u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
+int sumo_dpm_init(struct radeon_device *rdev);
+int sumo_dpm_enable(struct radeon_device *rdev);
+void sumo_dpm_disable(struct radeon_device *rdev);
+int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
+int sumo_dpm_set_power_state(struct radeon_device *rdev);
+void sumo_dpm_post_set_power_state(struct radeon_device *rdev);
+void sumo_dpm_setup_asic(struct radeon_device *rdev);
+void sumo_dpm_display_configuration_changed(struct radeon_device *rdev);
+void sumo_dpm_fini(struct radeon_device *rdev);
+u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void sumo_dpm_print_power_state(struct radeon_device *rdev,
+				struct radeon_ps *ps);
+void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						      struct seq_file *m);
 
 /*
  * cayman
@@ -516,6 +601,36 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
+int ni_dpm_init(struct radeon_device *rdev);
+void ni_dpm_setup_asic(struct radeon_device *rdev);
+int ni_dpm_enable(struct radeon_device *rdev);
+void ni_dpm_disable(struct radeon_device *rdev);
+int ni_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ni_dpm_set_power_state(struct radeon_device *rdev);
+void ni_dpm_post_set_power_state(struct radeon_device *rdev);
+void ni_dpm_fini(struct radeon_device *rdev);
+u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ni_dpm_print_power_state(struct radeon_device *rdev,
+			      struct radeon_ps *ps);
+void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+int trinity_dpm_init(struct radeon_device *rdev);
+int trinity_dpm_enable(struct radeon_device *rdev);
+void trinity_dpm_disable(struct radeon_device *rdev);
+int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
+int trinity_dpm_set_power_state(struct radeon_device *rdev);
+void trinity_dpm_post_set_power_state(struct radeon_device *rdev);
+void trinity_dpm_setup_asic(struct radeon_device *rdev);
+void trinity_dpm_display_configuration_changed(struct radeon_device *rdev);
+void trinity_dpm_fini(struct radeon_device *rdev);
+u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void trinity_dpm_print_power_state(struct radeon_device *rdev,
+				   struct radeon_ps *ps);
+void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+							 struct seq_file *m);
+
 /* DCE6 - SI */
 void dce6_bandwidth_update(struct radeon_device *rdev);
 
@@ -552,5 +667,80 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
 int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int si_get_temp(struct radeon_device *rdev);
+int si_dpm_init(struct radeon_device *rdev);
+void si_dpm_setup_asic(struct radeon_device *rdev);
+int si_dpm_enable(struct radeon_device *rdev);
+void si_dpm_disable(struct radeon_device *rdev);
+int si_dpm_pre_set_power_state(struct radeon_device *rdev);
+int si_dpm_set_power_state(struct radeon_device *rdev);
+void si_dpm_post_set_power_state(struct radeon_device *rdev);
+void si_dpm_fini(struct radeon_device *rdev);
+void si_dpm_display_configuration_changed(struct radeon_device *rdev);
+void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						    struct seq_file *m);
+
+/* DCE8 - CIK */
+void dce8_bandwidth_update(struct radeon_device *rdev);
+
+/*
+ * cik
+ */
+uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev);
+u32 cik_get_xclk(struct radeon_device *rdev);
+uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int cik_uvd_resume(struct radeon_device *rdev);
+void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence);
+void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait);
+void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cik_copy_dma(struct radeon_device *rdev,
+		 uint64_t src_offset, uint64_t dst_offset,
+		 unsigned num_gpu_pages,
+		 struct radeon_fence **fence);
+int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+			     struct radeon_fence *fence);
+void cik_fence_compute_ring_emit(struct radeon_device *rdev,
+				 struct radeon_fence *fence);
+void cik_semaphore_ring_emit(struct radeon_device *rdev,
+			     struct radeon_ring *cp,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+void cik_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cik_init(struct radeon_device *rdev);
+void cik_fini(struct radeon_device *rdev);
+int cik_suspend(struct radeon_device *rdev);
+int cik_resume(struct radeon_device *rdev);
+bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int cik_asic_reset(struct radeon_device *rdev);
+void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int cik_irq_set(struct radeon_device *rdev);
+int cik_irq_process(struct radeon_device *rdev);
+int cik_vm_init(struct radeon_device *rdev);
+void cik_vm_fini(struct radeon_device *rdev);
+void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_vm_set_page(struct radeon_device *rdev,
+		     struct radeon_ib *ib,
+		     uint64_t pe,
+		     uint64_t addr, unsigned count,
+		     uint32_t incr, uint32_t flags);
+void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
+			      struct radeon_ring *ring);
+u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
+			      struct radeon_ring *ring);
+void cik_compute_ring_set_wptr(struct radeon_device *rdev,
+			       struct radeon_ring *ring);
 
 #endif
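
[Editor's note] The new radeon_ring_generic_get_rptr/get_wptr/set_wptr prototypes above expose the ring read/write-pointer accessors as per-ring callbacks, so rings with unusual pointer registers (the CIK compute rings get cik_compute_ring_* variants in the asic tables) can override them. A rough standalone sketch of the rptr/wptr ring-buffer idea these callbacks serve; this is hypothetical userspace code, not the driver's implementation (the real driver masks with ring->ptr_mask and writes the wptr to a hardware register):

/* Rough sketch of the rptr/wptr circular-buffer idiom (hypothetical). */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16	/* power of two, so masking wraps the indices */

struct ring {
	uint32_t buf[RING_SIZE];
	uint32_t rptr;	/* consumer (hardware) position */
	uint32_t wptr;	/* producer (driver) position */
};

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr & (RING_SIZE - 1)] = v;
	r->wptr++;	/* a real driver would then publish this via set_wptr */
}

static uint32_t ring_free(const struct ring *r)
{
	return RING_SIZE - (r->wptr - r->rptr);
}

int main(void)
{
	struct ring r = { {0}, 0, 0 };

	ring_write(&r, 0xdeadbeef);
	printf("free entries: %u\n", ring_free(&r));
	return 0;
}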
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index dea6f63c9724..b1777d10d0b5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -56,10 +56,6 @@ extern void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
-/* local */
-static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
-				    u16 voltage_id, u16 *voltage);
-
 union atom_supported_devices {
 	struct _ATOM_SUPPORTED_DEVICES_INFO info;
 	struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
@@ -1247,6 +1243,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 		}
 		rdev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+		rdev->clock.current_dispclk = rdev->clock.default_dispclk;
 	}
 	*dcpll = *p1pll;
 
@@ -1269,6 +1266,7 @@ union igp_info {
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
 };
 
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1438,6 +1436,22 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
 			break;
 		}
 		break;
+	case 8:
+		switch (id) {
+		case ASIC_INTERNAL_SS_ON_TMDS:
+			percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
+			rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
+			break;
+		case ASIC_INTERNAL_SS_ON_HDMI:
+			percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
+			rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
+			break;
+		case ASIC_INTERNAL_SS_ON_LVDS:
+			percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
+			rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
+			break;
+		}
+		break;
 	default:
 		DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
 		break;
@@ -1499,6 +1513,10 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 					le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 				ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
 				ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+				if ((crev == 2) &&
+				    ((id == ASIC_INTERNAL_ENGINE_SS) ||
+				     (id == ASIC_INTERNAL_MEMORY_SS)))
+					ss->rate /= 100;
 				return true;
 			}
 		}
@@ -1513,6 +1531,9 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 					le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 				ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
 				ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+				if ((id == ASIC_INTERNAL_ENGINE_SS) ||
+				    (id == ASIC_INTERNAL_MEMORY_SS))
+					ss->rate /= 100;
 				if (rdev->flags & RADEON_IS_IGP)
 					radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
 				return true;
@@ -1927,6 +1948,7 @@ static const char *pp_lib_thermal_controller_names[] = {
 	"Northern Islands",
 	"Southern Islands",
 	"lm96163",
+	"Sea Islands",
 };
 
 union power_info {
@@ -1944,6 +1966,7 @@ union pplib_clock_info {
 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
 };
 
 union pplib_power_state {
@@ -2209,6 +2232,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 			 (controller->ucFanParameters &
 			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 		rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
+	} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
+		DRM_INFO("Internal thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
 	} else if ((controller->ucType ==
 		    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
 		   (controller->ucType ==
@@ -2241,8 +2269,8 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 	}
 }
 
-static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
-						 u16 *vddc, u16 *vddci)
+void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+					  u16 *vddc, u16 *vddci, u16 *mvdd)
 {
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
@@ -2252,6 +2280,7 @@ static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
 
 	*vddc = 0;
 	*vddci = 0;
+	*mvdd = 0;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
@@ -2259,8 +2288,10 @@ static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
 			(union firmware_info *)(mode_info->atom_context->bios +
 						data_offset);
 		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
-		if ((frev == 2) && (crev >= 2))
+		if ((frev == 2) && (crev >= 2)) {
 			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+			*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+		}
 	}
 }
 
@@ -2271,9 +2302,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
 	int j;
 	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
 	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
-	u16 vddc, vddci;
+	u16 vddc, vddci, mvdd;
 
-	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
+	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
 
 	rdev->pm.power_state[state_index].misc = misc;
 	rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2316,7 +2347,13 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
 		rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
 		rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
 	} else {
-		/* patch the table values with the default slck/mclk from firmware info */
+		u16 max_vddci = 0;
+
+		if (ASIC_IS_DCE4(rdev))
+			radeon_atom_get_max_voltage(rdev,
+						    SET_VOLTAGE_TYPE_ASIC_VDDCI,
+						    &max_vddci);
+		/* patch the table values with the default sclk/mclk from firmware info */
 		for (j = 0; j < mode_index; j++) {
 			rdev->pm.power_state[state_index].clock_info[j].mclk =
 				rdev->clock.default_mclk;
@@ -2325,6 +2362,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
 			if (vddc)
 				rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
 					vddc;
+			if (max_vddci)
+				rdev->pm.power_state[state_index].clock_info[j].voltage.vddci =
+					max_vddci;
 		}
 	}
 }
@@ -2347,6 +2387,15 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
 			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
 		}
+	} else if (rdev->family >= CHIP_BONAIRE) {
+		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+		sclk |= clock_info->ci.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_NONE;
 	} else if (rdev->family >= CHIP_TAHITI) {
 		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
 		sclk |= clock_info->si.ucEngineClockHigh << 16;
@@ -2667,6 +2716,8 @@ union get_clock_dividers {
 	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
 	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
 	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
+	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
 };
 
 int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
@@ -2699,7 +2750,8 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
 		break;
 	case 2:
 	case 3:
-		/* r6xx, r7xx, evergreen, ni */
+	case 5:
+		/* r6xx, r7xx, evergreen, ni, si */
 		if (rdev->family <= CHIP_RV770) {
 			args.v2.ucAction = clock_type;
 			args.v2.ulClock = cpu_to_le32(clock);	/* 10 khz */
@@ -2732,6 +2784,9 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
 			dividers->vco_mode = (args.v3.ucCntlFlag &
 					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
 		} else {
+			/* for SI we use ComputeMemoryClockParam for memory plls */
+			if (rdev->family >= CHIP_TAHITI)
+				return -EINVAL;
 			args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
 			if (strobe_mode)
 				args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
@@ -2757,9 +2812,76 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
 
 		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
-		dividers->post_div = args.v4.ucPostDiv;
+		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
 		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
 		break;
+	case 6:
+		/* CI */
+		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
+		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
+		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
+		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
+		dividers->ref_div = args.v6_out.ucPllRefDiv;
+		dividers->post_div = args.v6_out.ucPllPostDiv;
+		dividers->flags = args.v6_out.ucPllCntlFlag;
+		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
+		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
+					u32 clock,
+					bool strobe_mode,
+					struct atom_mpll_param *mpll_param)
+{
+	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
+	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
+	u8 frev, crev;
+
+	memset(&args, 0, sizeof(args));
+	memset(mpll_param, 0, sizeof(struct atom_mpll_param));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (frev) {
+	case 2:
+		switch (crev) {
+		case 1:
+			/* SI */
+			args.ulClock = cpu_to_le32(clock);	/* 10 khz */
+			args.ucInputFlag = 0;
+			if (strobe_mode)
+				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
+
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
+			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
+			mpll_param->post_div = args.ucPostDiv;
+			mpll_param->dll_speed = args.ucDllSpeed;
+			mpll_param->bwcntl = args.ucBWCntl;
+			mpll_param->vco_mode =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
+			mpll_param->yclk_sel =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
+			mpll_param->qdr =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
+			mpll_param->half_rate =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
 	default:
 		return -EINVAL;
 	}
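
[Editor's note] The divider fields returned above (ref_div, whole_fb_div/frac_fb_div, post_div) combine in the usual PLL relation, roughly f_out = f_ref * fb_div / (ref_div * post_div). An illustrative sketch, not from the patch; the exact fractional feedback scaling is hardware-specific and omitted here:

/*
 * Illustrative only: integer-feedback PLL output from the divider
 * fields, using kernel-style u32/u64 fixed-width types.
 */
#include <stdint.h>
typedef uint32_t u32;
typedef uint64_t u64;

static u32 pll_output_khz(u32 ref_khz, u32 fb_div, u32 ref_div, u32 post_div)
{
	if (ref_div == 0 || post_div == 0)
		return 0;	/* guard against bogus table data */
	return (u32)((u64)ref_khz * fb_div / ((u64)ref_div * post_div));
}

For example, with a 27,000 kHz reference, fb_div = 100, ref_div = 2 and post_div = 5, the output would be 27000 * 100 / 10 = 270,000 kHz.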
@@ -2819,6 +2941,48 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
+					 u32 eng_clock, u32 mem_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+	u32 tmp;
+
+	memset(&args, 0, sizeof(args));
+
+	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
+	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
+
+	args.ulTargetEngineClock = cpu_to_le32(tmp);
+	if (mem_clock)
+		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_update_memory_dll(struct radeon_device *rdev,
+				   u32 mem_clock)
+{
+	u32 args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+
+	args = cpu_to_le32(mem_clock);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_ac_timing(struct radeon_device *rdev,
+			       u32 mem_clock)
+{
+	SET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+	u32 tmp = mem_clock | (COMPUTE_MEMORY_PLL_PARAM << 24);
+
+	args.ulTargetMemoryClock = cpu_to_le32(tmp);	/* 10 khz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
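
[Editor's note] The three helpers just added share the AtomBIOS command pattern visible throughout this file: look up a command index in the master table, fill a parameter block in little-endian byte order (the tables take clocks in 10 kHz units, per the comments above), then hand the block to the AtomBIOS interpreter. A sketch of the pattern with a hypothetical parameter struct name (EXAMPLE_PS_ALLOCATION and ulTargetClock are stand-ins, not real ATOM definitions):

/* Sketch of the AtomBIOS command-call pattern (hypothetical struct). */
static void example_atom_command(struct radeon_device *rdev, u32 clock_10khz)
{
	EXAMPLE_PS_ALLOCATION args;	/* hypothetical parameter block */
	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);

	memset(&args, 0, sizeof(args));
	args.ulTargetClock = cpu_to_le32(clock_10khz);	/* tables take 10 kHz units */

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}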
 union set_voltage {
 	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
 	struct _SET_VOLTAGE_PARAMETERS v1;
@@ -2863,8 +3027,8 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
-static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
-				    u16 voltage_id, u16 *voltage)
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage)
 {
 	union set_voltage args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
@@ -2902,6 +3066,695 @@ static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
 	return 0;
 }
 
+int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
+						      u16 *voltage,
+						      u16 leakage_idx)
+{
+	return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
+int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
+					  u16 voltage_level, u8 voltage_type,
+					  u32 *gpio_value, u32 *gpio_mask)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
+
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*gpio_value = le32_to_cpu(*(u32 *)&args.v2);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+union voltage_object_info {
+	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
+	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
+	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
+};
+
+union voltage_object {
+	struct _ATOM_VOLTAGE_OBJECT v1;
+	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
+	union _ATOM_VOLTAGE_OBJECT_V3 v3;
+};
+
+static ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object_v1(ATOM_VOLTAGE_OBJECT_INFO *v1,
+							  u8 voltage_type)
+{
+	u32 size = le16_to_cpu(v1->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO, asVoltageObj[0]);
+	u8 *start = (u8 *)v1;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset);
+		if (vo->ucVoltageType == voltage_type)
+			return vo;
+		offset += offsetof(ATOM_VOLTAGE_OBJECT, asFormula.ucVIDAdjustEntries) +
+			vo->asFormula.ucNumOfVoltageEntries;
+	}
+	return NULL;
+}
+
+static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT_INFO_V2 *v2,
+							     u8 voltage_type)
+{
+	u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
+	u8 *start = (u8 *)v2;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
+		if (vo->ucVoltageType == voltage_type)
+			return vo;
+		offset += offsetof(ATOM_VOLTAGE_OBJECT_V2, asFormula.asVIDAdjustEntries) +
+			(vo->asFormula.ucNumOfVoltageEntries * sizeof(VOLTAGE_LUT_ENTRY));
+	}
+	return NULL;
+}
+
+static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
+							     u8 voltage_type, u8 voltage_mode)
+{
+	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
+	u8 *start = (u8 *)v3;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
+		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
+		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
+			return vo;
+		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
+	}
+	return NULL;
+}
+
3179radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
3180 u8 voltage_type, u8 voltage_mode)
3181{
3182 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3183 u8 frev, crev;
3184 u16 data_offset, size;
3185 union voltage_object_info *voltage_info;
3186 union voltage_object *voltage_object = NULL;
3187
3188 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3189 &frev, &crev, &data_offset)) {
3190 voltage_info = (union voltage_object_info *)
3191 (rdev->mode_info.atom_context->bios + data_offset);
3192
3193 switch (frev) {
3194 case 1:
3195 case 2:
3196 switch (crev) {
3197 case 1:
3198 voltage_object = (union voltage_object *)
3199 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3200 if (voltage_object &&
3201 (voltage_object->v1.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
3202 return true;
3203 break;
3204 case 2:
3205 voltage_object = (union voltage_object *)
3206 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3207 if (voltage_object &&
3208 (voltage_object->v2.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
3209 return true;
3210 break;
3211 default:
3212 DRM_ERROR("unknown voltage object table\n");
3213 return false;
3214 }
3215 break;
3216 case 3:
3217 switch (crev) {
3218 case 1:
3219 if (atom_lookup_voltage_object_v3(&voltage_info->v3,
3220 voltage_type, voltage_mode))
3221 return true;
3222 break;
3223 default:
3224 DRM_ERROR("unknown voltage object table\n");
3225 return false;
3226 }
3227 break;
3228 default:
3229 DRM_ERROR("unknown voltage object table\n");
3230 return false;
3231 }
3232
3233 }
3234 return false;
3235}
3236
3237int radeon_atom_get_max_voltage(struct radeon_device *rdev,
3238 u8 voltage_type, u16 *max_voltage)
3239{
3240 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3241 u8 frev, crev;
3242 u16 data_offset, size;
3243 union voltage_object_info *voltage_info;
3244 union voltage_object *voltage_object = NULL;
3245
3246 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3247 &frev, &crev, &data_offset)) {
3248 voltage_info = (union voltage_object_info *)
3249 (rdev->mode_info.atom_context->bios + data_offset);
3250
3251 switch (crev) {
3252 case 1:
3253 voltage_object = (union voltage_object *)
3254 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3255 if (voltage_object) {
3256 ATOM_VOLTAGE_FORMULA *formula =
3257 &voltage_object->v1.asFormula;
3258 if (formula->ucFlag & 1)
3259 *max_voltage =
3260 le16_to_cpu(formula->usVoltageBaseLevel) +
3261 formula->ucNumOfVoltageEntries / 2 *
3262 le16_to_cpu(formula->usVoltageStep);
3263 else
3264 *max_voltage =
3265 le16_to_cpu(formula->usVoltageBaseLevel) +
3266 (formula->ucNumOfVoltageEntries - 1) *
3267 le16_to_cpu(formula->usVoltageStep);
3268 return 0;
3269 }
3270 break;
3271 case 2:
3272 voltage_object = (union voltage_object *)
3273 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3274 if (voltage_object) {
3275 ATOM_VOLTAGE_FORMULA_V2 *formula =
3276 &voltage_object->v2.asFormula;
3277 if (formula->ucNumOfVoltageEntries) {
3278 *max_voltage =
3279 le16_to_cpu(formula->asVIDAdjustEntries[
3280 formula->ucNumOfVoltageEntries - 1
3281 ].usVoltageValue);
3282 return 0;
3283 }
3284 }
3285 break;
3286 default:
3287 DRM_ERROR("unknown voltage object table\n");
3288 return -EINVAL;
3289 }
3290
3291 }
3292 return -EINVAL;
3293}
3294
3295int radeon_atom_get_min_voltage(struct radeon_device *rdev,
3296 u8 voltage_type, u16 *min_voltage)
3297{
3298 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3299 u8 frev, crev;
3300 u16 data_offset, size;
3301 union voltage_object_info *voltage_info;
3302 union voltage_object *voltage_object = NULL;
3303
3304 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3305 &frev, &crev, &data_offset)) {
3306 voltage_info = (union voltage_object_info *)
3307 (rdev->mode_info.atom_context->bios + data_offset);
3308
3309 switch (crev) {
3310 case 1:
3311 voltage_object = (union voltage_object *)
3312 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3313 if (voltage_object) {
3314 ATOM_VOLTAGE_FORMULA *formula =
3315 &voltage_object->v1.asFormula;
3316 *min_voltage =
3317 le16_to_cpu(formula->usVoltageBaseLevel);
3318 return 0;
3319 }
3320 break;
3321 case 2:
3322 voltage_object = (union voltage_object *)
3323 atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
3324 if (voltage_object) {
3325 ATOM_VOLTAGE_FORMULA_V2 *formula =
3326 &voltage_object->v2.asFormula;
3327 if (formula->ucNumOfVoltageEntries) {
3328 *min_voltage =
3329 le16_to_cpu(formula->asVIDAdjustEntries[
3330 0
3331 ].usVoltageValue);
3332 return 0;
3333 }
3334 }
3335 break;
3336 default:
3337 DRM_ERROR("unknown voltage object table\n");
3338 return -EINVAL;
3339 }
3340
3341 }
3342 return -EINVAL;
3343}
3344
3345int radeon_atom_get_voltage_step(struct radeon_device *rdev,
3346 u8 voltage_type, u16 *voltage_step)
3347{
3348 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
3349 u8 frev, crev;
3350 u16 data_offset, size;
3351 union voltage_object_info *voltage_info;
3352 union voltage_object *voltage_object = NULL;
3353
3354 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3355 &frev, &crev, &data_offset)) {
3356 voltage_info = (union voltage_object_info *)
3357 (rdev->mode_info.atom_context->bios + data_offset);
3358
3359 switch (crev) {
3360 case 1:
3361 voltage_object = (union voltage_object *)
3362 atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
3363 if (voltage_object) {
3364 ATOM_VOLTAGE_FORMULA *formula =
3365 &voltage_object->v1.asFormula;
3366 if (formula->ucFlag & 1)
3367 *voltage_step =
3368 (le16_to_cpu(formula->usVoltageStep) + 1) / 2;
3369 else
3370 *voltage_step =
3371 le16_to_cpu(formula->usVoltageStep);
3372 return 0;
3373 }
3374 break;
3375 case 2:
3376 return -EINVAL;
3377 default:
3378 DRM_ERROR("unknown voltage object table\n");
3379 return -EINVAL;
3380 }
3381
3382 }
3383 return -EINVAL;
3384}
3385
3386int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
3387 u8 voltage_type,
3388 u16 nominal_voltage,
3389 u16 *true_voltage)
3390{
3391 u16 min_voltage, max_voltage, voltage_step;
3392
3393 if (radeon_atom_get_max_voltage(rdev, voltage_type, &max_voltage))
3394 return -EINVAL;
3395 if (radeon_atom_get_min_voltage(rdev, voltage_type, &min_voltage))
3396 return -EINVAL;
3397 if (radeon_atom_get_voltage_step(rdev, voltage_type, &voltage_step))
3398 return -EINVAL;
3399
3400 if (nominal_voltage <= min_voltage)
3401 *true_voltage = min_voltage;
3402 else if (nominal_voltage >= max_voltage)
3403 *true_voltage = max_voltage;
3404 else
3405 *true_voltage = min_voltage +
3406 ((nominal_voltage - min_voltage) / voltage_step) *
3407 voltage_step;
3408
3409 return 0;
3410}
3411
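[Editor's note] The rounding helper above clamps the request to [min, max] and otherwise snaps down to the nearest step above the minimum. A quick worked example with assumed table values (min = 900 mV, max = 1100 mV, step = 25 mV): a nominal 987 mV request becomes 900 + ((987 - 900) / 25) * 25 = 900 + 3 * 25 = 975 mV, since the integer division truncates; anything at or below 900 mV clamps to 900, anything at or above 1100 mV clamps to 1100.
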
+int radeon_atom_get_voltage_table(struct radeon_device *rdev,
+				  u8 voltage_type, u8 voltage_mode,
+				  struct atom_voltage_table *voltage_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	int i, ret;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+		case 2:
+			switch (crev) {
+			case 1:
+				DRM_ERROR("old table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			case 2:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
+				if (voltage_object) {
+					ATOM_VOLTAGE_FORMULA_V2 *formula =
+						&voltage_object->v2.asFormula;
+					if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue);
+						ret = radeon_atom_get_voltage_gpio_settings(rdev,
+											    voltage_table->entries[i].value,
+											    voltage_type,
+											    &voltage_table->entries[i].smio_low,
+											    &voltage_table->mask_low);
+						if (ret)
+							return ret;
+					}
+					voltage_table->count = formula->ucNumOfVoltageEntries;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type, voltage_mode);
+				if (voltage_object) {
+					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
+						&voltage_object->v3.asGpioVoltageObj;
+					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue);
+						voltage_table->entries[i].smio_low =
+							le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId);
+					}
+					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
+					voltage_table->count = gpio->ucGpioEntryNum;
+					voltage_table->phase_delay = gpio->ucPhaseDelay;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
+union vram_info {
+	struct _ATOM_VRAM_INFO_V3 v1_3;
+	struct _ATOM_VRAM_INFO_V4 v1_4;
+	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
+};
+
+int radeon_atom_get_memory_info(struct radeon_device *rdev,
+				u8 module_index, struct atom_memory_info *mem_info)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, i;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+	u8 *p;
+
+	memset(mem_info, 0, sizeof(struct atom_memory_info));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 3:
+				/* r6xx */
+				if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V3 *vram_module =
+						(ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
+					p = (u8 *)vram_info->v1_3.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						vram_module = (ATOM_VRAM_MODULE_V3 *)p;
+						if (le16_to_cpu(vram_module->usSize) == 0)
+							return -EINVAL;
+						p += le16_to_cpu(vram_module->usSize);
+					}
+					mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			case 4:
+				/* r7xx, evergreen */
+				if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V4 *vram_module =
+						(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+					p = (u8 *)vram_info->v1_4.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						vram_module = (ATOM_VRAM_MODULE_V4 *)p;
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						p += le16_to_cpu(vram_module->usModuleSize);
3553 }
3554 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3555 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
3556 } else
3557 return -EINVAL;
3558 break;
3559 default:
3560 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3561 return -EINVAL;
3562 }
3563 break;
3564 case 2:
3565 switch (crev) {
3566 case 1:
3567 /* ni */
3568 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
3569 ATOM_VRAM_MODULE_V7 *vram_module =
3570 (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
3571 p = (u8 *)vram_info->v2_1.aVramInfo;
3572
3573 for (i = 0; i < module_index; i++) {
3574 vram_module = (ATOM_VRAM_MODULE_V7 *)p;
3575 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3576 return -EINVAL;
3577 p += le16_to_cpu(vram_module->usModuleSize);
3578 }
3579 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3580 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
3581 } else
3582 return -EINVAL;
3583 break;
3584 default:
3585 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3586 return -EINVAL;
3587 }
3588 break;
3589 default:
3590 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3591 return -EINVAL;
3592 }
3593 return 0;
3594 }
3595 return -EINVAL;
3596}
3597
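All the crev branches above share one access pattern: VRAM module records are variable-length, each starting with its own byte size, so reaching module N means hopping over N records rather than indexing an array. A stand-alone sketch of that walk with a hypothetical record layout (the real code reads the size through le16_to_cpu and the AtomBIOS structs; this demo assumes a little-endian host and naturally aligned records):

#include <stdint.h>
#include <stdio.h>

struct module_hdr {
        uint16_t size;          /* total bytes of this record */
        /* variable-length payload follows */
};

static const struct module_hdr *find_module(const uint8_t *table,
                                            unsigned int index)
{
        const struct module_hdr *m = (const struct module_hdr *)table;
        unsigned int i;

        for (i = 0; i < index; i++) {
                if (m->size == 0)       /* corrupt table: bail out */
                        return NULL;
                table += m->size;
                m = (const struct module_hdr *)table;
        }
        return m;
}

int main(void)
{
        /* two records: 4 bytes then 6 bytes (sizes stored little-endian) */
        uint8_t table[] = { 4, 0, 0xaa, 0xbb, 6, 0, 1, 2, 3, 4 };
        const struct module_hdr *m = find_module(table, 1);

        printf("second record is %u bytes\n", m ? m->size : 0);
        return 0;
}

The size == 0 check mirrors the driver's -EINVAL bail-out: a zero-length record would otherwise stop the walk from ever advancing.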
3598int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3599 bool gddr5, u8 module_index,
3600 struct atom_memory_clock_range_table *mclk_range_table)
3601{
3602 int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
3603 u8 frev, crev, i;
3604 u16 data_offset, size;
3605 union vram_info *vram_info;
3606 u32 mem_timing_size = gddr5 ?
3607 sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
3608 u8 *p;
3609
3610 memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
3611
3612 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3613 &frev, &crev, &data_offset)) {
3614 vram_info = (union vram_info *)
3615 (rdev->mode_info.atom_context->bios + data_offset);
3616 switch (frev) {
3617 case 1:
3618 switch (crev) {
3619 case 3:
3620 DRM_ERROR("old table version %d, %d\n", frev, crev);
3621 return -EINVAL;
3622 case 4:
3623 /* r7xx, evergreen */
3624 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3625 ATOM_VRAM_MODULE_V4 *vram_module =
3626 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3627 ATOM_MEMORY_TIMING_FORMAT *format;
3628 p = (u8 *)vram_info->v1_4.aVramInfo;
3629
3630 for (i = 0; i < module_index; i++) {
3631 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3632 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3633 return -EINVAL;
3634 p += le16_to_cpu(vram_module->usModuleSize);
3635 }
3636 mclk_range_table->num_entries = (u8)
3637 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
3638 mem_timing_size);
3639 p = (u8 *)vram_module->asMemTiming;
3640 for (i = 0; i < mclk_range_table->num_entries; i++) {
3641 format = (ATOM_MEMORY_TIMING_FORMAT *)p;
3642 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
3643 p += mem_timing_size;
3644 }
3645 } else
3646 return -EINVAL;
3647 break;
3648 default:
3649 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3650 return -EINVAL;
3651 }
3652 break;
3653 case 2:
3654 DRM_ERROR("new table version %d, %d\n", frev, crev);
3655 return -EINVAL;
3656 default:
3657 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3658 return -EINVAL;
3659 }
3660 return 0;
3661 }
3662 return -EINVAL;
3663}
3664
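The num_entries computation above is worth a second look: everything in a V4 module record past the asMemTiming offset is an array of timing entries, so the count is just (record size - offsetof(asMemTiming)) / entry size, where the entry size depends on whether the board is GDDR5. A self-contained model of that arithmetic (hypothetical structs, not the real AtomBIOS layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct timing { uint32_t clk_range; uint32_t regs[4]; };

struct module_v4 {
        uint16_t size;                  /* total bytes in this record */
        uint8_t  misc[30];              /* header fields before the array */
        struct timing timings[];        /* variable-length tail */
};

int main(void)
{
        uint16_t module_size = 96;      /* as read from the record itself */
        size_t n = (module_size - offsetof(struct module_v4, timings)) /
                   sizeof(struct timing);

        printf("%zu timing entries\n", n);      /* (96 - 32) / 20 = 3 */
        return 0;
}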
3665#define MEM_ID_MASK 0xff000000
3666#define MEM_ID_SHIFT 24
3667#define CLOCK_RANGE_MASK 0x00ffffff
3668#define CLOCK_RANGE_SHIFT 0
3669#define LOW_NIBBLE_MASK 0xf
3670#define DATA_EQU_PREV 0
3671#define DATA_FROM_TABLE 4
3672
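These masks document the packing of each 32-bit header word in the MC register data block: the module id lives in the top byte and the upper clock bound of the range in the low 24 bits. Extraction is the usual mask-then-shift; a tiny sketch using the same constants:

#include <stdint.h>
#include <stdio.h>

#define MEM_ID_MASK        0xff000000
#define MEM_ID_SHIFT       24
#define CLOCK_RANGE_MASK   0x00ffffff
#define CLOCK_RANGE_SHIFT  0

static inline uint8_t entry_mem_id(uint32_t e)
{
        return (uint8_t)((e & MEM_ID_MASK) >> MEM_ID_SHIFT);
}

static inline uint32_t entry_clock_range(uint32_t e)
{
        return (e & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT;
}

int main(void)
{
        uint32_t entry = (7u << MEM_ID_SHIFT) | 123456u;

        printf("id=%u range=%u\n", entry_mem_id(entry),
               entry_clock_range(entry));
        return 0;
}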
3673int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
3674 u8 module_index,
3675 struct atom_mc_reg_table *reg_table)
3676{
3677 int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
3678 u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
3679 u32 i = 0, j;
3680 u16 data_offset, size;
3681 union vram_info *vram_info;
3682
3683 memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
3684
3685 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
3686 &frev, &crev, &data_offset)) {
3687 vram_info = (union vram_info *)
3688 (rdev->mode_info.atom_context->bios + data_offset);
3689 switch (frev) {
3690 case 1:
3691 DRM_ERROR("old table version %d, %d\n", frev, crev);
3692 return -EINVAL;
3693 case 2:
3694 switch (crev) {
3695 case 1:
3696 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
3697 ATOM_INIT_REG_BLOCK *reg_block =
3698 (ATOM_INIT_REG_BLOCK *)
3699 ((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
3700 ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
3701 (ATOM_MEMORY_SETTING_DATA_BLOCK *)
3702 ((u8 *)reg_block + (2 * sizeof(u16)) +
3703 le16_to_cpu(reg_block->usRegIndexTblSize));
3704 num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
3705 sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
3706 if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
3707 return -EINVAL;
3708 while ((i < num_entries) &&
3709 !(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER)) {
3710 reg_table->mc_reg_address[i].s1 =
3711 (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex));
3712 reg_table->mc_reg_address[i].pre_reg_data =
3713 (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength);
3714 i++;
3715 }
3716 reg_table->last = i;
3717 while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
3718 (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
3719 t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
3720 if (module_index == t_mem_id) {
3721 reg_table->mc_reg_table_entry[num_ranges].mclk_max =
3722 (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
3723 for (i = 0, j = 1; i < reg_table->last; i++) {
3724 if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
3725 reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
3726 (u32)*((u32 *)reg_data + j);
3727 j++;
3728 } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
3729 reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
3730 reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
3731 }
3732 }
3733 num_ranges++;
3734 }
3735 reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
3736 ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
3737 }
3738 if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK)
3739 return -EINVAL;
3740 reg_table->num_entries = num_ranges;
3741 } else
3742 return -EINVAL;
3743 break;
3744 default:
3745 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3746 return -EINVAL;
3747 }
3748 break;
3749 default:
3750 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
3751 return -EINVAL;
3752 }
3753 return 0;
3754 }
3755 return -EINVAL;
3756}
3757
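The inner loop of radeon_atom_init_mc_reg_table() is effectively a small decompressor: for each register slot, the low nibble of pre_reg_data says whether the value is stored in the data block (DATA_FROM_TABLE, consuming the next word) or simply repeats the previous slot (DATA_EQU_PREV). A minimal model of that expansion (hypothetical layout; in the driver j starts at 1 because word 0 of each block is the mem-id/clock-range header):

#include <stdint.h>
#include <stdio.h>

#define LOW_NIBBLE_MASK 0xf
#define DATA_EQU_PREV   0       /* reuse the previous slot's value */
#define DATA_FROM_TABLE 4       /* consume the next word from the block */

static void expand_row(const uint8_t *pre_reg_data, unsigned int nregs,
                       const uint32_t *packed, uint32_t *out)
{
        unsigned int i, j = 0;

        for (i = 0; i < nregs; i++) {
                switch (pre_reg_data[i] & LOW_NIBBLE_MASK) {
                case DATA_FROM_TABLE:
                        out[i] = packed[j++];
                        break;
                case DATA_EQU_PREV:
                        out[i] = out[i - 1];    /* never the first slot in practice */
                        break;
                }
        }
}

int main(void)
{
        uint8_t flags[4] = { DATA_FROM_TABLE, DATA_EQU_PREV,
                             DATA_FROM_TABLE, DATA_EQU_PREV };
        uint32_t packed[2] = { 0x11, 0x22 };
        uint32_t regs[4];

        expand_row(flags, 4, packed, regs);
        printf("%x %x %x %x\n", regs[0], regs[1], regs[2], regs[3]);
        return 0;
}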
2905void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) 3758void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2906{ 3759{
2907 struct radeon_device *rdev = dev->dev_private; 3760 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7e265a58141f..13a130fb3517 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -106,7 +106,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
106 radeon_bo_list_add_object(&p->relocs[i].lobj, 106 radeon_bo_list_add_object(&p->relocs[i].lobj,
107 &p->validated); 107 &p->validated);
108 } 108 }
109 return radeon_bo_list_validate(&p->validated, p->ring); 109 return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
110} 110}
111 111
112static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) 112static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -314,15 +314,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
314 * If error is set then unvalidate buffer, otherwise just free memory 314 * If error is set then unvalidate buffer, otherwise just free memory
315 * used by parsing context. 315 * used by parsing context.
316 **/ 316 **/
317static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) 317static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
318{ 318{
319 unsigned i; 319 unsigned i;
320 320
321 if (!error) { 321 if (!error) {
322 ttm_eu_fence_buffer_objects(&parser->validated, 322 ttm_eu_fence_buffer_objects(&parser->ticket,
323 &parser->validated,
323 parser->ib.fence); 324 parser->ib.fence);
324 } else { 325 } else if (backoff) {
325 ttm_eu_backoff_reservation(&parser->validated); 326 ttm_eu_backoff_reservation(&parser->ticket,
327 &parser->validated);
326 } 328 }
327 329
328 if (parser->relocs != NULL) { 330 if (parser->relocs != NULL) {
@@ -535,7 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
535 r = radeon_cs_parser_init(&parser, data); 537 r = radeon_cs_parser_init(&parser, data);
536 if (r) { 538 if (r) {
537 DRM_ERROR("Failed to initialize parser !\n"); 539 DRM_ERROR("Failed to initialize parser !\n");
538 radeon_cs_parser_fini(&parser, r); 540 radeon_cs_parser_fini(&parser, r, false);
539 up_read(&rdev->exclusive_lock); 541 up_read(&rdev->exclusive_lock);
540 r = radeon_cs_handle_lockup(rdev, r); 542 r = radeon_cs_handle_lockup(rdev, r);
541 return r; 543 return r;
@@ -544,12 +546,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
544 if (r) { 546 if (r) {
545 if (r != -ERESTARTSYS) 547 if (r != -ERESTARTSYS)
546 DRM_ERROR("Failed to parse relocation %d!\n", r); 548 DRM_ERROR("Failed to parse relocation %d!\n", r);
547 radeon_cs_parser_fini(&parser, r); 549 radeon_cs_parser_fini(&parser, r, false);
548 up_read(&rdev->exclusive_lock); 550 up_read(&rdev->exclusive_lock);
549 r = radeon_cs_handle_lockup(rdev, r); 551 r = radeon_cs_handle_lockup(rdev, r);
550 return r; 552 return r;
551 } 553 }
552 554
555 /* XXX pick SD/HD/MVC */
553 if (parser.ring == R600_RING_TYPE_UVD_INDEX) 556 if (parser.ring == R600_RING_TYPE_UVD_INDEX)
554 radeon_uvd_note_usage(rdev); 557 radeon_uvd_note_usage(rdev);
555 558
@@ -562,7 +565,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
562 goto out; 565 goto out;
563 } 566 }
564out: 567out:
565 radeon_cs_parser_fini(&parser, r); 568 radeon_cs_parser_fini(&parser, r, true);
566 up_read(&rdev->exclusive_lock); 569 up_read(&rdev->exclusive_lock);
567 r = radeon_cs_handle_lockup(rdev, r); 570 r = radeon_cs_handle_lockup(rdev, r);
568 return r; 571 return r;
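The extra ticket plumbing in this hunk is the radeon side of the ww_mutex conversion: one ww_acquire_ctx is threaded through reserve, fence, and backoff so that reserving a whole buffer list can detect lock-order deadlocks and retry cleanly. The calling pattern, sketched from the signatures visible in this diff (kernel context assumed, error handling trimmed, not compilable stand-alone; submit_commands() is a hypothetical stand-in for the parse/ib path):

	struct ww_acquire_ctx ticket;
	int r;

	r = ttm_eu_reserve_buffers(&ticket, &validated);
	if (r)
		return r;		/* nothing reserved, nothing to back off */

	r = submit_commands();
	if (r) {
		ttm_eu_backoff_reservation(&ticket, &validated);
		return r;
	}

	/* success: attach the fence and drop all reservations in one go */
	ttm_eu_fence_buffer_objects(&ticket, &validated, fence);

This is also why radeon_cs_parser_fini() grows the backoff flag: the early failure paths pass false because reservation never succeeded, so there is nothing to back off.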
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index b097d5b4ff39..9630e8d95fb4 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -27,9 +27,6 @@
27#include <drm/radeon_drm.h> 27#include <drm/radeon_drm.h>
28#include "radeon.h" 28#include "radeon.h"
29 29
30#define CURSOR_WIDTH 64
31#define CURSOR_HEIGHT 64
32
33static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) 30static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
34{ 31{
35 struct radeon_device *rdev = crtc->dev->dev_private; 32 struct radeon_device *rdev = crtc->dev->dev_private;
@@ -167,7 +164,8 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
167 goto unpin; 164 goto unpin;
168 } 165 }
169 166
170 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { 167 if ((width > radeon_crtc->max_cursor_width) ||
168 (height > radeon_crtc->max_cursor_height)) {
171 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 169 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
172 return -EINVAL; 170 return -EINVAL;
173 } 171 }
@@ -233,11 +231,11 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
233 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 231 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
234 232
235 if (x < 0) { 233 if (x < 0) {
236 xorigin = min(-x, CURSOR_WIDTH - 1); 234 xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
237 x = 0; 235 x = 0;
238 } 236 }
239 if (y < 0) { 237 if (y < 0) {
240 yorigin = min(-y, CURSOR_HEIGHT - 1); 238 yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
241 y = 0; 239 y = 0;
242 } 240 }
243 241
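The CURSOR_WIDTH/HEIGHT constants move into per-crtc fields here because DCE8 parts support a larger cursor; the clamping logic itself is unchanged. What the negative-offset branch does: when the cursor's top-left goes off-screen, the hardware position is pinned at 0 and the image is panned xorigin pixels instead, capped so at least one column stays visible. A user-space model of that clamp (illustrative only):

#include <stdio.h>

static void clamp_cursor(int *x, int *xorigin, int max_cursor_width)
{
        if (*x < 0) {
                /* pan the image -x pixels in, keeping one visible column */
                *xorigin = (-*x < max_cursor_width - 1) ? -*x
                                                        : max_cursor_width - 1;
                *x = 0;
        }
}

int main(void)
{
        int x = -10, xorigin = 0;

        clamp_cursor(&x, &xorigin, 64);
        printf("x=%d xorigin=%d\n", x, xorigin);        /* x=0 xorigin=10 */
        return 0;
}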
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b0dc0b6cb4e0..82335e38ec4f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -95,6 +95,9 @@ static const char radeon_family_name[][16] = {
95 "VERDE", 95 "VERDE",
96 "OLAND", 96 "OLAND",
97 "HAINAN", 97 "HAINAN",
98 "BONAIRE",
99 "KAVERI",
100 "KABINI",
98 "LAST", 101 "LAST",
99}; 102};
100 103
@@ -229,6 +232,94 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
229} 232}
230 233
231/* 234/*
235 * GPU doorbell aperture helper functions.
236 */
237/**
238 * radeon_doorbell_init - Init doorbell driver information.
239 *
240 * @rdev: radeon_device pointer
241 *
242 * Init doorbell driver information (CIK)
243 * Returns 0 on success, error on failure.
244 */
245int radeon_doorbell_init(struct radeon_device *rdev)
246{
247 int i;
248
249 /* doorbell bar mapping */
250 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
251 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
252
253 /* limit to 4 MB for now */
254 if (rdev->doorbell.size > (4 * 1024 * 1024))
255 rdev->doorbell.size = 4 * 1024 * 1024;
256
257 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
258 if (rdev->doorbell.ptr == NULL) {
259 return -ENOMEM;
260 }
261 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
262 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
263
264 rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
265
266 for (i = 0; i < rdev->doorbell.num_pages; i++) {
267 rdev->doorbell.free[i] = true;
268 }
269 return 0;
270}
271
272/**
273 * radeon_doorbell_fini - Tear down doorbell driver information.
274 *
275 * @rdev: radeon_device pointer
276 *
277 * Tear down doorbell driver information (CIK)
278 */
279void radeon_doorbell_fini(struct radeon_device *rdev)
280{
281 iounmap(rdev->doorbell.ptr);
282 rdev->doorbell.ptr = NULL;
283}
284
285/**
286 * radeon_doorbell_get - Allocate a doorbell page
287 *
288 * @rdev: radeon_device pointer
289 * @doorbell: doorbell page number
290 *
291 * Allocate a doorbell page for use by the driver (all asics).
292 * Returns 0 on success or -EINVAL on failure.
293 */
294int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
295{
296 int i;
297
298 for (i = 0; i < rdev->doorbell.num_pages; i++) {
299 if (rdev->doorbell.free[i]) {
300 rdev->doorbell.free[i] = false;
301 *doorbell = i;
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308/**
309 * radeon_doorbell_free - Free a doorbell page
310 *
311 * @rdev: radeon_device pointer
312 * @doorbell: doorbell page number
313 *
314 * Free a doorbell page allocated for use by the driver (all asics)
315 */
316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
317{
318 if (doorbell < rdev->doorbell.num_pages)
319 rdev->doorbell.free[doorbell] = true;
320}
321
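The doorbell allocator above is deliberately simple: a boolean free[] map scanned first-fit, one page per allocation, with no locking shown in this hunk. The same policy in a stand-alone model (illustrative; the real map lives in rdev->doorbell and is sized from the BAR):

#include <stdbool.h>
#include <stdio.h>

#define NUM_PAGES 1024

static bool page_used[NUM_PAGES];       /* zero-initialized: all free */

static int page_get(unsigned int *out)
{
        unsigned int i;

        for (i = 0; i < NUM_PAGES; i++) {
                if (!page_used[i]) {
                        page_used[i] = true;
                        *out = i;
                        return 0;
                }
        }
        return -1;                      /* aperture exhausted */
}

static void page_put(unsigned int i)
{
        if (i < NUM_PAGES)
                page_used[i] = false;
}

int main(void)
{
        unsigned int a, b;

        page_get(&a);
        page_get(&b);
        page_put(a);
        printf("got pages %u and %u\n", a, b);
        return 0;
}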
322/*
232 * radeon_wb_*() 323 * radeon_wb_*()
233 * Writeback is the method by which the GPU updates special pages 324 * Writeback is the method by which the GPU updates special pages
234 * in memory with the status of certain GPU events (fences, ring pointers, 325 * in memory with the status of certain GPU events (fences, ring pointers,
@@ -1145,8 +1236,13 @@ int radeon_device_init(struct radeon_device *rdev,
1145 /* Registers mapping */ 1236 /* Registers mapping */
1146 /* TODO: block userspace mapping of io register */ 1237 /* TODO: block userspace mapping of io register */
1147 spin_lock_init(&rdev->mmio_idx_lock); 1238 spin_lock_init(&rdev->mmio_idx_lock);
1148 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 1239 if (rdev->family >= CHIP_BONAIRE) {
1149 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1240 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1241 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1242 } else {
1243 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1244 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1245 }
1150 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1246 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1151 if (rdev->rmmio == NULL) { 1247 if (rdev->rmmio == NULL) {
1152 return -ENOMEM; 1248 return -ENOMEM;
@@ -1154,6 +1250,10 @@ int radeon_device_init(struct radeon_device *rdev,
1154 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 1250 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1155 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 1251 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1156 1252
1253 /* doorbell bar mapping */
1254 if (rdev->family >= CHIP_BONAIRE)
1255 radeon_doorbell_init(rdev);
1256
1157 /* io port mapping */ 1257 /* io port mapping */
1158 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1258 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1159 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { 1259 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
@@ -1231,6 +1331,8 @@ void radeon_device_fini(struct radeon_device *rdev)
1231 rdev->rio_mem = NULL; 1331 rdev->rio_mem = NULL;
1232 iounmap(rdev->rmmio); 1332 iounmap(rdev->rmmio);
1233 rdev->rmmio = NULL; 1333 rdev->rmmio = NULL;
1334 if (rdev->family >= CHIP_BONAIRE)
1335 radeon_doorbell_fini(rdev);
1234 radeon_debugfs_remove_files(rdev); 1336 radeon_debugfs_remove_files(rdev);
1235} 1337}
1236 1338
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index eb18bb7af1cc..c2b67b4e1ac2 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -153,7 +153,13 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
153 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); 153 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
154 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 154 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
155 WREG32(0x6940 + radeon_crtc->crtc_offset, 0); 155 WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
156 156 if (ASIC_IS_DCE8(rdev)) {
157 /* XXX this only needs to be programmed once per crtc at startup,
158 * not sure where the best place for it is
159 */
160 WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
161 CIK_CURSOR_ALPHA_BLND_ENA);
162 }
157} 163}
158 164
159static void legacy_crtc_load_lut(struct drm_crtc *crtc) 165static void legacy_crtc_load_lut(struct drm_crtc *crtc)
@@ -512,6 +518,14 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
512 radeon_crtc->crtc_id = index; 518 radeon_crtc->crtc_id = index;
513 rdev->mode_info.crtcs[index] = radeon_crtc; 519 rdev->mode_info.crtcs[index] = radeon_crtc;
514 520
521 if (rdev->family >= CHIP_BONAIRE) {
522 radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
523 radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
524 } else {
525 radeon_crtc->max_cursor_width = CURSOR_WIDTH;
526 radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
527 }
528
515#if 0 529#if 0
516 radeon_crtc->mode_set.crtc = &radeon_crtc->base; 530 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
517 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); 531 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
@@ -530,7 +544,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
530 radeon_legacy_init_crtc(dev, radeon_crtc); 544 radeon_legacy_init_crtc(dev, radeon_crtc);
531} 545}
532 546
533static const char *encoder_names[37] = { 547static const char *encoder_names[38] = {
534 "NONE", 548 "NONE",
535 "INTERNAL_LVDS", 549 "INTERNAL_LVDS",
536 "INTERNAL_TMDS1", 550 "INTERNAL_TMDS1",
@@ -567,7 +581,8 @@ static const char *encoder_names[37] = {
567 "INTERNAL_UNIPHY2", 581 "INTERNAL_UNIPHY2",
568 "NUTMEG", 582 "NUTMEG",
569 "TRAVIS", 583 "TRAVIS",
570 "INTERNAL_VCE" 584 "INTERNAL_VCE",
585 "INTERNAL_UNIPHY3",
571}; 586};
572 587
573static const char *hpd_names[6] = { 588static const char *hpd_names[6] = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 094e7e5ea39e..e5419b350170 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -74,9 +74,10 @@
74 * 2.31.0 - Add fastfb support for rs690 74 * 2.31.0 - Add fastfb support for rs690
75 * 2.32.0 - new info request for rings working 75 * 2.32.0 - new info request for rings working
76 * 2.33.0 - Add SI tiling mode array query 76 * 2.33.0 - Add SI tiling mode array query
77 * 2.34.0 - Add CIK tiling mode array query
77 */ 78 */
78#define KMS_DRIVER_MAJOR 2 79#define KMS_DRIVER_MAJOR 2
79#define KMS_DRIVER_MINOR 33 80#define KMS_DRIVER_MINOR 34
80#define KMS_DRIVER_PATCHLEVEL 0 81#define KMS_DRIVER_PATCHLEVEL 0
81int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 82int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
82int radeon_driver_unload_kms(struct drm_device *dev); 83int radeon_driver_unload_kms(struct drm_device *dev);
@@ -127,6 +128,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
127 size_t size, 128 size_t size,
128 struct sg_table *sg); 129 struct sg_table *sg);
129int radeon_gem_prime_pin(struct drm_gem_object *obj); 130int radeon_gem_prime_pin(struct drm_gem_object *obj);
131void radeon_gem_prime_unpin(struct drm_gem_object *obj);
130void *radeon_gem_prime_vmap(struct drm_gem_object *obj); 132void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
131void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 133void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
132extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, 134extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -164,6 +166,7 @@ int radeon_pcie_gen2 = -1;
164int radeon_msi = -1; 166int radeon_msi = -1;
165int radeon_lockup_timeout = 10000; 167int radeon_lockup_timeout = 10000;
166int radeon_fastfb = 0; 168int radeon_fastfb = 0;
169int radeon_dpm = -1;
167 170
168MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 171MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
169module_param_named(no_wb, radeon_no_wb, int, 0444); 172module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -219,6 +222,9 @@ module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
219MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)"); 222MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
220module_param_named(fastfb, radeon_fastfb, int, 0444); 223module_param_named(fastfb, radeon_fastfb, int, 0444);
221 224
225MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
226module_param_named(dpm, radeon_dpm, int, 0444);
227
222static struct pci_device_id pciidlist[] = { 228static struct pci_device_id pciidlist[] = {
223 radeon_PCI_IDS 229 radeon_PCI_IDS
224}; 230};
@@ -422,6 +428,7 @@ static struct drm_driver kms_driver = {
422 .gem_prime_export = drm_gem_prime_export, 428 .gem_prime_export = drm_gem_prime_export,
423 .gem_prime_import = drm_gem_prime_import, 429 .gem_prime_import = drm_gem_prime_import,
424 .gem_prime_pin = radeon_gem_prime_pin, 430 .gem_prime_pin = radeon_gem_prime_pin,
431 .gem_prime_unpin = radeon_gem_prime_unpin,
425 .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, 432 .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
426 .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, 433 .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
427 .gem_prime_vmap = radeon_gem_prime_vmap, 434 .gem_prime_vmap = radeon_gem_prime_vmap,
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 36e9803b077d..3c8289083f9d 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -93,6 +93,9 @@ enum radeon_family {
93 CHIP_VERDE, 93 CHIP_VERDE,
94 CHIP_OLAND, 94 CHIP_OLAND,
95 CHIP_HAINAN, 95 CHIP_HAINAN,
96 CHIP_BONAIRE,
97 CHIP_KAVERI,
98 CHIP_KABINI,
96 CHIP_LAST, 99 CHIP_LAST,
97}; 100};
98 101
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5a99d433fc35..bcdefd1dcd43 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -82,6 +82,23 @@ static void radeon_hotplug_work_func(struct work_struct *work)
82} 82}
83 83
84/** 84/**
85 * radeon_irq_reset_work_func - execute gpu reset
86 *
87 * @work: work struct
88 *
89 * Execute scheduled gpu reset (cayman+).
90 * This function is called when the irq handler
91 * thinks we need a gpu reset.
92 */
93static void radeon_irq_reset_work_func(struct work_struct *work)
94{
95 struct radeon_device *rdev = container_of(work, struct radeon_device,
96 reset_work);
97
98 radeon_gpu_reset(rdev);
99}
100
101/**
85 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback 102 * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
86 * 103 *
87 * @dev: drm dev pointer 104 * @dev: drm dev pointer
@@ -99,6 +116,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
99 /* Disable *all* interrupts */ 116 /* Disable *all* interrupts */
100 for (i = 0; i < RADEON_NUM_RINGS; i++) 117 for (i = 0; i < RADEON_NUM_RINGS; i++)
101 atomic_set(&rdev->irq.ring_int[i], 0); 118 atomic_set(&rdev->irq.ring_int[i], 0);
119 rdev->irq.dpm_thermal = false;
102 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 120 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
103 rdev->irq.hpd[i] = false; 121 rdev->irq.hpd[i] = false;
104 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 122 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -146,6 +164,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
146 /* Disable *all* interrupts */ 164 /* Disable *all* interrupts */
147 for (i = 0; i < RADEON_NUM_RINGS; i++) 165 for (i = 0; i < RADEON_NUM_RINGS; i++)
148 atomic_set(&rdev->irq.ring_int[i], 0); 166 atomic_set(&rdev->irq.ring_int[i], 0);
167 rdev->irq.dpm_thermal = false;
149 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 168 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
150 rdev->irq.hpd[i] = false; 169 rdev->irq.hpd[i] = false;
151 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 170 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -243,6 +262,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
243 262
244 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 263 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
245 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); 264 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
265 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
246 266
247 spin_lock_init(&rdev->irq.lock); 267 spin_lock_init(&rdev->irq.lock);
248 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 268 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4f2d4f4c1dab..49ff3d1a6102 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -229,7 +229,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
229 *value = rdev->accel_working; 229 *value = rdev->accel_working;
230 break; 230 break;
231 case RADEON_INFO_TILING_CONFIG: 231 case RADEON_INFO_TILING_CONFIG:
232 if (rdev->family >= CHIP_TAHITI) 232 if (rdev->family >= CHIP_BONAIRE)
233 *value = rdev->config.cik.tile_config;
234 else if (rdev->family >= CHIP_TAHITI)
233 *value = rdev->config.si.tile_config; 235 *value = rdev->config.si.tile_config;
234 else if (rdev->family >= CHIP_CAYMAN) 236 else if (rdev->family >= CHIP_CAYMAN)
235 *value = rdev->config.cayman.tile_config; 237 *value = rdev->config.cayman.tile_config;
@@ -281,7 +283,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
281 *value = rdev->clock.spll.reference_freq * 10; 283 *value = rdev->clock.spll.reference_freq * 10;
282 break; 284 break;
283 case RADEON_INFO_NUM_BACKENDS: 285 case RADEON_INFO_NUM_BACKENDS:
284 if (rdev->family >= CHIP_TAHITI) 286 if (rdev->family >= CHIP_BONAIRE)
287 *value = rdev->config.cik.max_backends_per_se *
288 rdev->config.cik.max_shader_engines;
289 else if (rdev->family >= CHIP_TAHITI)
285 *value = rdev->config.si.max_backends_per_se * 290 *value = rdev->config.si.max_backends_per_se *
286 rdev->config.si.max_shader_engines; 291 rdev->config.si.max_shader_engines;
287 else if (rdev->family >= CHIP_CAYMAN) 292 else if (rdev->family >= CHIP_CAYMAN)
@@ -298,7 +303,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
298 } 303 }
299 break; 304 break;
300 case RADEON_INFO_NUM_TILE_PIPES: 305 case RADEON_INFO_NUM_TILE_PIPES:
301 if (rdev->family >= CHIP_TAHITI) 306 if (rdev->family >= CHIP_BONAIRE)
307 *value = rdev->config.cik.max_tile_pipes;
308 else if (rdev->family >= CHIP_TAHITI)
302 *value = rdev->config.si.max_tile_pipes; 309 *value = rdev->config.si.max_tile_pipes;
303 else if (rdev->family >= CHIP_CAYMAN) 310 else if (rdev->family >= CHIP_CAYMAN)
304 *value = rdev->config.cayman.max_tile_pipes; 311 *value = rdev->config.cayman.max_tile_pipes;
@@ -316,7 +323,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
316 *value = 1; 323 *value = 1;
317 break; 324 break;
318 case RADEON_INFO_BACKEND_MAP: 325 case RADEON_INFO_BACKEND_MAP:
319 if (rdev->family >= CHIP_TAHITI) 326 if (rdev->family >= CHIP_BONAIRE)
327 return -EINVAL;
328 else if (rdev->family >= CHIP_TAHITI)
320 *value = rdev->config.si.backend_map; 329 *value = rdev->config.si.backend_map;
321 else if (rdev->family >= CHIP_CAYMAN) 330 else if (rdev->family >= CHIP_CAYMAN)
322 *value = rdev->config.cayman.backend_map; 331 *value = rdev->config.cayman.backend_map;
@@ -343,7 +352,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
343 *value = RADEON_IB_VM_MAX_SIZE; 352 *value = RADEON_IB_VM_MAX_SIZE;
344 break; 353 break;
345 case RADEON_INFO_MAX_PIPES: 354 case RADEON_INFO_MAX_PIPES:
346 if (rdev->family >= CHIP_TAHITI) 355 if (rdev->family >= CHIP_BONAIRE)
356 *value = rdev->config.cik.max_cu_per_sh;
357 else if (rdev->family >= CHIP_TAHITI)
347 *value = rdev->config.si.max_cu_per_sh; 358 *value = rdev->config.si.max_cu_per_sh;
348 else if (rdev->family >= CHIP_CAYMAN) 359 else if (rdev->family >= CHIP_CAYMAN)
349 *value = rdev->config.cayman.max_pipes_per_simd; 360 *value = rdev->config.cayman.max_pipes_per_simd;
@@ -367,7 +378,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
367 value64 = radeon_get_gpu_clock_counter(rdev); 378 value64 = radeon_get_gpu_clock_counter(rdev);
368 break; 379 break;
369 case RADEON_INFO_MAX_SE: 380 case RADEON_INFO_MAX_SE:
370 if (rdev->family >= CHIP_TAHITI) 381 if (rdev->family >= CHIP_BONAIRE)
382 *value = rdev->config.cik.max_shader_engines;
383 else if (rdev->family >= CHIP_TAHITI)
371 *value = rdev->config.si.max_shader_engines; 384 *value = rdev->config.si.max_shader_engines;
372 else if (rdev->family >= CHIP_CAYMAN) 385 else if (rdev->family >= CHIP_CAYMAN)
373 *value = rdev->config.cayman.max_shader_engines; 386 *value = rdev->config.cayman.max_shader_engines;
@@ -377,7 +390,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
377 *value = 1; 390 *value = 1;
378 break; 391 break;
379 case RADEON_INFO_MAX_SH_PER_SE: 392 case RADEON_INFO_MAX_SH_PER_SE:
380 if (rdev->family >= CHIP_TAHITI) 393 if (rdev->family >= CHIP_BONAIRE)
394 *value = rdev->config.cik.max_sh_per_se;
395 else if (rdev->family >= CHIP_TAHITI)
381 *value = rdev->config.si.max_sh_per_se; 396 *value = rdev->config.si.max_sh_per_se;
382 else 397 else
383 return -EINVAL; 398 return -EINVAL;
@@ -407,12 +422,16 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
407 } 422 }
408 break; 423 break;
409 case RADEON_INFO_SI_TILE_MODE_ARRAY: 424 case RADEON_INFO_SI_TILE_MODE_ARRAY:
410 if (rdev->family < CHIP_TAHITI) { 425 if (rdev->family >= CHIP_BONAIRE) {
411 DRM_DEBUG_KMS("tile mode array is si only!\n"); 426 value = rdev->config.cik.tile_mode_array;
427 value_size = sizeof(uint32_t)*32;
428 } else if (rdev->family >= CHIP_TAHITI) {
429 value = rdev->config.si.tile_mode_array;
430 value_size = sizeof(uint32_t)*32;
431 } else {
432 DRM_DEBUG_KMS("tile mode array is si+ only!\n");
412 return -EINVAL; 433 return -EINVAL;
413 } 434 }
414 value = rdev->config.si.tile_mode_array;
415 value_size = sizeof(uint32_t)*32;
416 break; 435 break;
417 default: 436 default:
418 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 437 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 69ad4fe224c1..b568cb19a7fa 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -307,6 +307,8 @@ struct radeon_crtc {
307 uint64_t cursor_addr; 307 uint64_t cursor_addr;
308 int cursor_width; 308 int cursor_width;
309 int cursor_height; 309 int cursor_height;
310 int max_cursor_width;
311 int max_cursor_height;
310 uint32_t legacy_display_base_addr; 312 uint32_t legacy_display_base_addr;
311 uint32_t legacy_cursor_offset; 313 uint32_t legacy_cursor_offset;
312 enum radeon_rmx_type rmx_type; 314 enum radeon_rmx_type rmx_type;
@@ -329,6 +331,10 @@ struct radeon_crtc {
329 u32 pll_flags; 331 u32 pll_flags;
330 struct drm_encoder *encoder; 332 struct drm_encoder *encoder;
331 struct drm_connector *connector; 333 struct drm_connector *connector;
334 /* for dpm */
335 u32 line_time;
336 u32 wm_low;
337 u32 wm_high;
332}; 338};
333 339
334struct radeon_encoder_primary_dac { 340struct radeon_encoder_primary_dac {
@@ -512,12 +518,99 @@ struct atom_clock_dividers {
512 bool enable_dithen; 518 bool enable_dithen;
513 u32 vco_mode; 519 u32 vco_mode;
514 u32 real_clock; 520 u32 real_clock;
521 /* added for CI */
522 u32 post_divider;
523 u32 flags;
524};
525
526struct atom_mpll_param {
527 union {
528 struct {
529#ifdef __BIG_ENDIAN
530 u32 reserved : 8;
531 u32 clkfrac : 12;
532 u32 clkf : 12;
533#else
534 u32 clkf : 12;
535 u32 clkfrac : 12;
536 u32 reserved : 8;
537#endif
538 };
539 u32 fb_div;
540 };
541 u32 post_div;
542 u32 bwcntl;
543 u32 dll_speed;
544 u32 vco_mode;
545 u32 yclk_sel;
546 u32 qdr;
547 u32 half_rate;
548};
549
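The #ifdef __BIG_ENDIAN flip in atom_mpll_param exists because bitfield allocation order follows the ABI: mirroring the declaration order keeps clkf/clkfrac/reserved at the same bit positions inside the aliased fb_div word on both endiannesses. A user-space model (caveat: bitfield layout is implementation-defined, and user space would key off __BYTE_ORDER rather than the kernel's Kconfig-driven __BIG_ENDIAN):

#include <stdint.h>
#include <stdio.h>

union fbdiv {
        struct {
#ifdef __BIG_ENDIAN             /* set by Kconfig in the kernel */
                uint32_t reserved : 8;
                uint32_t clkfrac : 12;
                uint32_t clkf : 12;
#else
                uint32_t clkf : 12;
                uint32_t clkfrac : 12;
                uint32_t reserved : 8;
#endif
        };
        uint32_t fb_div;
};

int main(void)
{
        union fbdiv d = { .fb_div = 0 };

        d.clkf = 0x123;
        d.clkfrac = 0x456;
        /* prints fb_div = 0x00456123 on a typical little-endian ABI */
        printf("fb_div = 0x%08x\n", (unsigned)d.fb_div);
        return 0;
}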
550#define MEM_TYPE_GDDR5 0x50
551#define MEM_TYPE_GDDR4 0x40
552#define MEM_TYPE_GDDR3 0x30
553#define MEM_TYPE_DDR2 0x20
554#define MEM_TYPE_GDDR1 0x10
555#define MEM_TYPE_DDR3 0xb0
556#define MEM_TYPE_MASK 0xf0
557
558struct atom_memory_info {
559 u8 mem_vendor;
560 u8 mem_type;
561};
562
563#define MAX_AC_TIMING_ENTRIES 16
564
565struct atom_memory_clock_range_table
566{
567 u8 num_entries;
568 u8 rsv[3];
569 u32 mclk[MAX_AC_TIMING_ENTRIES];
570};
571
572#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
573#define VBIOS_MAX_AC_TIMING_ENTRIES 20
574
575struct atom_mc_reg_entry {
576 u32 mclk_max;
577 u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
578};
579
580struct atom_mc_register_address {
581 u16 s1;
582 u8 pre_reg_data;
583};
584
585struct atom_mc_reg_table {
586 u8 last;
587 u8 num_entries;
588 struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
589 struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
590};
591
592#define MAX_VOLTAGE_ENTRIES 32
593
594struct atom_voltage_table_entry
595{
596 u16 value;
597 u32 smio_low;
598};
599
600struct atom_voltage_table
601{
602 u32 count;
603 u32 mask_low;
604 u32 phase_delay;
605 struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
515}; 606};
516 607
517extern enum radeon_tv_std 608extern enum radeon_tv_std
518radeon_combios_get_tv_info(struct radeon_device *rdev); 609radeon_combios_get_tv_info(struct radeon_device *rdev);
519extern enum radeon_tv_std 610extern enum radeon_tv_std
520radeon_atombios_get_tv_info(struct radeon_device *rdev); 611radeon_atombios_get_tv_info(struct radeon_device *rdev);
612extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
613 u16 *vddc, u16 *vddci, u16 *mvdd);
521 614
522extern struct drm_connector * 615extern struct drm_connector *
523radeon_get_connector_for_encoder(struct drm_encoder *encoder); 616radeon_get_connector_for_encoder(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1424ccde2377..0219d263e2df 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -322,8 +322,8 @@ int radeon_bo_init(struct radeon_device *rdev)
322{ 322{
323 /* Add an MTRR for the VRAM */ 323 /* Add an MTRR for the VRAM */
324 if (!rdev->fastfb_working) { 324 if (!rdev->fastfb_working) {
325 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 325 rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
326 MTRR_TYPE_WRCOMB, 1); 326 rdev->mc.aper_size);
327 } 327 }
328 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 328 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
329 rdev->mc.mc_vram_size >> 20, 329 rdev->mc.mc_vram_size >> 20,
@@ -336,6 +336,7 @@ int radeon_bo_init(struct radeon_device *rdev)
336void radeon_bo_fini(struct radeon_device *rdev) 336void radeon_bo_fini(struct radeon_device *rdev)
337{ 337{
338 radeon_ttm_fini(rdev); 338 radeon_ttm_fini(rdev);
339 arch_phys_wc_del(rdev->mc.vram_mtrr);
339} 340}
340 341
341void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 342void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
@@ -348,14 +349,15 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
348 } 349 }
349} 350}
350 351
351int radeon_bo_list_validate(struct list_head *head, int ring) 352int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
353 struct list_head *head, int ring)
352{ 354{
353 struct radeon_bo_list *lobj; 355 struct radeon_bo_list *lobj;
354 struct radeon_bo *bo; 356 struct radeon_bo *bo;
355 u32 domain; 357 u32 domain;
356 int r; 358 int r;
357 359
358 r = ttm_eu_reserve_buffers(head); 360 r = ttm_eu_reserve_buffers(ticket, head);
359 if (unlikely(r != 0)) { 361 if (unlikely(r != 0)) {
360 return r; 362 return r;
361 } 363 }
@@ -398,7 +400,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
398 int steal; 400 int steal;
399 int i; 401 int i;
400 402
401 BUG_ON(!radeon_bo_is_reserved(bo)); 403 lockdep_assert_held(&bo->tbo.resv->lock.base);
402 404
403 if (!bo->tiling_flags) 405 if (!bo->tiling_flags)
404 return 0; 406 return 0;
@@ -524,7 +526,8 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
524 uint32_t *tiling_flags, 526 uint32_t *tiling_flags,
525 uint32_t *pitch) 527 uint32_t *pitch)
526{ 528{
527 BUG_ON(!radeon_bo_is_reserved(bo)); 529 lockdep_assert_held(&bo->tbo.resv->lock.base);
530
528 if (tiling_flags) 531 if (tiling_flags)
529 *tiling_flags = bo->tiling_flags; 532 *tiling_flags = bo->tiling_flags;
530 if (pitch) 533 if (pitch)
@@ -534,7 +537,8 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
534int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, 537int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
535 bool force_drop) 538 bool force_drop)
536{ 539{
537 BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop); 540 if (!force_drop)
541 lockdep_assert_held(&bo->tbo.resv->lock.base);
538 542
539 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 543 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
540 return 0; 544 return 0;
@@ -617,26 +621,3 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
617 ttm_bo_unreserve(&bo->tbo); 621 ttm_bo_unreserve(&bo->tbo);
618 return r; 622 return r;
619} 623}
620
621
622/**
623 * radeon_bo_reserve - reserve bo
624 * @bo: bo structure
625 * @no_intr: don't return -ERESTARTSYS on pending signal
626 *
627 * Returns:
628 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
629 * a signal. Release all buffer reservations and return to user-space.
630 */
631int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
632{
633 int r;
634
635 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
636 if (unlikely(r != 0)) {
637 if (r != -ERESTARTSYS)
638 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
639 return r;
640 }
641 return 0;
642}
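The BUG_ON(!radeon_bo_is_reserved(bo)) checks in this file become lockdep_assert_held() on the reservation's base ww_mutex: cheaper, since it compiles out without lockdep, and stronger, since it asserts that the current thread owns the lock rather than merely that the lock is taken. The distinction, modeled with pthreads (hypothetical helpers, single-threaded demo; build with -lpthread):

#include <pthread.h>
#include <assert.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;         /* valid only while 'lock' is held */

static void lock_take(void)
{
        pthread_mutex_lock(&lock);
        owner = pthread_self();
}

/* Analogue of lockdep_assert_held(): not "somebody holds it" but
 * "this thread holds it". */
static void assert_held_by_me(void)
{
        assert(pthread_equal(owner, pthread_self()));
}

int main(void)
{
        lock_take();
        assert_held_by_me();
        pthread_mutex_unlock(&lock);
        return 0;
}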
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e2cb80a96b51..91519a5622b4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,7 +52,27 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
52 return 0; 52 return 0;
53} 53}
54 54
55int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr); 55/**
56 * radeon_bo_reserve - reserve bo
57 * @bo: bo structure
58 * @no_intr: don't return -ERESTARTSYS on pending signal
59 *
60 * Returns:
61 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
62 * a signal. Release all buffer reservations and return to user-space.
63 */
64static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
65{
66 int r;
67
68 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
69 if (unlikely(r != 0)) {
70 if (r != -ERESTARTSYS)
71 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
72 return r;
73 }
74 return 0;
75}
56 76
57static inline void radeon_bo_unreserve(struct radeon_bo *bo) 77static inline void radeon_bo_unreserve(struct radeon_bo *bo)
58{ 78{
@@ -78,11 +98,6 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
78 return bo->tbo.num_pages << PAGE_SHIFT; 98 return bo->tbo.num_pages << PAGE_SHIFT;
79} 99}
80 100
81static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
82{
83 return ttm_bo_is_reserved(&bo->tbo);
84}
85
86static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) 101static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
87{ 102{
88 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; 103 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
@@ -128,7 +143,8 @@ extern int radeon_bo_init(struct radeon_device *rdev);
128extern void radeon_bo_fini(struct radeon_device *rdev); 143extern void radeon_bo_fini(struct radeon_device *rdev);
129extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 144extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
130 struct list_head *head); 145 struct list_head *head);
131extern int radeon_bo_list_validate(struct list_head *head, int ring); 146extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
147 struct list_head *head, int ring);
132extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 148extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
133 struct vm_area_struct *vma); 149 struct vm_area_struct *vma);
134extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 150extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 788c64cb4b47..ebbdb477745a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -388,7 +388,8 @@ static ssize_t radeon_get_pm_method(struct device *dev,
388 int pm = rdev->pm.pm_method; 388 int pm = rdev->pm.pm_method;
389 389
390 return snprintf(buf, PAGE_SIZE, "%s\n", 390 return snprintf(buf, PAGE_SIZE, "%s\n",
391 (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile"); 391 (pm == PM_METHOD_DYNPM) ? "dynpm" :
392 (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
392} 393}
393 394
394static ssize_t radeon_set_pm_method(struct device *dev, 395static ssize_t radeon_set_pm_method(struct device *dev,
@@ -399,6 +400,11 @@ static ssize_t radeon_set_pm_method(struct device *dev,
399 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 400 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
400 struct radeon_device *rdev = ddev->dev_private; 401 struct radeon_device *rdev = ddev->dev_private;
401 402
403 /* we don't support the legacy modes with dpm */
404 if (rdev->pm.pm_method == PM_METHOD_DPM) {
405 count = -EINVAL;
406 goto fail;
407 }
402 408
403 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 409 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
404 mutex_lock(&rdev->pm.mutex); 410 mutex_lock(&rdev->pm.mutex);
@@ -423,8 +429,48 @@ fail:
423 return count; 429 return count;
424} 430}
425 431
432static ssize_t radeon_get_dpm_state(struct device *dev,
433 struct device_attribute *attr,
434 char *buf)
435{
436 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
437 struct radeon_device *rdev = ddev->dev_private;
438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
439
440 return snprintf(buf, PAGE_SIZE, "%s\n",
441 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
442 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
443}
444
445static ssize_t radeon_set_dpm_state(struct device *dev,
446 struct device_attribute *attr,
447 const char *buf,
448 size_t count)
449{
450 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
451 struct radeon_device *rdev = ddev->dev_private;
452
453 mutex_lock(&rdev->pm.mutex);
454 if (strncmp("battery", buf, strlen("battery")) == 0)
455 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
456 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
457 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
458 else if (strncmp("performance", buf, strlen("performance")) == 0)
459 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
460 else {
461 mutex_unlock(&rdev->pm.mutex);
462 count = -EINVAL;
463 goto fail;
464 }
465 mutex_unlock(&rdev->pm.mutex);
466 radeon_pm_compute_clocks(rdev);
467fail:
468 return count;
469}
470
426static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 471static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
427static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 472static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
473static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
428 474
429static ssize_t radeon_hwmon_show_temp(struct device *dev, 475static ssize_t radeon_hwmon_show_temp(struct device *dev,
430 struct device_attribute *attr, 476 struct device_attribute *attr,
@@ -434,27 +480,10 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
434 struct radeon_device *rdev = ddev->dev_private; 480 struct radeon_device *rdev = ddev->dev_private;
435 int temp; 481 int temp;
436 482
437 switch (rdev->pm.int_thermal_type) { 483 if (rdev->asic->pm.get_temperature)
438 case THERMAL_TYPE_RV6XX: 484 temp = radeon_get_temperature(rdev);
439 temp = rv6xx_get_temp(rdev); 485 else
440 break;
441 case THERMAL_TYPE_RV770:
442 temp = rv770_get_temp(rdev);
443 break;
444 case THERMAL_TYPE_EVERGREEN:
445 case THERMAL_TYPE_NI:
446 temp = evergreen_get_temp(rdev);
447 break;
448 case THERMAL_TYPE_SUMO:
449 temp = sumo_get_temp(rdev);
450 break;
451 case THERMAL_TYPE_SI:
452 temp = si_get_temp(rdev);
453 break;
454 default:
455 temp = 0; 486 temp = 0;
456 break;
457 }
458 487
459 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 488 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
460} 489}
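This hunk replaces a grow-forever switch over int_thermal_type with a single get_temperature hook in the asic function table, so new chips only have to fill in a pointer. The shape of that refactor in miniature (illustrative names):

#include <stdio.h>

struct chip_ops {
        int (*get_temperature)(void);   /* NULL when unsupported */
};

static int fake_get_temp(void) { return 45; }

static int read_temp(const struct chip_ops *ops)
{
        return ops->get_temperature ? ops->get_temperature() : 0;
}

int main(void)
{
        struct chip_ops with = { .get_temperature = fake_get_temp };
        struct chip_ops without = { 0 };

        printf("%d %d\n", read_temp(&with), read_temp(&without));
        return 0;
}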
@@ -492,8 +521,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
492 case THERMAL_TYPE_NI: 521 case THERMAL_TYPE_NI:
493 case THERMAL_TYPE_SUMO: 522 case THERMAL_TYPE_SUMO:
494 case THERMAL_TYPE_SI: 523 case THERMAL_TYPE_SI:
495 /* No support for TN yet */ 524 if (rdev->asic->pm.get_temperature == NULL)
496 if (rdev->family == CHIP_ARUBA)
497 return err; 525 return err;
498 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 526 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
499 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 527 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
@@ -526,7 +554,270 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
526 } 554 }
527} 555}
528 556
529void radeon_pm_suspend(struct radeon_device *rdev) 557static void radeon_dpm_thermal_work_handler(struct work_struct *work)
558{
559 struct radeon_device *rdev =
560 container_of(work, struct radeon_device,
561 pm.dpm.thermal.work);
562 /* switch to the thermal state */
563 enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
564
565 if (!rdev->pm.dpm_enabled)
566 return;
567
568 if (rdev->asic->pm.get_temperature) {
569 int temp = radeon_get_temperature(rdev);
570
571 if (temp < rdev->pm.dpm.thermal.min_temp)
572 /* switch back the user state */
573 dpm_state = rdev->pm.dpm.user_state;
574 } else {
575 if (rdev->pm.dpm.thermal.high_to_low)
576 /* switch back the user state */
577 dpm_state = rdev->pm.dpm.user_state;
578 }
579 radeon_dpm_enable_power_state(rdev, dpm_state);
580}
581
582static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
583 enum radeon_pm_state_type dpm_state)
584{
585 int i;
586 struct radeon_ps *ps;
587 u32 ui_class;
588
589restart_search:
590 /* balanced states don't exist at the moment */
591 if (dpm_state == POWER_STATE_TYPE_BALANCED)
592 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
593
594 /* Pick the best power state based on current conditions */
595 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
596 ps = &rdev->pm.dpm.ps[i];
597 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
598 switch (dpm_state) {
599 /* user states */
600 case POWER_STATE_TYPE_BATTERY:
601 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
602 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
603 if (rdev->pm.dpm.new_active_crtc_count < 2)
604 return ps;
605 } else
606 return ps;
607 }
608 break;
609 case POWER_STATE_TYPE_BALANCED:
610 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
611 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
612 if (rdev->pm.dpm.new_active_crtc_count < 2)
613 return ps;
614 } else
615 return ps;
616 }
617 break;
618 case POWER_STATE_TYPE_PERFORMANCE:
619 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
620 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
621 if (rdev->pm.dpm.new_active_crtc_count < 2)
622 return ps;
623 } else
624 return ps;
625 }
626 break;
627 /* internal states */
628 case POWER_STATE_TYPE_INTERNAL_UVD:
629 return rdev->pm.dpm.uvd_ps;
630 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
631 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
632 return ps;
633 break;
634 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
635 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
636 return ps;
637 break;
638 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
639 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
640 return ps;
641 break;
642 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
643 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
644 return ps;
645 break;
646 case POWER_STATE_TYPE_INTERNAL_BOOT:
647 return rdev->pm.dpm.boot_ps;
648 case POWER_STATE_TYPE_INTERNAL_THERMAL:
649 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
650 return ps;
651 break;
652 case POWER_STATE_TYPE_INTERNAL_ACPI:
653 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
654 return ps;
655 break;
656 case POWER_STATE_TYPE_INTERNAL_ULV:
657 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
658 return ps;
659 break;
660 default:
661 break;
662 }
663 }
664 /* use a fallback state if we didn't match */
665 switch (dpm_state) {
666 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
667 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
668 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
669 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
670 return rdev->pm.dpm.uvd_ps;
671 case POWER_STATE_TYPE_INTERNAL_THERMAL:
672 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
673 goto restart_search;
674 case POWER_STATE_TYPE_INTERNAL_ACPI:
675 dpm_state = POWER_STATE_TYPE_BATTERY;
676 goto restart_search;
677 case POWER_STATE_TYPE_BATTERY:
678 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
679 goto restart_search;
680 default:
681 break;
682 }
683
684 return NULL;
685}
686
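radeon_dpm_pick_power_state() degrades gracefully: if no state matches the request, the fallback switch rewrites the request to the next-best class and jumps back to restart_search, bottoming out at states that always exist. The skeleton of that search-with-fallback loop (hypothetical enums; the real code also special-cases the UVD states):

#include <stdio.h>

enum state { THERMAL, ACPI, BATTERY, PERFORMANCE };

static int have_state(enum state s)
{
        return s == PERFORMANCE;        /* pretend only this one exists */
}

static enum state pick(enum state want)
{
        for (;;) {
                if (have_state(want))
                        return want;
                switch (want) {
                case THERMAL: want = ACPI; break;       /* next best */
                case ACPI:    want = BATTERY; break;
                case BATTERY: want = PERFORMANCE; break;
                default:      return PERFORMANCE;       /* last resort */
                }
        }
}

int main(void)
{
        /* degrades THERMAL -> ACPI -> BATTERY -> PERFORMANCE */
        printf("%d\n", pick(THERMAL));
        return 0;
}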
687static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
688{
689 int i;
690 struct radeon_ps *ps;
691 enum radeon_pm_state_type dpm_state;
692 int ret;
693
694 /* if dpm init failed */
695 if (!rdev->pm.dpm_enabled)
696 return;
697
698 if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
699 /* add other state override checks here */
700 if ((!rdev->pm.dpm.thermal_active) &&
701 (!rdev->pm.dpm.uvd_active))
702 rdev->pm.dpm.state = rdev->pm.dpm.user_state;
703 }
704 dpm_state = rdev->pm.dpm.state;
705
706 ps = radeon_dpm_pick_power_state(rdev, dpm_state);
707 if (ps)
708 rdev->pm.dpm.requested_ps = ps;
709 else
710 return;
711
712 /* no need to reprogram if nothing changed unless we are on BTC+ */
713 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
714 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
715 /* for pre-BTC chips and APUs, if the number of crtcs changed but the
716 * state is the same, all we need to do is update the display configuration.
717 */
718 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
719 /* update display watermarks based on new power state */
720 radeon_bandwidth_update(rdev);
721 /* update displays */
722 radeon_dpm_display_configuration_changed(rdev);
723 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
724 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
725 }
726 return;
727 } else {
728 /* for BTC+, if the num crtcs hasn't changed and the state is the same,
729 * there is nothing to do; if the num crtcs is > 1 and the state is the same,
730 * update the display configuration.
731 */
732 if (rdev->pm.dpm.new_active_crtcs ==
733 rdev->pm.dpm.current_active_crtcs) {
734 return;
735 } else {
736 if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
737 (rdev->pm.dpm.new_active_crtc_count > 1)) {
738 /* update display watermarks based on new power state */
739 radeon_bandwidth_update(rdev);
740 /* update displays */
741 radeon_dpm_display_configuration_changed(rdev);
742 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
743 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
744 return;
745 }
746 }
747 }
748 }
749
750 printk("switching from power state:\n");
751 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
752 printk("switching to power state:\n");
753 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
754
755 mutex_lock(&rdev->ddev->struct_mutex);
756 down_write(&rdev->pm.mclk_lock);
757 mutex_lock(&rdev->ring_lock);
758
759 ret = radeon_dpm_pre_set_power_state(rdev);
760 if (ret)
761 goto done;
762
763 /* update display watermarks based on new power state */
764 radeon_bandwidth_update(rdev);
765 /* update displays */
766 radeon_dpm_display_configuration_changed(rdev);
767
768 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
769 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
770
771 /* wait for the rings to drain */
772 for (i = 0; i < RADEON_NUM_RINGS; i++) {
773 struct radeon_ring *ring = &rdev->ring[i];
774 if (ring->ready)
775 radeon_fence_wait_empty_locked(rdev, i);
776 }
777
778 /* program the new power state */
779 radeon_dpm_set_power_state(rdev);
780
781 /* update current power state */
782 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
783
784 radeon_dpm_post_set_power_state(rdev);
785
786done:
787 mutex_unlock(&rdev->ring_lock);
788 up_write(&rdev->pm.mclk_lock);
789 mutex_unlock(&rdev->ddev->struct_mutex);
790}
791
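The switch path above nests three locks in a fixed order (struct_mutex, then the mclk rwsem taken for write, then ring_lock), drains every ready ring, reprograms the hardware, and releases in reverse order. A stripped-down sketch of that shape, with hypothetical demo_* types and helpers standing in for the driver's:

/* sketch only: acquire in order, drain, program, release in reverse */
#include <linux/mutex.h>
#include <linux/rwsem.h>

#define DEMO_NUM_RINGS 3

struct demo_dev {
	struct mutex outer;		/* stands in for ddev->struct_mutex */
	struct rw_semaphore mclk;	/* stands in for pm.mclk_lock */
	struct mutex rings;		/* stands in for ring_lock */
	struct { bool ready; } ring[DEMO_NUM_RINGS];
};

void demo_wait_ring_empty(struct demo_dev *d, int i);	/* assumed helper */
void demo_program_state(struct demo_dev *d);		/* assumed helper */

void demo_switch_state(struct demo_dev *d)
{
	int i;

	mutex_lock(&d->outer);
	down_write(&d->mclk);
	mutex_lock(&d->rings);

	for (i = 0; i < DEMO_NUM_RINGS; i++)
		if (d->ring[i].ready)
			demo_wait_ring_empty(d, i);

	demo_program_state(d);

	mutex_unlock(&d->rings);
	up_write(&d->mclk);
	mutex_unlock(&d->outer);
}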
792void radeon_dpm_enable_power_state(struct radeon_device *rdev,
793 enum radeon_pm_state_type dpm_state)
794{
795 if (!rdev->pm.dpm_enabled)
796 return;
797
798 mutex_lock(&rdev->pm.mutex);
799 switch (dpm_state) {
800 case POWER_STATE_TYPE_INTERNAL_THERMAL:
801 rdev->pm.dpm.thermal_active = true;
802 break;
803 case POWER_STATE_TYPE_INTERNAL_UVD:
804 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
805 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
806 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
807 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
808 rdev->pm.dpm.uvd_active = true;
809 break;
810 default:
811 rdev->pm.dpm.thermal_active = false;
812 rdev->pm.dpm.uvd_active = false;
813 break;
814 }
815 rdev->pm.dpm.state = dpm_state;
816 mutex_unlock(&rdev->pm.mutex);
817 radeon_pm_compute_clocks(rdev);
818}
819
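radeon_dpm_enable_power_state() is the hook other subsystems use to force a state class; the radeon_uvd.c hunk later in this diff calls it with POWER_STATE_TYPE_INTERNAL_UVD before decode work starts. A hypothetical caller would look like:

/* sketch only: a subsystem forcing the UVD state class */
static void demo_begin_decode(struct radeon_device *rdev)
{
	/* sets uvd_active under pm.mutex, then recomputes clocks */
	radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
}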
820static void radeon_pm_suspend_old(struct radeon_device *rdev)
530{ 821{
531 mutex_lock(&rdev->pm.mutex); 822 mutex_lock(&rdev->pm.mutex);
532 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 823 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
@@ -538,7 +829,26 @@ void radeon_pm_suspend(struct radeon_device *rdev)
538 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 829 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
539} 830}
540 831
541void radeon_pm_resume(struct radeon_device *rdev) 832static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
833{
834 mutex_lock(&rdev->pm.mutex);
835 /* disable dpm */
836 radeon_dpm_disable(rdev);
837 /* reset the power state */
838 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
839 rdev->pm.dpm_enabled = false;
840 mutex_unlock(&rdev->pm.mutex);
841}
842
843void radeon_pm_suspend(struct radeon_device *rdev)
844{
845 if (rdev->pm.pm_method == PM_METHOD_DPM)
846 radeon_pm_suspend_dpm(rdev);
847 else
848 radeon_pm_suspend_old(rdev);
849}
850
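Suspend is the first of several entry points split this way; resume, init, fini, and compute_clocks below all repeat the same two-line dispatch on pm_method. The shared shape, with hypothetical demo_* variants in place of the real split-out functions:

/* sketch only: the pm_method dispatch wrapper repeated below */
void demo_pm_op_dpm(struct radeon_device *rdev);	/* assumed dpm variant */
void demo_pm_op_old(struct radeon_device *rdev);	/* assumed legacy variant */

void demo_pm_op(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		demo_pm_op_dpm(rdev);
	else
		demo_pm_op_old(rdev);
}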
851static void radeon_pm_resume_old(struct radeon_device *rdev)
542{ 852{
543 /* set up the default clocks if the MC ucode is loaded */ 853 /* set up the default clocks if the MC ucode is loaded */
544 if ((rdev->family >= CHIP_BARTS) && 854 if ((rdev->family >= CHIP_BARTS) &&
@@ -573,12 +883,50 @@ void radeon_pm_resume(struct radeon_device *rdev)
573 radeon_pm_compute_clocks(rdev); 883 radeon_pm_compute_clocks(rdev);
574} 884}
575 885
576int radeon_pm_init(struct radeon_device *rdev) 886static void radeon_pm_resume_dpm(struct radeon_device *rdev)
887{
888 int ret;
889
890 /* asic init will reset to the boot state */
891 mutex_lock(&rdev->pm.mutex);
892 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
893 radeon_dpm_setup_asic(rdev);
894 ret = radeon_dpm_enable(rdev);
895 mutex_unlock(&rdev->pm.mutex);
896 if (ret) {
897 DRM_ERROR("radeon: dpm resume failed\n");
898 if ((rdev->family >= CHIP_BARTS) &&
899 (rdev->family <= CHIP_CAYMAN) &&
900 rdev->mc_fw) {
901 if (rdev->pm.default_vddc)
902 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
903 SET_VOLTAGE_TYPE_ASIC_VDDC);
904 if (rdev->pm.default_vddci)
905 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
906 SET_VOLTAGE_TYPE_ASIC_VDDCI);
907 if (rdev->pm.default_sclk)
908 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
909 if (rdev->pm.default_mclk)
910 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
911 }
912 } else {
913 rdev->pm.dpm_enabled = true;
914 radeon_pm_compute_clocks(rdev);
915 }
916}
917
918void radeon_pm_resume(struct radeon_device *rdev)
919{
920 if (rdev->pm.pm_method == PM_METHOD_DPM)
921 radeon_pm_resume_dpm(rdev);
922 else
923 radeon_pm_resume_old(rdev);
924}
925
926static int radeon_pm_init_old(struct radeon_device *rdev)
577{ 927{
578 int ret; 928 int ret;
579 929
580 /* default to profile method */
581 rdev->pm.pm_method = PM_METHOD_PROFILE;
582 rdev->pm.profile = PM_PROFILE_DEFAULT; 930 rdev->pm.profile = PM_PROFILE_DEFAULT;
583 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 931 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
584 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 932 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
@@ -640,7 +988,142 @@ int radeon_pm_init(struct radeon_device *rdev)
640 return 0; 988 return 0;
641} 989}
642 990
643void radeon_pm_fini(struct radeon_device *rdev) 991static void radeon_dpm_print_power_states(struct radeon_device *rdev)
992{
993 int i;
994
995 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
996 printk("== power state %d ==\n", i);
997 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
998 }
999}
1000
1001static int radeon_pm_init_dpm(struct radeon_device *rdev)
1002{
1003 int ret;
1004
1005 /* default to performance state */
1006 rdev->pm.dpm.state = POWER_STATE_TYPE_PERFORMANCE;
1007 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
1008 rdev->pm.default_sclk = rdev->clock.default_sclk;
1009 rdev->pm.default_mclk = rdev->clock.default_mclk;
1010 rdev->pm.current_sclk = rdev->clock.default_sclk;
1011 rdev->pm.current_mclk = rdev->clock.default_mclk;
1012 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1013
1014 if (rdev->bios && rdev->is_atom_bios)
1015 radeon_atombios_get_power_modes(rdev);
1016 else
1017 return -EINVAL;
1018
1019 /* set up the internal thermal sensor if applicable */
1020 ret = radeon_hwmon_init(rdev);
1021 if (ret)
1022 return ret;
1023
1024 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1025 mutex_lock(&rdev->pm.mutex);
1026 radeon_dpm_init(rdev);
1027 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1028 radeon_dpm_print_power_states(rdev);
1029 radeon_dpm_setup_asic(rdev);
1030 ret = radeon_dpm_enable(rdev);
1031 mutex_unlock(&rdev->pm.mutex);
1032 if (ret) {
1033 rdev->pm.dpm_enabled = false;
1034 if ((rdev->family >= CHIP_BARTS) &&
1035 (rdev->family <= CHIP_CAYMAN) &&
1036 rdev->mc_fw) {
1037 if (rdev->pm.default_vddc)
1038 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1039 SET_VOLTAGE_TYPE_ASIC_VDDC);
1040 if (rdev->pm.default_vddci)
1041 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1042 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1043 if (rdev->pm.default_sclk)
1044 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1045 if (rdev->pm.default_mclk)
1046 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1047 }
1048 DRM_ERROR("radeon: dpm initialization failed\n");
1049 return ret;
1050 }
1051 rdev->pm.dpm_enabled = true;
1052 radeon_pm_compute_clocks(rdev);
1053
1054 if (rdev->pm.num_power_states > 1) {
1055 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1056 if (ret)
1057 DRM_ERROR("failed to create device file for dpm state\n");
1058 /* XXX: these are noops for dpm but are here for backwards compat */
1059 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1060 if (ret)
1061 DRM_ERROR("failed to create device file for power profile\n");
1062 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1063 if (ret)
1064 DRM_ERROR("failed to create device file for power method\n");
1065
1066 if (radeon_debugfs_pm_init(rdev)) {
1067 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1068 }
1069
1070 DRM_INFO("radeon: dpm initialized\n");
1071 }
1072
1073 return 0;
1074}
1075
1076int radeon_pm_init(struct radeon_device *rdev)
1077{
1078 /* enable dpm on rv6xx+ */
1079 switch (rdev->family) {
1080 case CHIP_RV610:
1081 case CHIP_RV630:
1082 case CHIP_RV620:
1083 case CHIP_RV635:
1084 case CHIP_RV670:
1085 case CHIP_RS780:
1086 case CHIP_RS880:
1087 case CHIP_RV770:
1088 case CHIP_RV730:
1089 case CHIP_RV710:
1090 case CHIP_RV740:
1091 case CHIP_CEDAR:
1092 case CHIP_REDWOOD:
1093 case CHIP_JUNIPER:
1094 case CHIP_CYPRESS:
1095 case CHIP_HEMLOCK:
1096 case CHIP_PALM:
1097 case CHIP_SUMO:
1098 case CHIP_SUMO2:
1099 case CHIP_BARTS:
1100 case CHIP_TURKS:
1101 case CHIP_CAICOS:
1102 case CHIP_CAYMAN:
1103 case CHIP_ARUBA:
1104 case CHIP_TAHITI:
1105 case CHIP_PITCAIRN:
1106 case CHIP_VERDE:
1107 case CHIP_OLAND:
1108 case CHIP_HAINAN:
1109 if (radeon_dpm == 1)
1110 rdev->pm.pm_method = PM_METHOD_DPM;
1111 else
1112 rdev->pm.pm_method = PM_METHOD_PROFILE;
1113 break;
1114 default:
1115 /* default to profile method */
1116 rdev->pm.pm_method = PM_METHOD_PROFILE;
1117 break;
1118 }
1119
1120 if (rdev->pm.pm_method == PM_METHOD_DPM)
1121 return radeon_pm_init_dpm(rdev);
1122 else
1123 return radeon_pm_init_old(rdev);
1124}
1125
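radeon_pm_init() keys off the radeon_dpm module parameter, defined elsewhere in the driver (radeon_drv.c) and treated as opt-in here: only the value 1 selects the DPM path. A sketch of that kind of gate; the demo_ name, default, and description string are assumptions, not the driver's actual definition:

/* sketch only: an opt-in module parameter gate like radeon_dpm */
#include <linux/moduleparam.h>

static int demo_dpm;	/* 0 = off (assumed default), 1 = on */
module_param_named(dpm, demo_dpm, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable (default))");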
1126static void radeon_pm_fini_old(struct radeon_device *rdev)
644{ 1127{
645 if (rdev->pm.num_power_states > 1) { 1128 if (rdev->pm.num_power_states > 1) {
646 mutex_lock(&rdev->pm.mutex); 1129 mutex_lock(&rdev->pm.mutex);
@@ -668,7 +1151,35 @@ void radeon_pm_fini(struct radeon_device *rdev)
668 radeon_hwmon_fini(rdev); 1151 radeon_hwmon_fini(rdev);
669} 1152}
670 1153
671void radeon_pm_compute_clocks(struct radeon_device *rdev) 1154static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1155{
1156 if (rdev->pm.num_power_states > 1) {
1157 mutex_lock(&rdev->pm.mutex);
1158 radeon_dpm_disable(rdev);
1159 mutex_unlock(&rdev->pm.mutex);
1160
1161 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1162 /* XXX backwards compat */
1163 device_remove_file(rdev->dev, &dev_attr_power_profile);
1164 device_remove_file(rdev->dev, &dev_attr_power_method);
1165 }
1166 radeon_dpm_fini(rdev);
1167
1168 if (rdev->pm.power_state)
1169 kfree(rdev->pm.power_state);
1170
1171 radeon_hwmon_fini(rdev);
1172}
1173
1174void radeon_pm_fini(struct radeon_device *rdev)
1175{
1176 if (rdev->pm.pm_method == PM_METHOD_DPM)
1177 radeon_pm_fini_dpm(rdev);
1178 else
1179 radeon_pm_fini_old(rdev);
1180}
1181
1182static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
672{ 1183{
673 struct drm_device *ddev = rdev->ddev; 1184 struct drm_device *ddev = rdev->ddev;
674 struct drm_crtc *crtc; 1185 struct drm_crtc *crtc;
@@ -677,6 +1188,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
677 if (rdev->pm.num_power_states < 2) 1188 if (rdev->pm.num_power_states < 2)
678 return; 1189 return;
679 1190
1191 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
680 mutex_lock(&rdev->pm.mutex); 1192 mutex_lock(&rdev->pm.mutex);
681 1193
682 rdev->pm.active_crtcs = 0; 1194 rdev->pm.active_crtcs = 0;
@@ -739,6 +1251,46 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
739 mutex_unlock(&rdev->pm.mutex); 1251 mutex_unlock(&rdev->pm.mutex);
740} 1252}
741 1253
1254static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1255{
1256 struct drm_device *ddev = rdev->ddev;
1257 struct drm_crtc *crtc;
1258 struct radeon_crtc *radeon_crtc;
1259
1260 mutex_lock(&rdev->pm.mutex);
1261
1262 /* update active crtc counts */
1263 rdev->pm.dpm.new_active_crtcs = 0;
1264 rdev->pm.dpm.new_active_crtc_count = 0;
1265 list_for_each_entry(crtc,
1266 &ddev->mode_config.crtc_list, head) {
1267 radeon_crtc = to_radeon_crtc(crtc);
1268 if (crtc->enabled) {
1269 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1270 rdev->pm.dpm.new_active_crtc_count++;
1271 }
1272 }
1273
1274 /* update battery/ac status */
1275 if (power_supply_is_system_supplied() > 0)
1276 rdev->pm.dpm.ac_power = true;
1277 else
1278 rdev->pm.dpm.ac_power = false;
1279
1280 radeon_dpm_change_power_state_locked(rdev);
1281
1282 mutex_unlock(&rdev->pm.mutex);
1283
1284}
1285
1286void radeon_pm_compute_clocks(struct radeon_device *rdev)
1287{
1288 if (rdev->pm.pm_method == PM_METHOD_DPM)
1289 radeon_pm_compute_clocks_dpm(rdev);
1290 else
1291 radeon_pm_compute_clocks_old(rdev);
1292}
1293
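The DPM variant above tracks a bitmask of enabled crtcs and their count in lockstep; the count is just the population count of the mask, as this stand-alone sketch (hypothetical helper, not driver code) shows:

/* sketch only: mask and count bookkeeping for enabled crtcs */
#include <linux/bitops.h>
#include <linux/types.h>

void demo_account_crtc(u32 *mask, int *count, int crtc_id, bool enabled)
{
	if (enabled)
		*mask |= 1u << crtc_id;
	*count = hweight32(*mask);	/* count == bits set in mask */
}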
742static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1294static bool radeon_pm_in_vbl(struct radeon_device *rdev)
743{ 1295{
744 int crtc, vpos, hpos, vbl_status; 1296 int crtc, vpos, hpos, vbl_status;
@@ -842,19 +1394,28 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
842 struct drm_device *dev = node->minor->dev; 1394 struct drm_device *dev = node->minor->dev;
843 struct radeon_device *rdev = dev->dev_private; 1395 struct radeon_device *rdev = dev->dev_private;
844 1396
845 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1397 if (rdev->pm.dpm_enabled) {
846 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ 1398 mutex_lock(&rdev->pm.mutex);
847 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1399 if (rdev->asic->dpm.debugfs_print_current_performance_level)
848 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1400 radeon_dpm_debugfs_print_current_performance_level(rdev, m);
849 else 1401 else
850 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 1402 seq_printf(m, "Debugfs support not implemented for this asic\n");
851 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1403 mutex_unlock(&rdev->pm.mutex);
852 if (rdev->asic->pm.get_memory_clock) 1404 } else {
853 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 1405 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
854 if (rdev->pm.current_vddc) 1406 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
855 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 1407 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
856 if (rdev->asic->pm.get_pcie_lanes) 1408 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
857 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 1409 else
1410 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1411 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1412 if (rdev->asic->pm.get_memory_clock)
1413 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1414 if (rdev->pm.current_vddc)
1415 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1416 if (rdev->asic->pm.get_pcie_lanes)
1417 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1418 }
858 1419
859 return 0; 1420 return 0;
860} 1421}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 4940af7e75e6..65b9eabd5a2f 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -88,11 +88,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
88 88
89 /* pin buffer into GTT */ 89 /* pin buffer into GTT */
90 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); 90 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
91 if (ret) {
92 radeon_bo_unreserve(bo);
93 return ret;
94 }
95 radeon_bo_unreserve(bo); 91 radeon_bo_unreserve(bo);
92 return ret;
93}
94
95void radeon_gem_prime_unpin(struct drm_gem_object *obj)
96{
97 struct radeon_bo *bo = gem_to_radeon_bo(obj);
98 int ret = 0;
96 99
97 return 0; 100 ret = radeon_bo_reserve(bo, false);
101 if (unlikely(ret != 0))
102 return;
103
104 radeon_bo_unpin(bo);
105 radeon_bo_unreserve(bo);
98} 106}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 7e2c2b7cf188..62d54976d24e 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -57,6 +57,7 @@
57#include "evergreen_reg.h" 57#include "evergreen_reg.h"
58#include "ni_reg.h" 58#include "ni_reg.h"
59#include "si_reg.h" 59#include "si_reg.h"
60#include "cik_reg.h"
60 61
61#define RADEON_MC_AGP_LOCATION 0x014c 62#define RADEON_MC_AGP_LOCATION 0x014c
62#define RADEON_MC_AGP_START_MASK 0x0000FFFF 63#define RADEON_MC_AGP_START_MASK 0x0000FFFF
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 82434018cbe8..5f1c51a776ed 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -357,6 +357,38 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
357 } 357 }
358} 358}
359 359
360u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
361 struct radeon_ring *ring)
362{
363 u32 rptr;
364
365 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
366 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
367 else
368 rptr = RREG32(ring->rptr_reg);
369 rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
370
371 return rptr;
372}
373
374u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
375 struct radeon_ring *ring)
376{
377 u32 wptr;
378
379 wptr = RREG32(ring->wptr_reg);
380 wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
381
382 return wptr;
383}
384
385void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
386 struct radeon_ring *ring)
387{
388 WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
389 (void)RREG32(ring->wptr_reg);
390}
391
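With these accessors in place, radeon_ring_free_size() in the next hunk reduces to pointer arithmetic that relies on the ring size being a power of two, so wraparound is a simple mask. The same math in stand-alone form, with hypothetical parameters:

/* sketch only: free dwords in a power-of-two ring buffer */
static unsigned demo_ring_free_dw(u32 rptr, u32 wptr, u32 size_dw)
{
	/* valid only when size_dw is a power of two */
	unsigned free_dw = (rptr + size_dw - wptr) & (size_dw - 1);

	return free_dw ? free_dw : size_dw;	/* rptr == wptr: all free */
}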
360/** 392/**
361 * radeon_ring_free_size - update the free size 393 * radeon_ring_free_size - update the free size
362 * 394 *
@@ -367,13 +399,7 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
367 */ 399 */
368void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) 400void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
369{ 401{
370 u32 rptr; 402 ring->rptr = radeon_ring_get_rptr(rdev, ring);
371
372 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
373 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
374 else
375 rptr = RREG32(ring->rptr_reg);
376 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
377 /* This works because ring_size is a power of 2 */ 403 /* This works because ring_size is a power of 2 */
378 ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4)); 404 ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
379 ring->ring_free_dw -= ring->wptr; 405 ring->ring_free_dw -= ring->wptr;
@@ -465,8 +491,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
465 radeon_ring_write(ring, ring->nop); 491 radeon_ring_write(ring, ring->nop);
466 } 492 }
467 DRM_MEMORYBARRIER(); 493 DRM_MEMORYBARRIER();
468 WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask); 494 radeon_ring_set_wptr(rdev, ring);
469 (void)RREG32(ring->wptr_reg);
470} 495}
471 496
472/** 497/**
@@ -568,7 +593,6 @@ void radeon_ring_lockup_update(struct radeon_ring *ring)
568bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 593bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
569{ 594{
570 unsigned long cjiffies, elapsed; 595 unsigned long cjiffies, elapsed;
571 uint32_t rptr;
572 596
573 cjiffies = jiffies; 597 cjiffies = jiffies;
574 if (!time_after(cjiffies, ring->last_activity)) { 598 if (!time_after(cjiffies, ring->last_activity)) {
@@ -576,8 +600,7 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin
576 radeon_ring_lockup_update(ring); 600 radeon_ring_lockup_update(ring);
577 return false; 601 return false;
578 } 602 }
579 rptr = RREG32(ring->rptr_reg); 603 ring->rptr = radeon_ring_get_rptr(rdev, ring);
580 ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
581 if (ring->rptr != ring->last_rptr) { 604 if (ring->rptr != ring->last_rptr) {
582 /* CP is still working no lockup */ 605 /* CP is still working no lockup */
583 radeon_ring_lockup_update(ring); 606 radeon_ring_lockup_update(ring);
@@ -804,9 +827,9 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
804 827
805 radeon_ring_free_size(rdev, ring); 828 radeon_ring_free_size(rdev, ring);
806 count = (ring->ring_size / 4) - ring->ring_free_dw; 829 count = (ring->ring_size / 4) - ring->ring_free_dw;
807 tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift; 830 tmp = radeon_ring_get_wptr(rdev, ring);
808 seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp); 831 seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
809 tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift; 832 tmp = radeon_ring_get_rptr(rdev, ring);
810 seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp); 833 seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
811 if (ring->rptr_save_reg) { 834 if (ring->rptr_save_reg) {
812 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, 835 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index bbed4af8d0bc..f4d6bcee9006 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -35,7 +35,6 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
35{ 35{
36 struct radeon_bo *vram_obj = NULL; 36 struct radeon_bo *vram_obj = NULL;
37 struct radeon_bo **gtt_obj = NULL; 37 struct radeon_bo **gtt_obj = NULL;
38 struct radeon_fence *fence = NULL;
39 uint64_t gtt_addr, vram_addr; 38 uint64_t gtt_addr, vram_addr;
40 unsigned i, n, size; 39 unsigned i, n, size;
41 int r, ring; 40 int r, ring;
@@ -81,37 +80,38 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
81 } 80 }
82 r = radeon_bo_reserve(vram_obj, false); 81 r = radeon_bo_reserve(vram_obj, false);
83 if (unlikely(r != 0)) 82 if (unlikely(r != 0))
84 goto out_cleanup; 83 goto out_unref;
85 r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); 84 r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
86 if (r) { 85 if (r) {
87 DRM_ERROR("Failed to pin VRAM object\n"); 86 DRM_ERROR("Failed to pin VRAM object\n");
88 goto out_cleanup; 87 goto out_unres;
89 } 88 }
90 for (i = 0; i < n; i++) { 89 for (i = 0; i < n; i++) {
91 void *gtt_map, *vram_map; 90 void *gtt_map, *vram_map;
92 void **gtt_start, **gtt_end; 91 void **gtt_start, **gtt_end;
93 void **vram_start, **vram_end; 92 void **vram_start, **vram_end;
93 struct radeon_fence *fence = NULL;
94 94
95 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, 95 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
96 RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i); 96 RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
97 if (r) { 97 if (r) {
98 DRM_ERROR("Failed to create GTT object %d\n", i); 98 DRM_ERROR("Failed to create GTT object %d\n", i);
99 goto out_cleanup; 99 goto out_lclean;
100 } 100 }
101 101
102 r = radeon_bo_reserve(gtt_obj[i], false); 102 r = radeon_bo_reserve(gtt_obj[i], false);
103 if (unlikely(r != 0)) 103 if (unlikely(r != 0))
104 goto out_cleanup; 104 goto out_lclean_unref;
105 r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr); 105 r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
106 if (r) { 106 if (r) {
107 DRM_ERROR("Failed to pin GTT object %d\n", i); 107 DRM_ERROR("Failed to pin GTT object %d\n", i);
108 goto out_cleanup; 108 goto out_lclean_unres;
109 } 109 }
110 110
111 r = radeon_bo_kmap(gtt_obj[i], &gtt_map); 111 r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
112 if (r) { 112 if (r) {
113 DRM_ERROR("Failed to map GTT object %d\n", i); 113 DRM_ERROR("Failed to map GTT object %d\n", i);
114 goto out_cleanup; 114 goto out_lclean_unpin;
115 } 115 }
116 116
117 for (gtt_start = gtt_map, gtt_end = gtt_map + size; 117 for (gtt_start = gtt_map, gtt_end = gtt_map + size;
@@ -127,13 +127,13 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
127 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 127 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
128 if (r) { 128 if (r) {
129 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 129 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
130 goto out_cleanup; 130 goto out_lclean_unpin;
131 } 131 }
132 132
133 r = radeon_fence_wait(fence, false); 133 r = radeon_fence_wait(fence, false);
134 if (r) { 134 if (r) {
135 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); 135 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
136 goto out_cleanup; 136 goto out_lclean_unpin;
137 } 137 }
138 138
139 radeon_fence_unref(&fence); 139 radeon_fence_unref(&fence);
@@ -141,7 +141,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
141 r = radeon_bo_kmap(vram_obj, &vram_map); 141 r = radeon_bo_kmap(vram_obj, &vram_map);
142 if (r) { 142 if (r) {
143 DRM_ERROR("Failed to map VRAM object after copy %d\n", i); 143 DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
144 goto out_cleanup; 144 goto out_lclean_unpin;
145 } 145 }
146 146
147 for (gtt_start = gtt_map, gtt_end = gtt_map + size, 147 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
@@ -160,7 +160,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
160 (vram_addr - rdev->mc.vram_start + 160 (vram_addr - rdev->mc.vram_start +
161 (void*)gtt_start - gtt_map)); 161 (void*)gtt_start - gtt_map));
162 radeon_bo_kunmap(vram_obj); 162 radeon_bo_kunmap(vram_obj);
163 goto out_cleanup; 163 goto out_lclean_unpin;
164 } 164 }
165 *vram_start = vram_start; 165 *vram_start = vram_start;
166 } 166 }
@@ -173,13 +173,13 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
173 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 173 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
174 if (r) { 174 if (r) {
175 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 175 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
176 goto out_cleanup; 176 goto out_lclean_unpin;
177 } 177 }
178 178
179 r = radeon_fence_wait(fence, false); 179 r = radeon_fence_wait(fence, false);
180 if (r) { 180 if (r) {
181 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); 181 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
182 goto out_cleanup; 182 goto out_lclean_unpin;
183 } 183 }
184 184
185 radeon_fence_unref(&fence); 185 radeon_fence_unref(&fence);
@@ -187,7 +187,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
187 r = radeon_bo_kmap(gtt_obj[i], &gtt_map); 187 r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
188 if (r) { 188 if (r) {
189 DRM_ERROR("Failed to map GTT object after copy %d\n", i); 189 DRM_ERROR("Failed to map GTT object after copy %d\n", i);
190 goto out_cleanup; 190 goto out_lclean_unpin;
191 } 191 }
192 192
193 for (gtt_start = gtt_map, gtt_end = gtt_map + size, 193 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
@@ -206,7 +206,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
206 (gtt_addr - rdev->mc.gtt_start + 206 (gtt_addr - rdev->mc.gtt_start +
207 (void*)vram_start - vram_map)); 207 (void*)vram_start - vram_map));
208 radeon_bo_kunmap(gtt_obj[i]); 208 radeon_bo_kunmap(gtt_obj[i]);
209 goto out_cleanup; 209 goto out_lclean_unpin;
210 } 210 }
211 } 211 }
212 212
@@ -214,31 +214,32 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
214 214
215 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 215 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
216 gtt_addr - rdev->mc.gtt_start); 216 gtt_addr - rdev->mc.gtt_start);
217 continue;
218
219out_lclean_unpin:
220 radeon_bo_unpin(gtt_obj[i]);
221out_lclean_unres:
222 radeon_bo_unreserve(gtt_obj[i]);
223out_lclean_unref:
224 radeon_bo_unref(&gtt_obj[i]);
225out_lclean:
226 for (--i; i >= 0; --i) {
227 radeon_bo_unpin(gtt_obj[i]);
228 radeon_bo_unreserve(gtt_obj[i]);
229 radeon_bo_unref(&gtt_obj[i]);
230 }
231 if (fence)
232 radeon_fence_unref(&fence);
233 break;
217 } 234 }
218 235
236 radeon_bo_unpin(vram_obj);
237out_unres:
238 radeon_bo_unreserve(vram_obj);
239out_unref:
240 radeon_bo_unref(&vram_obj);
219out_cleanup: 241out_cleanup:
220 if (vram_obj) { 242 kfree(gtt_obj);
221 if (radeon_bo_is_reserved(vram_obj)) {
222 radeon_bo_unpin(vram_obj);
223 radeon_bo_unreserve(vram_obj);
224 }
225 radeon_bo_unref(&vram_obj);
226 }
227 if (gtt_obj) {
228 for (i = 0; i < n; i++) {
229 if (gtt_obj[i]) {
230 if (radeon_bo_is_reserved(gtt_obj[i])) {
231 radeon_bo_unpin(gtt_obj[i]);
232 radeon_bo_unreserve(gtt_obj[i]);
233 }
234 radeon_bo_unref(&gtt_obj[i]);
235 }
236 }
237 kfree(gtt_obj);
238 }
239 if (fence) {
240 radeon_fence_unref(&fence);
241 }
242 if (r) { 243 if (r) {
243 printk(KERN_WARNING "Error while testing BO move.\n"); 244 printk(KERN_WARNING "Error while testing BO move.\n");
244 } 245 }
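The rewrite above replaces the old test-every-pointer cleanup with ordered unwind labels, the usual kernel idiom: each label undoes exactly the steps that had already succeeded, so error paths never unpin or unreserve something they did not acquire. The idiom in miniature, with assumed demo_* acquire/release pairs:

/* sketch only: ordered-label error unwind */
int demo_acquire_a(void);	/* assumed helpers, released in */
int demo_acquire_b(void);	/* reverse order of acquisition */
int demo_acquire_c(void);
void demo_release_a(void);
void demo_release_b(void);

int demo_setup(void)
{
	int r;

	r = demo_acquire_a();
	if (r)
		return r;
	r = demo_acquire_b();
	if (r)
		goto out_release_a;
	r = demo_acquire_c();
	if (r)
		goto out_release_b;

	return 0;

out_release_b:
	demo_release_b();	/* each label undoes one step */
out_release_a:
	demo_release_a();
	return r;
}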
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
new file mode 100644
index 000000000000..d8b05f7bcf1a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RADEON_UCODE_H__
24#define __RADEON_UCODE_H__
25
26/* CP */
27#define R600_PFP_UCODE_SIZE 576
28#define R600_PM4_UCODE_SIZE 1792
29#define R700_PFP_UCODE_SIZE 848
30#define R700_PM4_UCODE_SIZE 1360
31#define EVERGREEN_PFP_UCODE_SIZE 1120
32#define EVERGREEN_PM4_UCODE_SIZE 1376
33#define CAYMAN_PFP_UCODE_SIZE 2176
34#define CAYMAN_PM4_UCODE_SIZE 2176
35#define SI_PFP_UCODE_SIZE 2144
36#define SI_PM4_UCODE_SIZE 2144
37#define SI_CE_UCODE_SIZE 2144
38
39/* RLC */
40#define R600_RLC_UCODE_SIZE 768
41#define R700_RLC_UCODE_SIZE 1024
42#define EVERGREEN_RLC_UCODE_SIZE 768
43#define CAYMAN_RLC_UCODE_SIZE 1024
44#define ARUBA_RLC_UCODE_SIZE 1536
45#define SI_RLC_UCODE_SIZE 2048
46
47/* MC */
48#define BTC_MC_UCODE_SIZE 6024
49#define CAYMAN_MC_UCODE_SIZE 6037
50#define SI_MC_UCODE_SIZE 7769
51#define OLAND_MC_UCODE_SIZE 7863
52
53/* SMC */
54#define RV770_SMC_UCODE_START 0x0100
55#define RV770_SMC_UCODE_SIZE 0x410d
56#define RV770_SMC_INT_VECTOR_START 0xffc0
57#define RV770_SMC_INT_VECTOR_SIZE 0x0040
58
59#define RV730_SMC_UCODE_START 0x0100
60#define RV730_SMC_UCODE_SIZE 0x412c
61#define RV730_SMC_INT_VECTOR_START 0xffc0
62#define RV730_SMC_INT_VECTOR_SIZE 0x0040
63
64#define RV710_SMC_UCODE_START 0x0100
65#define RV710_SMC_UCODE_SIZE 0x3f1f
66#define RV710_SMC_INT_VECTOR_START 0xffc0
67#define RV710_SMC_INT_VECTOR_SIZE 0x0040
68
69#define RV740_SMC_UCODE_START 0x0100
70#define RV740_SMC_UCODE_SIZE 0x41c5
71#define RV740_SMC_INT_VECTOR_START 0xffc0
72#define RV740_SMC_INT_VECTOR_SIZE 0x0040
73
74#define CEDAR_SMC_UCODE_START 0x0100
75#define CEDAR_SMC_UCODE_SIZE 0x5d50
76#define CEDAR_SMC_INT_VECTOR_START 0xffc0
77#define CEDAR_SMC_INT_VECTOR_SIZE 0x0040
78
79#define REDWOOD_SMC_UCODE_START 0x0100
80#define REDWOOD_SMC_UCODE_SIZE 0x5f0a
81#define REDWOOD_SMC_INT_VECTOR_START 0xffc0
82#define REDWOOD_SMC_INT_VECTOR_SIZE 0x0040
83
84#define JUNIPER_SMC_UCODE_START 0x0100
85#define JUNIPER_SMC_UCODE_SIZE 0x5f1f
86#define JUNIPER_SMC_INT_VECTOR_START 0xffc0
87#define JUNIPER_SMC_INT_VECTOR_SIZE 0x0040
88
89#define CYPRESS_SMC_UCODE_START 0x0100
90#define CYPRESS_SMC_UCODE_SIZE 0x61f7
91#define CYPRESS_SMC_INT_VECTOR_START 0xffc0
92#define CYPRESS_SMC_INT_VECTOR_SIZE 0x0040
93
94#define BARTS_SMC_UCODE_START 0x0100
95#define BARTS_SMC_UCODE_SIZE 0x6107
96#define BARTS_SMC_INT_VECTOR_START 0xffc0
97#define BARTS_SMC_INT_VECTOR_SIZE 0x0040
98
99#define TURKS_SMC_UCODE_START 0x0100
100#define TURKS_SMC_UCODE_SIZE 0x605b
101#define TURKS_SMC_INT_VECTOR_START 0xffc0
102#define TURKS_SMC_INT_VECTOR_SIZE 0x0040
103
104#define CAICOS_SMC_UCODE_START 0x0100
105#define CAICOS_SMC_UCODE_SIZE 0x5fbd
106#define CAICOS_SMC_INT_VECTOR_START 0xffc0
107#define CAICOS_SMC_INT_VECTOR_SIZE 0x0040
108
109#define CAYMAN_SMC_UCODE_START 0x0100
110#define CAYMAN_SMC_UCODE_SIZE 0x79ec
111#define CAYMAN_SMC_INT_VECTOR_START 0xffc0
112#define CAYMAN_SMC_INT_VECTOR_SIZE 0x0040
113
114#define TAHITI_SMC_UCODE_START 0x10000
115#define TAHITI_SMC_UCODE_SIZE 0xf458
116
117#define PITCAIRN_SMC_UCODE_START 0x10000
118#define PITCAIRN_SMC_UCODE_SIZE 0xe9f4
119
120#define VERDE_SMC_UCODE_START 0x10000
121#define VERDE_SMC_UCODE_SIZE 0xebe4
122
123#define OLAND_SMC_UCODE_START 0x10000
124#define OLAND_SMC_UCODE_SIZE 0xe7b4
125
126#define HAINAN_SMC_UCODE_START 0x10000
127#define HAINAN_SMC_UCODE_SIZE 0xe67C
128
129#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index cad735dd02c6..41efcec28cd8 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -44,11 +44,13 @@
44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin" 44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin" 45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin" 46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
47#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin"
47 48
48MODULE_FIRMWARE(FIRMWARE_RV710); 49MODULE_FIRMWARE(FIRMWARE_RV710);
49MODULE_FIRMWARE(FIRMWARE_CYPRESS); 50MODULE_FIRMWARE(FIRMWARE_CYPRESS);
50MODULE_FIRMWARE(FIRMWARE_SUMO); 51MODULE_FIRMWARE(FIRMWARE_SUMO);
51MODULE_FIRMWARE(FIRMWARE_TAHITI); 52MODULE_FIRMWARE(FIRMWARE_TAHITI);
53MODULE_FIRMWARE(FIRMWARE_BONAIRE);
52 54
53static void radeon_uvd_idle_work_handler(struct work_struct *work); 55static void radeon_uvd_idle_work_handler(struct work_struct *work);
54 56
@@ -100,6 +102,12 @@ int radeon_uvd_init(struct radeon_device *rdev)
100 fw_name = FIRMWARE_TAHITI; 102 fw_name = FIRMWARE_TAHITI;
101 break; 103 break;
102 104
105 case CHIP_BONAIRE:
106 case CHIP_KABINI:
107 case CHIP_KAVERI:
108 fw_name = FIRMWARE_BONAIRE;
109 break;
110
103 default: 111 default:
104 return -EINVAL; 112 return -EINVAL;
105 } 113 }
@@ -542,6 +550,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
542 struct radeon_fence **fence) 550 struct radeon_fence **fence)
543{ 551{
544 struct ttm_validate_buffer tv; 552 struct ttm_validate_buffer tv;
553 struct ww_acquire_ctx ticket;
545 struct list_head head; 554 struct list_head head;
546 struct radeon_ib ib; 555 struct radeon_ib ib;
547 uint64_t addr; 556 uint64_t addr;
@@ -553,7 +562,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
553 INIT_LIST_HEAD(&head); 562 INIT_LIST_HEAD(&head);
554 list_add(&tv.head, &head); 563 list_add(&tv.head, &head);
555 564
556 r = ttm_eu_reserve_buffers(&head); 565 r = ttm_eu_reserve_buffers(&ticket, &head);
557 if (r) 566 if (r)
558 return r; 567 return r;
559 568
@@ -561,16 +570,12 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
561 radeon_uvd_force_into_uvd_segment(bo); 570 radeon_uvd_force_into_uvd_segment(bo);
562 571
563 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 572 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
564 if (r) { 573 if (r)
565 ttm_eu_backoff_reservation(&head); 574 goto err;
566 return r;
567 }
568 575
569 r = radeon_ib_get(rdev, ring, &ib, NULL, 16); 576 r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
570 if (r) { 577 if (r)
571 ttm_eu_backoff_reservation(&head); 578 goto err;
572 return r;
573 }
574 579
575 addr = radeon_bo_gpu_offset(bo); 580 addr = radeon_bo_gpu_offset(bo);
576 ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0); 581 ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
@@ -584,11 +589,9 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
584 ib.length_dw = 16; 589 ib.length_dw = 16;
585 590
586 r = radeon_ib_schedule(rdev, &ib, NULL); 591 r = radeon_ib_schedule(rdev, &ib, NULL);
587 if (r) { 592 if (r)
588 ttm_eu_backoff_reservation(&head); 593 goto err;
589 return r; 594 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
590 }
591 ttm_eu_fence_buffer_objects(&head, ib.fence);
592 595
593 if (fence) 596 if (fence)
594 *fence = radeon_fence_ref(ib.fence); 597 *fence = radeon_fence_ref(ib.fence);
@@ -596,6 +599,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
596 radeon_ib_free(rdev, &ib); 599 radeon_ib_free(rdev, &ib);
597 radeon_bo_unref(&bo); 600 radeon_bo_unref(&bo);
598 return 0; 601 return 0;
602
603err:
604 ttm_eu_backoff_reservation(&ticket, &head);
605 return r;
599} 606}
600 607
601/* multiple fence commands without any stream commands in between can 608/* multiple fence commands without any stream commands in between can
@@ -691,11 +698,19 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
691 struct radeon_device *rdev = 698 struct radeon_device *rdev =
692 container_of(work, struct radeon_device, uvd.idle_work.work); 699 container_of(work, struct radeon_device, uvd.idle_work.work);
693 700
694 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) 701 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
695 radeon_set_uvd_clocks(rdev, 0, 0); 702 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
696 else 703 mutex_lock(&rdev->pm.mutex);
704 rdev->pm.dpm.uvd_active = false;
705 mutex_unlock(&rdev->pm.mutex);
706 radeon_pm_compute_clocks(rdev);
707 } else {
708 radeon_set_uvd_clocks(rdev, 0, 0);
709 }
710 } else {
697 schedule_delayed_work(&rdev->uvd.idle_work, 711 schedule_delayed_work(&rdev->uvd.idle_work,
698 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); 712 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
713 }
699} 714}
700 715
701void radeon_uvd_note_usage(struct radeon_device *rdev) 716void radeon_uvd_note_usage(struct radeon_device *rdev)
@@ -703,8 +718,14 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
703 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); 718 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
704 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, 719 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
705 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); 720 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
706 if (set_clocks) 721 if (set_clocks) {
707 radeon_set_uvd_clocks(rdev, 53300, 40000); 722 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
723 /* XXX pick SD/HD/MVC */
724 radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
725 } else {
726 radeon_set_uvd_clocks(rdev, 53300, 40000);
727 }
728 }
708} 729}
709 730
710static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq, 731static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
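radeon_uvd_note_usage() above is a debounced idle timer: every use cancels and re-arms the delayed work, and clocks (or the DPM state) are raised only when the cancel found no pending work, meaning the engine had actually gone idle. The pattern in isolation, with a hypothetical work item and helper:

/* sketch only: cancel-and-rearm idle debounce */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_IDLE_TIMEOUT_MS 1000

static struct delayed_work demo_idle_work;

void demo_raise_clocks(void);	/* assumed helper */

void demo_note_usage(void)
{
	/* cancel failing means the idle handler already ran */
	bool was_idle = !cancel_delayed_work_sync(&demo_idle_work);

	schedule_delayed_work(&demo_idle_work,
			      msecs_to_jiffies(DEMO_IDLE_TIMEOUT_MS));
	if (was_idle)
		demo_raise_clocks();
}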
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 55880d5962c3..d8ddfb34545d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -248,13 +248,16 @@ struct rs690_watermark {
248}; 248};
249 249
250static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, 250static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
251 struct radeon_crtc *crtc, 251 struct radeon_crtc *crtc,
252 struct rs690_watermark *wm) 252 struct rs690_watermark *wm,
253 bool low)
253{ 254{
254 struct drm_display_mode *mode = &crtc->base.mode; 255 struct drm_display_mode *mode = &crtc->base.mode;
255 fixed20_12 a, b, c; 256 fixed20_12 a, b, c;
256 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; 257 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
257 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; 258 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
259 fixed20_12 sclk, core_bandwidth, max_bandwidth;
260 u32 selected_sclk;
258 261
259 if (!crtc->base.enabled) { 262 if (!crtc->base.enabled) {
260 /* FIXME: wouldn't it better to set priority mark to maximum */ 263 /* FIXME: wouldn't it better to set priority mark to maximum */
@@ -262,6 +265,21 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
262 return; 265 return;
263 } 266 }
264 267
268 if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
269 (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
270 selected_sclk = radeon_dpm_get_sclk(rdev, low);
271 else
272 selected_sclk = rdev->pm.current_sclk;
273
274 /* sclk in MHz */
275 a.full = dfixed_const(100);
276 sclk.full = dfixed_const(selected_sclk);
277 sclk.full = dfixed_div(sclk, a);
278
279 /* core_bandwidth = sclk(MHz) * 16 */
280 a.full = dfixed_const(16);
281 core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
282
265 if (crtc->vsc.full > dfixed_const(2)) 283 if (crtc->vsc.full > dfixed_const(2))
266 wm->num_line_pair.full = dfixed_const(2); 284 wm->num_line_pair.full = dfixed_const(2);
267 else 285 else
@@ -322,36 +340,36 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
322 wm->active_time.full = dfixed_div(wm->active_time, a); 340 wm->active_time.full = dfixed_div(wm->active_time, a);
323 341
324 /* Maximum bandwidth is the minimum bandwidth of all components */ 342 /* Maximum bandwidth is the minimum bandwidth of all components */
325 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; 343 max_bandwidth = core_bandwidth;
326 if (rdev->mc.igp_sideport_enabled) { 344 if (rdev->mc.igp_sideport_enabled) {
327 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
328 rdev->pm.sideport_bandwidth.full) 346 rdev->pm.sideport_bandwidth.full)
329 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; 347 max_bandwidth = rdev->pm.sideport_bandwidth;
330 read_delay_latency.full = dfixed_const(370 * 800 * 1000); 348 read_delay_latency.full = dfixed_const(370 * 800 * 1000);
331 read_delay_latency.full = dfixed_div(read_delay_latency, 349 read_delay_latency.full = dfixed_div(read_delay_latency,
332 rdev->pm.igp_sideport_mclk); 350 rdev->pm.igp_sideport_mclk);
333 } else { 351 } else {
334 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && 352 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
335 rdev->pm.k8_bandwidth.full) 353 rdev->pm.k8_bandwidth.full)
336 rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; 354 max_bandwidth = rdev->pm.k8_bandwidth;
337 if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && 355 if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
338 rdev->pm.ht_bandwidth.full) 356 rdev->pm.ht_bandwidth.full)
339 rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; 357 max_bandwidth = rdev->pm.ht_bandwidth;
340 read_delay_latency.full = dfixed_const(5000); 358 read_delay_latency.full = dfixed_const(5000);
341 } 359 }
342 360
343 /* sclk = system clock(ns) = 1000 / max_bandwidth / 16 */ 361 /* sclk = system clock(ns) = 1000 / max_bandwidth / 16 */
344 a.full = dfixed_const(16); 362 a.full = dfixed_const(16);
345 rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); 363 sclk.full = dfixed_mul(max_bandwidth, a);
346 a.full = dfixed_const(1000); 364 a.full = dfixed_const(1000);
347 rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); 365 sclk.full = dfixed_div(a, sclk);
348 /* Determine chunk time 366 /* Determine chunk time
349 * ChunkTime = the time it takes the DCP to send one chunk of data 367 * ChunkTime = the time it takes the DCP to send one chunk of data
350 * to the LB which consists of pipeline delay and inter chunk gap 368 * to the LB which consists of pipeline delay and inter chunk gap
351 * sclk = system clock(ns) 369 * sclk = system clock(ns)
352 */ 370 */
353 a.full = dfixed_const(256 * 13); 371 a.full = dfixed_const(256 * 13);
354 chunk_time.full = dfixed_mul(rdev->pm.sclk, a); 372 chunk_time.full = dfixed_mul(sclk, a);
355 a.full = dfixed_const(10); 373 a.full = dfixed_const(10);
356 chunk_time.full = dfixed_div(chunk_time, a); 374 chunk_time.full = dfixed_div(chunk_time, a);
357 375
@@ -415,175 +433,200 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
415 } 433 }
416} 434}
417 435
418void rs690_bandwidth_update(struct radeon_device *rdev) 436static void rs690_compute_mode_priority(struct radeon_device *rdev,
437 struct rs690_watermark *wm0,
438 struct rs690_watermark *wm1,
439 struct drm_display_mode *mode0,
440 struct drm_display_mode *mode1,
441 u32 *d1mode_priority_a_cnt,
442 u32 *d2mode_priority_a_cnt)
419{ 443{
420 struct drm_display_mode *mode0 = NULL;
421 struct drm_display_mode *mode1 = NULL;
422 struct rs690_watermark wm0;
423 struct rs690_watermark wm1;
424 u32 tmp;
425 u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
426 u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
427 fixed20_12 priority_mark02, priority_mark12, fill_rate; 444 fixed20_12 priority_mark02, priority_mark12, fill_rate;
428 fixed20_12 a, b; 445 fixed20_12 a, b;
429 446
430 radeon_update_display_priority(rdev); 447 *d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
431 448 *d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
432 if (rdev->mode_info.crtcs[0]->base.enabled)
433 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
434 if (rdev->mode_info.crtcs[1]->base.enabled)
435 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
436 /*
437 * Set display0/1 priority up in the memory controller for
438 * modes if the user specifies HIGH for displaypriority
439 * option.
440 */
441 if ((rdev->disp_priority == 2) &&
442 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
443 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
444 tmp &= C_000104_MC_DISP0R_INIT_LAT;
445 tmp &= C_000104_MC_DISP1R_INIT_LAT;
446 if (mode0)
447 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
448 if (mode1)
449 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
450 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
451 }
452 rs690_line_buffer_adjust(rdev, mode0, mode1);
453
454 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
455 WREG32(R_006C9C_DCP_CONTROL, 0);
456 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
457 WREG32(R_006C9C_DCP_CONTROL, 2);
458
459 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
460 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
461
462 tmp = (wm0.lb_request_fifo_depth - 1);
463 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
464 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
465 449
466 if (mode0 && mode1) { 450 if (mode0 && mode1) {
467 if (dfixed_trunc(wm0.dbpp) > 64) 451 if (dfixed_trunc(wm0->dbpp) > 64)
468 a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); 452 a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
469 else 453 else
470 a.full = wm0.num_line_pair.full; 454 a.full = wm0->num_line_pair.full;
471 if (dfixed_trunc(wm1.dbpp) > 64) 455 if (dfixed_trunc(wm1->dbpp) > 64)
472 b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); 456 b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
473 else 457 else
474 b.full = wm1.num_line_pair.full; 458 b.full = wm1->num_line_pair.full;
475 a.full += b.full; 459 a.full += b.full;
476 fill_rate.full = dfixed_div(wm0.sclk, a); 460 fill_rate.full = dfixed_div(wm0->sclk, a);
477 if (wm0.consumption_rate.full > fill_rate.full) { 461 if (wm0->consumption_rate.full > fill_rate.full) {
478 b.full = wm0.consumption_rate.full - fill_rate.full; 462 b.full = wm0->consumption_rate.full - fill_rate.full;
479 b.full = dfixed_mul(b, wm0.active_time); 463 b.full = dfixed_mul(b, wm0->active_time);
480 a.full = dfixed_mul(wm0.worst_case_latency, 464 a.full = dfixed_mul(wm0->worst_case_latency,
481 wm0.consumption_rate); 465 wm0->consumption_rate);
482 a.full = a.full + b.full; 466 a.full = a.full + b.full;
483 b.full = dfixed_const(16 * 1000); 467 b.full = dfixed_const(16 * 1000);
484 priority_mark02.full = dfixed_div(a, b); 468 priority_mark02.full = dfixed_div(a, b);
485 } else { 469 } else {
486 a.full = dfixed_mul(wm0.worst_case_latency, 470 a.full = dfixed_mul(wm0->worst_case_latency,
487 wm0.consumption_rate); 471 wm0->consumption_rate);
488 b.full = dfixed_const(16 * 1000); 472 b.full = dfixed_const(16 * 1000);
489 priority_mark02.full = dfixed_div(a, b); 473 priority_mark02.full = dfixed_div(a, b);
490 } 474 }
491 if (wm1.consumption_rate.full > fill_rate.full) { 475 if (wm1->consumption_rate.full > fill_rate.full) {
492 b.full = wm1.consumption_rate.full - fill_rate.full; 476 b.full = wm1->consumption_rate.full - fill_rate.full;
493 b.full = dfixed_mul(b, wm1.active_time); 477 b.full = dfixed_mul(b, wm1->active_time);
494 a.full = dfixed_mul(wm1.worst_case_latency, 478 a.full = dfixed_mul(wm1->worst_case_latency,
495 wm1.consumption_rate); 479 wm1->consumption_rate);
496 a.full = a.full + b.full; 480 a.full = a.full + b.full;
497 b.full = dfixed_const(16 * 1000); 481 b.full = dfixed_const(16 * 1000);
498 priority_mark12.full = dfixed_div(a, b); 482 priority_mark12.full = dfixed_div(a, b);
499 } else { 483 } else {
500 a.full = dfixed_mul(wm1.worst_case_latency, 484 a.full = dfixed_mul(wm1->worst_case_latency,
501 wm1.consumption_rate); 485 wm1->consumption_rate);
502 b.full = dfixed_const(16 * 1000); 486 b.full = dfixed_const(16 * 1000);
503 priority_mark12.full = dfixed_div(a, b); 487 priority_mark12.full = dfixed_div(a, b);
504 } 488 }
505 if (wm0.priority_mark.full > priority_mark02.full) 489 if (wm0->priority_mark.full > priority_mark02.full)
506 priority_mark02.full = wm0.priority_mark.full; 490 priority_mark02.full = wm0->priority_mark.full;
507 if (dfixed_trunc(priority_mark02) < 0) 491 if (dfixed_trunc(priority_mark02) < 0)
508 priority_mark02.full = 0; 492 priority_mark02.full = 0;
509 if (wm0.priority_mark_max.full > priority_mark02.full) 493 if (wm0->priority_mark_max.full > priority_mark02.full)
510 priority_mark02.full = wm0.priority_mark_max.full; 494 priority_mark02.full = wm0->priority_mark_max.full;
511 if (wm1.priority_mark.full > priority_mark12.full) 495 if (wm1->priority_mark.full > priority_mark12.full)
512 priority_mark12.full = wm1.priority_mark.full; 496 priority_mark12.full = wm1->priority_mark.full;
513 if (dfixed_trunc(priority_mark12) < 0) 497 if (dfixed_trunc(priority_mark12) < 0)
514 priority_mark12.full = 0; 498 priority_mark12.full = 0;
515 if (wm1.priority_mark_max.full > priority_mark12.full) 499 if (wm1->priority_mark_max.full > priority_mark12.full)
516 priority_mark12.full = wm1.priority_mark_max.full; 500 priority_mark12.full = wm1->priority_mark_max.full;
517 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 501 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
518 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 502 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
519 if (rdev->disp_priority == 2) { 503 if (rdev->disp_priority == 2) {
520 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); 504 *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
-			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
 		}
 	} else if (mode0) {
-		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
 		else
-			a.full = wm0.num_line_pair.full;
-		fill_rate.full = dfixed_div(wm0.sclk, a);
-		if (wm0.consumption_rate.full > fill_rate.full) {
-			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm0.active_time);
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = wm0->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			a.full = a.full + b.full;
 			b.full = dfixed_const(16 * 1000);
 			priority_mark02.full = dfixed_div(a, b);
 		} else {
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark02.full = dfixed_div(a, b);
 		}
-		if (wm0.priority_mark.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
 		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
-		if (wm0.priority_mark_max.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark_max.full;
-		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
 		if (rdev->disp_priority == 2)
-			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
 	} else if (mode1) {
-		if (dfixed_trunc(wm1.dbpp) > 64)
-			a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
 		else
-			a.full = wm1.num_line_pair.full;
-		fill_rate.full = dfixed_div(wm1.sclk, a);
-		if (wm1.consumption_rate.full > fill_rate.full) {
-			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm1.active_time);
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = wm1->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1->sclk, a);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			a.full = a.full + b.full;
 			b.full = dfixed_const(16 * 1000);
 			priority_mark12.full = dfixed_div(a, b);
 		} else {
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark12.full = dfixed_div(a, b);
 		}
-		if (wm1.priority_mark.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
 		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
-		if (wm1.priority_mark_max.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark_max.full;
-		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2)
-			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
 	}
+}
+
+void rs690_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rs690_watermark wm0_high, wm0_low;
+	struct rs690_watermark wm1_high, wm1_low;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
+	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Set display0/1 priority up in the memory controller for
+	 * modes if the user specifies HIGH for displaypriority
+	 * option.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
+		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+		tmp &= C_000104_MC_DISP0R_INIT_LAT;
+		tmp &= C_000104_MC_DISP1R_INIT_LAT;
+		if (mode0)
+			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+		if (mode1)
+			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+	}
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
+		WREG32(R_006C9C_DCP_CONTROL, 0);
+	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+		WREG32(R_006C9C_DCP_CONTROL, 2);
+
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
+
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
+
+	tmp = (wm0_high.lb_request_fifo_depth - 1);
+	tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
+	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
+
+	rs690_compute_mode_priority(rdev,
+				    &wm0_high, &wm1_high,
+				    mode0, mode1,
+				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
+	rs690_compute_mode_priority(rdev,
+				    &wm0_low, &wm1_low,
+				    mode0, mode1,
+				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
 
 	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
-	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
 	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
-	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
 }
 
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
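
The rs690.c hunk above hoists the duplicated priority-mark arithmetic into rs690_compute_mode_priority(), which now takes the watermarks by pointer and returns the counts through out-parameters; the B-count registers, which previously received a copy of the A-count values, are now fed from a second pass computed with the low-power watermarks. The whole computation runs on 20.12 fixed point. Below is a minimal stand-alone model of the dfixed_* helpers, assuming the layout of include/drm/drm_fixed.h but with simplified rounding and hypothetical sample values, so it is an illustrative sketch rather than the kernel implementation:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;	/* 20 integer bits, 12 fractional bits */

static fixed20_12 dfixed_const(uint32_t v)	{ return (fixed20_12){ v << 12 }; }
static uint32_t dfixed_trunc(fixed20_12 a)	{ return a.full >> 12; }

static fixed20_12 dfixed_mul(fixed20_12 a, fixed20_12 b)
{
	uint64_t t = (uint64_t)a.full * b.full;	/* widen before rescaling */
	return (fixed20_12){ (uint32_t)(t >> 12) };
}

static fixed20_12 dfixed_div(fixed20_12 a, fixed20_12 b)
{
	uint64_t t = (uint64_t)a.full << 12;	/* pre-scale the dividend */
	return (fixed20_12){ (uint32_t)(t / b.full) };
}

int main(void)
{
	/* The "else" branch above: priority_mark = latency * rate / (16 * 1000).
	 * Both inputs are hypothetical figures, not values from the patch. */
	fixed20_12 latency = dfixed_const(48000);
	fixed20_12 rate = dfixed_const(2);
	fixed20_12 a = dfixed_mul(latency, rate);
	fixed20_12 b = dfixed_const(16 * 1000);

	printf("priority mark: %u\n", dfixed_trunc(dfixed_div(a, b)));	/* prints 6 */
	return 0;
}
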
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
new file mode 100644
index 000000000000..bef832a62fee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -0,0 +1,963 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rs780d.h"
28#include "r600_dpm.h"
29#include "rs780_dpm.h"
30#include "atom.h"
31
32static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
33{
34 struct igp_ps *ps = rps->ps_priv;
35
36 return ps;
37}
38
39static struct igp_power_info *rs780_get_pi(struct radeon_device *rdev)
40{
41 struct igp_power_info *pi = rdev->pm.dpm.priv;
42
43 return pi;
44}
45
46static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
47{
48 struct igp_power_info *pi = rs780_get_pi(rdev);
49 struct radeon_mode_info *minfo = &rdev->mode_info;
50 struct drm_crtc *crtc;
51 struct radeon_crtc *radeon_crtc;
52 int i;
53
54 /* defaults */
55 pi->crtc_id = 0;
56 pi->refresh_rate = 60;
57
58 for (i = 0; i < rdev->num_crtc; i++) {
59 crtc = (struct drm_crtc *)minfo->crtcs[i];
60 if (crtc && crtc->enabled) {
61 radeon_crtc = to_radeon_crtc(crtc);
62 pi->crtc_id = radeon_crtc->crtc_id;
63 if (crtc->mode.htotal && crtc->mode.vtotal)
64 pi->refresh_rate =
65 (crtc->mode.clock * 1000) /
66 (crtc->mode.htotal * crtc->mode.vtotal);
67 break;
68 }
69 }
70}
71
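
The loop above derives the refresh rate from the first enabled CRTC's timings: mode->clock is in kHz, so clock * 1000 / (htotal * vtotal) yields Hz. A stand-alone check of that arithmetic, using hypothetical CEA 1080p timings rather than values from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int clock = 148500;	/* pixel clock in kHz, as in mode->clock */
	unsigned int htotal = 2200, vtotal = 1125;

	unsigned int refresh = (clock * 1000) / (htotal * vtotal);
	printf("%u Hz\n", refresh);	/* prints 60 */
	return 0;
}
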
72static void rs780_voltage_scaling_enable(struct radeon_device *rdev, bool enable);
73
74static int rs780_initialize_dpm_power_state(struct radeon_device *rdev,
75 struct radeon_ps *boot_ps)
76{
77 struct atom_clock_dividers dividers;
78 struct igp_ps *default_state = rs780_get_ps(boot_ps);
79 int i, ret;
80
81 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
82 default_state->sclk_low, false, &dividers);
83 if (ret)
84 return ret;
85
86 r600_engine_clock_entry_set_reference_divider(rdev, 0, dividers.ref_div);
87 r600_engine_clock_entry_set_feedback_divider(rdev, 0, dividers.fb_div);
88 r600_engine_clock_entry_set_post_divider(rdev, 0, dividers.post_div);
89
90 if (dividers.enable_post_div)
91 r600_engine_clock_entry_enable_post_divider(rdev, 0, true);
92 else
93 r600_engine_clock_entry_enable_post_divider(rdev, 0, false);
94
95 r600_engine_clock_entry_set_step_time(rdev, 0, R600_SST_DFLT);
96 r600_engine_clock_entry_enable_pulse_skipping(rdev, 0, false);
97
98 r600_engine_clock_entry_enable(rdev, 0, true);
99 for (i = 1; i < R600_PM_NUMBER_OF_SCLKS; i++)
100 r600_engine_clock_entry_enable(rdev, i, false);
101
102 r600_enable_mclk_control(rdev, false);
103 r600_voltage_control_enable_pins(rdev, 0);
104
105 return 0;
106}
107
108static int rs780_initialize_dpm_parameters(struct radeon_device *rdev,
109 struct radeon_ps *boot_ps)
110{
111 int ret = 0;
112 int i;
113
114 r600_set_bsp(rdev, R600_BSU_DFLT, R600_BSP_DFLT);
115
116 r600_set_at(rdev, 0, 0, 0, 0);
117
118 r600_set_git(rdev, R600_GICST_DFLT);
119
120 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
121 r600_set_tc(rdev, i, 0, 0);
122
123 r600_select_td(rdev, R600_TD_DFLT);
124 r600_set_vrc(rdev, 0);
125
126 r600_set_tpu(rdev, R600_TPU_DFLT);
127 r600_set_tpc(rdev, R600_TPC_DFLT);
128
129 r600_set_sstu(rdev, R600_SSTU_DFLT);
130 r600_set_sst(rdev, R600_SST_DFLT);
131
132 r600_set_fctu(rdev, R600_FCTU_DFLT);
133 r600_set_fct(rdev, R600_FCT_DFLT);
134
135 r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
136 r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
137 r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
138 r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
139 r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
140
141 r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);
142 r600_vid_rt_set_vrt(rdev, R600_VOLTAGERESPONSETIME_DFLT);
143 r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
144
145 ret = rs780_initialize_dpm_power_state(rdev, boot_ps);
146
147 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW, 0);
148 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
149 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH, 0);
150
151 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
152 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
153 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH, 0);
154
155 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
156 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM, 0);
157 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH, 0);
158
159 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW, R600_DISPLAY_WATERMARK_HIGH);
160 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM, R600_DISPLAY_WATERMARK_HIGH);
161 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH, R600_DISPLAY_WATERMARK_HIGH);
162
163 r600_power_level_enable(rdev, R600_POWER_LEVEL_CTXSW, false);
164 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
165 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
166 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
167
168 r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_LOW);
169
170 r600_set_vrc(rdev, RS780_CGFTV_DFLT);
171
172 return ret;
173}
174
175static void rs780_start_dpm(struct radeon_device *rdev)
176{
177 r600_enable_sclk_control(rdev, false);
178 r600_enable_mclk_control(rdev, false);
179
180 r600_dynamicpm_enable(rdev, true);
181
182 radeon_wait_for_vblank(rdev, 0);
183 radeon_wait_for_vblank(rdev, 1);
184
185 r600_enable_spll_bypass(rdev, true);
186 r600_wait_for_spll_change(rdev);
187 r600_enable_spll_bypass(rdev, false);
188 r600_wait_for_spll_change(rdev);
189
190 r600_enable_spll_bypass(rdev, true);
191 r600_wait_for_spll_change(rdev);
192 r600_enable_spll_bypass(rdev, false);
193 r600_wait_for_spll_change(rdev);
194
195 r600_enable_sclk_control(rdev, true);
196}
197
198
199static void rs780_preset_ranges_slow_clk_fbdiv_en(struct radeon_device *rdev)
200{
201 WREG32_P(FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1, RANGE_SLOW_CLK_FEEDBACK_DIV_EN,
202 ~RANGE_SLOW_CLK_FEEDBACK_DIV_EN);
203
204 WREG32_P(FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1,
205 RANGE0_SLOW_CLK_FEEDBACK_DIV(RS780_SLOWCLKFEEDBACKDIV_DFLT),
206 ~RANGE0_SLOW_CLK_FEEDBACK_DIV_MASK);
207}
208
209static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
210{
211 u32 fbdiv = (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
212
213 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fbdiv),
214 ~STARTING_FEEDBACK_DIV_MASK);
215
216 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fbdiv),
217 ~FORCED_FEEDBACK_DIV_MASK);
218
219 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
220}
221
222static void rs780_voltage_scaling_init(struct radeon_device *rdev)
223{
224 struct igp_power_info *pi = rs780_get_pi(rdev);
225 struct drm_device *dev = rdev->ddev;
226 u32 fv_throt_pwm_fb_div_range[3];
227 u32 fv_throt_pwm_range[4];
228
229 if (dev->pdev->device == 0x9614) {
230 fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
231 fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
232 fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
233 } else if ((dev->pdev->device == 0x9714) ||
234 (dev->pdev->device == 0x9715)) {
235 fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
236 fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
237 fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
238 } else {
239 fv_throt_pwm_fb_div_range[0] = RS780_FVTHROTPWMFBDIVRANGEREG0_DFLT;
240 fv_throt_pwm_fb_div_range[1] = RS780_FVTHROTPWMFBDIVRANGEREG1_DFLT;
241 fv_throt_pwm_fb_div_range[2] = RS780_FVTHROTPWMFBDIVRANGEREG2_DFLT;
242 }
243
244 if (pi->pwm_voltage_control) {
245 fv_throt_pwm_range[0] = pi->min_voltage;
246 fv_throt_pwm_range[1] = pi->min_voltage;
247 fv_throt_pwm_range[2] = pi->max_voltage;
248 fv_throt_pwm_range[3] = pi->max_voltage;
249 } else {
250 fv_throt_pwm_range[0] = pi->invert_pwm_required ?
251 RS780_FVTHROTPWMRANGE3_GPIO_DFLT : RS780_FVTHROTPWMRANGE0_GPIO_DFLT;
252 fv_throt_pwm_range[1] = pi->invert_pwm_required ?
253 RS780_FVTHROTPWMRANGE2_GPIO_DFLT : RS780_FVTHROTPWMRANGE1_GPIO_DFLT;
254 fv_throt_pwm_range[2] = pi->invert_pwm_required ?
255 RS780_FVTHROTPWMRANGE1_GPIO_DFLT : RS780_FVTHROTPWMRANGE2_GPIO_DFLT;
256 fv_throt_pwm_range[3] = pi->invert_pwm_required ?
257 RS780_FVTHROTPWMRANGE0_GPIO_DFLT : RS780_FVTHROTPWMRANGE3_GPIO_DFLT;
258 }
259
260 WREG32_P(FVTHROT_PWM_CTRL_REG0,
261 STARTING_PWM_HIGHTIME(pi->max_voltage),
262 ~STARTING_PWM_HIGHTIME_MASK);
263
264 WREG32_P(FVTHROT_PWM_CTRL_REG0,
265 NUMBER_OF_CYCLES_IN_PERIOD(pi->num_of_cycles_in_period),
266 ~NUMBER_OF_CYCLES_IN_PERIOD_MASK);
267
268 WREG32_P(FVTHROT_PWM_CTRL_REG0, FORCE_STARTING_PWM_HIGHTIME,
269 ~FORCE_STARTING_PWM_HIGHTIME);
270
271 if (pi->invert_pwm_required)
272 WREG32_P(FVTHROT_PWM_CTRL_REG0, INVERT_PWM_WAVEFORM, ~INVERT_PWM_WAVEFORM);
273 else
274 WREG32_P(FVTHROT_PWM_CTRL_REG0, 0, ~INVERT_PWM_WAVEFORM);
275
276 rs780_voltage_scaling_enable(rdev, true);
277
278 WREG32(FVTHROT_PWM_CTRL_REG1,
279 (MIN_PWM_HIGHTIME(pi->min_voltage) |
280 MAX_PWM_HIGHTIME(pi->max_voltage)));
281
282 WREG32(FVTHROT_PWM_US_REG0, RS780_FVTHROTPWMUSREG0_DFLT);
283 WREG32(FVTHROT_PWM_US_REG1, RS780_FVTHROTPWMUSREG1_DFLT);
284 WREG32(FVTHROT_PWM_DS_REG0, RS780_FVTHROTPWMDSREG0_DFLT);
285 WREG32(FVTHROT_PWM_DS_REG1, RS780_FVTHROTPWMDSREG1_DFLT);
286
287 WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1,
288 RANGE0_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[0]),
289 ~RANGE0_PWM_FEEDBACK_DIV_MASK);
290
291 WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG2,
292 (RANGE1_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[1]) |
293 RANGE2_PWM_FEEDBACK_DIV(fv_throt_pwm_fb_div_range[2])));
294
295 WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG3,
296 (RANGE0_PWM(fv_throt_pwm_range[1]) |
297 RANGE1_PWM(fv_throt_pwm_range[2])));
298 WREG32(FVTHROT_PWM_FEEDBACK_DIV_REG4,
299 (RANGE2_PWM(fv_throt_pwm_range[1]) |
300 RANGE3_PWM(fv_throt_pwm_range[2])));
301}
302
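
When the board needs an inverted PWM waveform, rs780_voltage_scaling_init() above consumes the GPIO range defaults in reverse order (RANGE3 feeds slot 0 and so on). A compact sketch of that selection, using the RS780 GPIO defaults declared later in rs780_dpm.h; the loop is a rewrite of the four conditional assignments, offered as an illustration only:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* RS780_FVTHROTPWMRANGE0..3_GPIO_DFLT from rs780_dpm.h */
	const unsigned int dflt[4] = { 0x1a, 0x1a, 0x0, 0x0 };
	bool invert_pwm_required = true;	/* hypothetical board */
	unsigned int fv_throt_pwm_range[4];
	int i;

	for (i = 0; i < 4; i++)		/* reversed table when inverting */
		fv_throt_pwm_range[i] =
			invert_pwm_required ? dflt[3 - i] : dflt[i];

	for (i = 0; i < 4; i++)
		printf("range%d = 0x%x\n", i, fv_throt_pwm_range[i]);
	return 0;
}
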
303static void rs780_clk_scaling_enable(struct radeon_device *rdev, bool enable)
304{
305 if (enable)
306 WREG32_P(FVTHROT_CNTRL_REG, ENABLE_FV_THROT | ENABLE_FV_UPDATE,
307 ~(ENABLE_FV_THROT | ENABLE_FV_UPDATE));
308 else
309 WREG32_P(FVTHROT_CNTRL_REG, 0,
310 ~(ENABLE_FV_THROT | ENABLE_FV_UPDATE));
311}
312
313static void rs780_voltage_scaling_enable(struct radeon_device *rdev, bool enable)
314{
315 if (enable)
316 WREG32_P(FVTHROT_CNTRL_REG, ENABLE_FV_THROT_IO, ~ENABLE_FV_THROT_IO);
317 else
318 WREG32_P(FVTHROT_CNTRL_REG, 0, ~ENABLE_FV_THROT_IO);
319}
320
321static void rs780_set_engine_clock_wfc(struct radeon_device *rdev)
322{
323 WREG32(FVTHROT_UTC0, RS780_FVTHROTUTC0_DFLT);
324 WREG32(FVTHROT_UTC1, RS780_FVTHROTUTC1_DFLT);
325 WREG32(FVTHROT_UTC2, RS780_FVTHROTUTC2_DFLT);
326 WREG32(FVTHROT_UTC3, RS780_FVTHROTUTC3_DFLT);
327 WREG32(FVTHROT_UTC4, RS780_FVTHROTUTC4_DFLT);
328
329 WREG32(FVTHROT_DTC0, RS780_FVTHROTDTC0_DFLT);
330 WREG32(FVTHROT_DTC1, RS780_FVTHROTDTC1_DFLT);
331 WREG32(FVTHROT_DTC2, RS780_FVTHROTDTC2_DFLT);
332 WREG32(FVTHROT_DTC3, RS780_FVTHROTDTC3_DFLT);
333 WREG32(FVTHROT_DTC4, RS780_FVTHROTDTC4_DFLT);
334}
335
336static void rs780_set_engine_clock_sc(struct radeon_device *rdev)
337{
338 WREG32_P(FVTHROT_FBDIV_REG2,
339 FB_DIV_TIMER_VAL(RS780_FBDIVTIMERVAL_DFLT),
340 ~FB_DIV_TIMER_VAL_MASK);
341
342 WREG32_P(FVTHROT_CNTRL_REG,
343 REFRESH_RATE_DIVISOR(0) | MINIMUM_CIP(0xf),
344 ~(REFRESH_RATE_DIVISOR_MASK | MINIMUM_CIP_MASK));
345}
346
347static void rs780_set_engine_clock_tdc(struct radeon_device *rdev)
348{
349 WREG32_P(FVTHROT_CNTRL_REG, 0, ~(FORCE_TREND_SEL | TREND_SEL_MODE));
350}
351
352static void rs780_set_engine_clock_ssc(struct radeon_device *rdev)
353{
354 WREG32(FVTHROT_FB_US_REG0, RS780_FVTHROTFBUSREG0_DFLT);
355 WREG32(FVTHROT_FB_US_REG1, RS780_FVTHROTFBUSREG1_DFLT);
356 WREG32(FVTHROT_FB_DS_REG0, RS780_FVTHROTFBDSREG0_DFLT);
357 WREG32(FVTHROT_FB_DS_REG1, RS780_FVTHROTFBDSREG1_DFLT);
358
359 WREG32_P(FVTHROT_FBDIV_REG1, MAX_FEEDBACK_STEP(1), ~MAX_FEEDBACK_STEP_MASK);
360}
361
362static void rs780_program_at(struct radeon_device *rdev)
363{
364 struct igp_power_info *pi = rs780_get_pi(rdev);
365
366 WREG32(FVTHROT_TARGET_REG, 30000000 / pi->refresh_rate);
367 WREG32(FVTHROT_CB1, 1000000 * 5 / pi->refresh_rate);
368 WREG32(FVTHROT_CB2, 1000000 * 10 / pi->refresh_rate);
369 WREG32(FVTHROT_CB3, 1000000 * 30 / pi->refresh_rate);
370 WREG32(FVTHROT_CB4, 1000000 * 50 / pi->refresh_rate);
371}
372
373static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
374{
375 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
376}
377
378static void rs780_force_voltage_to_high(struct radeon_device *rdev)
379{
380 struct igp_power_info *pi = rs780_get_pi(rdev);
381 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
382
383 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
384 (current_state->min_voltage == RS780_VDDC_LEVEL_HIGH))
385 return;
386
387 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
388
389 udelay(1);
390
391 WREG32_P(FVTHROT_PWM_CTRL_REG0,
392 STARTING_PWM_HIGHTIME(pi->max_voltage),
393 ~STARTING_PWM_HIGHTIME_MASK);
394
395 WREG32_P(FVTHROT_PWM_CTRL_REG0,
396 FORCE_STARTING_PWM_HIGHTIME, ~FORCE_STARTING_PWM_HIGHTIME);
397
398 WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1, 0,
399 ~RANGE_PWM_FEEDBACK_DIV_EN);
400
401 udelay(1);
402
403 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
404}
405
406static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
407 struct radeon_ps *new_ps,
408 struct radeon_ps *old_ps)
409{
410 struct atom_clock_dividers min_dividers, max_dividers, current_max_dividers;
411 struct igp_ps *new_state = rs780_get_ps(new_ps);
412 struct igp_ps *old_state = rs780_get_ps(old_ps);
413 int ret;
414
415 if ((new_state->sclk_high == old_state->sclk_high) &&
416 (new_state->sclk_low == old_state->sclk_low))
417 return 0;
418
419 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
420 new_state->sclk_low, false, &min_dividers);
421 if (ret)
422 return ret;
423
424 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
425 new_state->sclk_high, false, &max_dividers);
426 if (ret)
427 return ret;
428
429 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
430 old_state->sclk_high, false, &current_max_dividers);
431 if (ret)
432 return ret;
433
434 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
435
436 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div),
437 ~FORCED_FEEDBACK_DIV_MASK);
438 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div),
439 ~STARTING_FEEDBACK_DIV_MASK);
440 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
441
442 udelay(100);
443
444 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
445
446 if (max_dividers.fb_div > min_dividers.fb_div) {
447 WREG32_P(FVTHROT_FBDIV_REG0,
448 MIN_FEEDBACK_DIV(min_dividers.fb_div) |
449 MAX_FEEDBACK_DIV(max_dividers.fb_div),
450 ~(MIN_FEEDBACK_DIV_MASK | MAX_FEEDBACK_DIV_MASK));
451
452 WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
453 }
454
455 return 0;
456}
457
458static void rs780_set_engine_clock_spc(struct radeon_device *rdev,
459 struct radeon_ps *new_ps,
460 struct radeon_ps *old_ps)
461{
462 struct igp_ps *new_state = rs780_get_ps(new_ps);
463 struct igp_ps *old_state = rs780_get_ps(old_ps);
464 struct igp_power_info *pi = rs780_get_pi(rdev);
465
466 if ((new_state->sclk_high == old_state->sclk_high) &&
467 (new_state->sclk_low == old_state->sclk_low))
468 return;
469
470 if (pi->crtc_id == 0)
471 WREG32_P(CG_INTGFX_MISC, 0, ~FVTHROT_VBLANK_SEL);
472 else
473 WREG32_P(CG_INTGFX_MISC, FVTHROT_VBLANK_SEL, ~FVTHROT_VBLANK_SEL);
474
475}
476
477static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
478 struct radeon_ps *new_ps,
479 struct radeon_ps *old_ps)
480{
481 struct igp_ps *new_state = rs780_get_ps(new_ps);
482 struct igp_ps *old_state = rs780_get_ps(old_ps);
483
484 if ((new_state->sclk_high == old_state->sclk_high) &&
485 (new_state->sclk_low == old_state->sclk_low))
486 return;
487
488 rs780_clk_scaling_enable(rdev, true);
489}
490
491static u32 rs780_get_voltage_for_vddc_level(struct radeon_device *rdev,
492 enum rs780_vddc_level vddc)
493{
494 struct igp_power_info *pi = rs780_get_pi(rdev);
495
496 if (vddc == RS780_VDDC_LEVEL_HIGH)
497 return pi->max_voltage;
498 else if (vddc == RS780_VDDC_LEVEL_LOW)
499 return pi->min_voltage;
500 else
501 return pi->max_voltage;
502}
503
504static void rs780_enable_voltage_scaling(struct radeon_device *rdev,
505 struct radeon_ps *new_ps)
506{
507 struct igp_ps *new_state = rs780_get_ps(new_ps);
508 struct igp_power_info *pi = rs780_get_pi(rdev);
509 enum rs780_vddc_level vddc_high, vddc_low;
510
511 udelay(100);
512
513 if ((new_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
514 (new_state->min_voltage == RS780_VDDC_LEVEL_HIGH))
515 return;
516
517 vddc_high = rs780_get_voltage_for_vddc_level(rdev,
518 new_state->max_voltage);
519 vddc_low = rs780_get_voltage_for_vddc_level(rdev,
520 new_state->min_voltage);
521
522 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
523
524 udelay(1);
525 if (vddc_high > vddc_low) {
526 WREG32_P(FVTHROT_PWM_FEEDBACK_DIV_REG1,
527 RANGE_PWM_FEEDBACK_DIV_EN, ~RANGE_PWM_FEEDBACK_DIV_EN);
528
529 WREG32_P(FVTHROT_PWM_CTRL_REG0, 0, ~FORCE_STARTING_PWM_HIGHTIME);
530 } else if (vddc_high == vddc_low) {
531 if (pi->max_voltage != vddc_high) {
532 WREG32_P(FVTHROT_PWM_CTRL_REG0,
533 STARTING_PWM_HIGHTIME(vddc_high),
534 ~STARTING_PWM_HIGHTIME_MASK);
535
536 WREG32_P(FVTHROT_PWM_CTRL_REG0,
537 FORCE_STARTING_PWM_HIGHTIME,
538 ~FORCE_STARTING_PWM_HIGHTIME);
539 }
540 }
541
542 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
543}
544
545static void rs780_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
546 struct radeon_ps *new_ps,
547 struct radeon_ps *old_ps)
548{
549 struct igp_ps *new_state = rs780_get_ps(new_ps);
550 struct igp_ps *current_state = rs780_get_ps(old_ps);
551
552 if ((new_ps->vclk == old_ps->vclk) &&
553 (new_ps->dclk == old_ps->dclk))
554 return;
555
556 if (new_state->sclk_high >= current_state->sclk_high)
557 return;
558
559 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
560}
561
562static void rs780_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
563 struct radeon_ps *new_ps,
564 struct radeon_ps *old_ps)
565{
566 struct igp_ps *new_state = rs780_get_ps(new_ps);
567 struct igp_ps *current_state = rs780_get_ps(old_ps);
568
569 if ((new_ps->vclk == old_ps->vclk) &&
570 (new_ps->dclk == old_ps->dclk))
571 return;
572
573 if (new_state->sclk_high < current_state->sclk_high)
574 return;
575
576 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
577}
578
579int rs780_dpm_enable(struct radeon_device *rdev)
580{
581 struct igp_power_info *pi = rs780_get_pi(rdev);
582 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
583 int ret;
584
585 rs780_get_pm_mode_parameters(rdev);
586 rs780_disable_vbios_powersaving(rdev);
587
588 if (r600_dynamicpm_enabled(rdev))
589 return -EINVAL;
590 ret = rs780_initialize_dpm_parameters(rdev, boot_ps);
591 if (ret)
592 return ret;
593 rs780_start_dpm(rdev);
594
595 rs780_preset_ranges_slow_clk_fbdiv_en(rdev);
596 rs780_preset_starting_fbdiv(rdev);
597 if (pi->voltage_control)
598 rs780_voltage_scaling_init(rdev);
599 rs780_clk_scaling_enable(rdev, true);
600 rs780_set_engine_clock_sc(rdev);
601 rs780_set_engine_clock_wfc(rdev);
602 rs780_program_at(rdev);
603 rs780_set_engine_clock_tdc(rdev);
604 rs780_set_engine_clock_ssc(rdev);
605
606 if (pi->gfx_clock_gating)
607 r600_gfx_clockgating_enable(rdev, true);
608
609 if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
610 ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
611 if (ret)
612 return ret;
613 rdev->irq.dpm_thermal = true;
614 radeon_irq_set(rdev);
615 }
616
617 return 0;
618}
619
620void rs780_dpm_disable(struct radeon_device *rdev)
621{
622 struct igp_power_info *pi = rs780_get_pi(rdev);
623
624 r600_dynamicpm_enable(rdev, false);
625
626 rs780_clk_scaling_enable(rdev, false);
627 rs780_voltage_scaling_enable(rdev, false);
628
629 if (pi->gfx_clock_gating)
630 r600_gfx_clockgating_enable(rdev, false);
631
632 if (rdev->irq.installed &&
633 (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
634 rdev->irq.dpm_thermal = false;
635 radeon_irq_set(rdev);
636 }
637}
638
639int rs780_dpm_set_power_state(struct radeon_device *rdev)
640{
641 struct igp_power_info *pi = rs780_get_pi(rdev);
642 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
643 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
644 int ret;
645
646 rs780_get_pm_mode_parameters(rdev);
647
648 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
649
650 if (pi->voltage_control) {
651 rs780_force_voltage_to_high(rdev);
652 mdelay(5);
653 }
654
655 ret = rs780_set_engine_clock_scaling(rdev, new_ps, old_ps);
656 if (ret)
657 return ret;
658 rs780_set_engine_clock_spc(rdev, new_ps, old_ps);
659
660 rs780_activate_engine_clk_scaling(rdev, new_ps, old_ps);
661
662 if (pi->voltage_control)
663 rs780_enable_voltage_scaling(rdev, new_ps);
664
665 rs780_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
666
667 return 0;
668}
669
670void rs780_dpm_setup_asic(struct radeon_device *rdev)
671{
672
673}
674
675void rs780_dpm_display_configuration_changed(struct radeon_device *rdev)
676{
677 rs780_get_pm_mode_parameters(rdev);
678 rs780_program_at(rdev);
679}
680
681union igp_info {
682 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
683 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
684};
685
686union power_info {
687 struct _ATOM_POWERPLAY_INFO info;
688 struct _ATOM_POWERPLAY_INFO_V2 info_2;
689 struct _ATOM_POWERPLAY_INFO_V3 info_3;
690 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
691 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
692 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
693};
694
695union pplib_clock_info {
696 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
697 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
698 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
699 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
700};
701
702union pplib_power_state {
703 struct _ATOM_PPLIB_STATE v1;
704 struct _ATOM_PPLIB_STATE_V2 v2;
705};
706
707static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
708 struct radeon_ps *rps,
709 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
710 u8 table_rev)
711{
712 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
713 rps->class = le16_to_cpu(non_clock_info->usClassification);
714 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
715
716 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
717 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
718 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
719 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
720 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
721 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
722 } else {
723 rps->vclk = 0;
724 rps->dclk = 0;
725 }
726
727 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
728 rdev->pm.dpm.boot_ps = rps;
729 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
730 rdev->pm.dpm.uvd_ps = rps;
731}
732
733static void rs780_parse_pplib_clock_info(struct radeon_device *rdev,
734 struct radeon_ps *rps,
735 union pplib_clock_info *clock_info)
736{
737 struct igp_ps *ps = rs780_get_ps(rps);
738 u32 sclk;
739
740 sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
741 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
742 ps->sclk_low = sclk;
743 sclk = le16_to_cpu(clock_info->rs780.usHighEngineClockLow);
744 sclk |= clock_info->rs780.ucHighEngineClockHigh << 16;
745 ps->sclk_high = sclk;
746 switch (le16_to_cpu(clock_info->rs780.usVDDC)) {
747 case ATOM_PPLIB_RS780_VOLTAGE_NONE:
748 default:
749 ps->min_voltage = RS780_VDDC_LEVEL_UNKNOWN;
750 ps->max_voltage = RS780_VDDC_LEVEL_UNKNOWN;
751 break;
752 case ATOM_PPLIB_RS780_VOLTAGE_LOW:
753 ps->min_voltage = RS780_VDDC_LEVEL_LOW;
754 ps->max_voltage = RS780_VDDC_LEVEL_LOW;
755 break;
756 case ATOM_PPLIB_RS780_VOLTAGE_HIGH:
757 ps->min_voltage = RS780_VDDC_LEVEL_HIGH;
758 ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
759 break;
760 case ATOM_PPLIB_RS780_VOLTAGE_VARIABLE:
761 ps->min_voltage = RS780_VDDC_LEVEL_LOW;
762 ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
763 break;
764 }
765 ps->flags = le32_to_cpu(clock_info->rs780.ulFlags);
766
767 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
768 ps->sclk_low = rdev->clock.default_sclk;
769 ps->sclk_high = rdev->clock.default_sclk;
770 ps->min_voltage = RS780_VDDC_LEVEL_HIGH;
771 ps->max_voltage = RS780_VDDC_LEVEL_HIGH;
772 }
773}
774
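
rs780_parse_pplib_clock_info() above reassembles each engine clock from a little-endian 16-bit low word and an 8-bit high byte. A stand-alone sketch of the reassembly with a hypothetical table entry (PPLib clocks are in 10 kHz units; the le16_to_cpu step is assumed already done):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t usLowEngineClockLow = 0x86a0;	/* hypothetical entry */
	uint8_t ucLowEngineClockHigh = 0x01;

	uint32_t sclk = (uint32_t)usLowEngineClockLow |
			((uint32_t)ucLowEngineClockHigh << 16);

	/* 0x186a0 = 100000 * 10 kHz = 1 GHz */
	printf("sclk = %u (10 kHz units) = %u MHz\n", sclk, sclk / 100);
	return 0;
}
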
775static int rs780_parse_power_table(struct radeon_device *rdev)
776{
777 struct radeon_mode_info *mode_info = &rdev->mode_info;
778 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
779 union pplib_power_state *power_state;
780 int i;
781 union pplib_clock_info *clock_info;
782 union power_info *power_info;
783 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
784 u16 data_offset;
785 u8 frev, crev;
786 struct igp_ps *ps;
787
788 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
789 &frev, &crev, &data_offset))
790 return -EINVAL;
791 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
792
793 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
794 power_info->pplib.ucNumStates, GFP_KERNEL);
795 if (!rdev->pm.dpm.ps)
796 return -ENOMEM;
797 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
798 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
799 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
800
801 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
802 power_state = (union pplib_power_state *)
803 (mode_info->atom_context->bios + data_offset +
804 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
805 i * power_info->pplib.ucStateEntrySize);
806 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
807 (mode_info->atom_context->bios + data_offset +
808 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
809 (power_state->v1.ucNonClockStateIndex *
810 power_info->pplib.ucNonClockSize));
811 if (power_info->pplib.ucStateEntrySize - 1) {
812 clock_info = (union pplib_clock_info *)
813 (mode_info->atom_context->bios + data_offset +
814 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
815 (power_state->v1.ucClockStateIndices[0] *
816 power_info->pplib.ucClockInfoSize));
817 ps = kzalloc(sizeof(struct igp_ps), GFP_KERNEL);
818 if (ps == NULL) {
819 kfree(rdev->pm.dpm.ps);
820 return -ENOMEM;
821 }
822 rdev->pm.dpm.ps[i].ps_priv = ps;
823 rs780_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
824 non_clock_info,
825 power_info->pplib.ucNonClockSize);
826 rs780_parse_pplib_clock_info(rdev,
827 &rdev->pm.dpm.ps[i],
828 clock_info);
829 }
830 }
831 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
832 return 0;
833}
834
835int rs780_dpm_init(struct radeon_device *rdev)
836{
837 struct igp_power_info *pi;
838 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
839 union igp_info *info;
840 u16 data_offset;
841 u8 frev, crev;
842 int ret;
843
844 pi = kzalloc(sizeof(struct igp_power_info), GFP_KERNEL);
845 if (pi == NULL)
846 return -ENOMEM;
847 rdev->pm.dpm.priv = pi;
848
849 ret = rs780_parse_power_table(rdev);
850 if (ret)
851 return ret;
852
853 pi->voltage_control = false;
854 pi->gfx_clock_gating = true;
855
856 if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
857 &frev, &crev, &data_offset)) {
858 info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
859
860		/* Get various system information from the BIOS */
861 switch (crev) {
862 case 1:
863 pi->num_of_cycles_in_period =
864 info->info.ucNumberOfCyclesInPeriod;
865 pi->num_of_cycles_in_period |=
866 info->info.ucNumberOfCyclesInPeriodHi << 8;
867 pi->invert_pwm_required =
868 (pi->num_of_cycles_in_period & 0x8000) ? true : false;
869 pi->boot_voltage = info->info.ucStartingPWM_HighTime;
870 pi->max_voltage = info->info.ucMaxNBVoltage;
871 pi->max_voltage |= info->info.ucMaxNBVoltageHigh << 8;
872 pi->min_voltage = info->info.ucMinNBVoltage;
873 pi->min_voltage |= info->info.ucMinNBVoltageHigh << 8;
874 pi->inter_voltage_low =
875 le16_to_cpu(info->info.usInterNBVoltageLow);
876 pi->inter_voltage_high =
877 le16_to_cpu(info->info.usInterNBVoltageHigh);
878 pi->voltage_control = true;
879 pi->bootup_uma_clk = info->info.usK8MemoryClock * 100;
880 break;
881 case 2:
882 pi->num_of_cycles_in_period =
883 le16_to_cpu(info->info_2.usNumberOfCyclesInPeriod);
884 pi->invert_pwm_required =
885 (pi->num_of_cycles_in_period & 0x8000) ? true : false;
886 pi->boot_voltage =
887 le16_to_cpu(info->info_2.usBootUpNBVoltage);
888 pi->max_voltage =
889 le16_to_cpu(info->info_2.usMaxNBVoltage);
890 pi->min_voltage =
891 le16_to_cpu(info->info_2.usMinNBVoltage);
892 pi->system_config =
893 le32_to_cpu(info->info_2.ulSystemConfig);
894 pi->pwm_voltage_control =
895 (pi->system_config & 0x4) ? true : false;
896 pi->voltage_control = true;
897 pi->bootup_uma_clk = le32_to_cpu(info->info_2.ulBootUpUMAClock);
898 break;
899 default:
900 DRM_ERROR("No integrated system info for your GPU\n");
901 return -EINVAL;
902 }
903 if (pi->min_voltage > pi->max_voltage)
904 pi->voltage_control = false;
905 if (pi->pwm_voltage_control) {
906 if ((pi->num_of_cycles_in_period == 0) ||
907 (pi->max_voltage == 0) ||
908 (pi->min_voltage == 0))
909 pi->voltage_control = false;
910 } else {
911 if ((pi->num_of_cycles_in_period == 0) ||
912 (pi->max_voltage == 0))
913 pi->voltage_control = false;
914 }
915
916 return 0;
917 }
918 radeon_dpm_fini(rdev);
919 return -EINVAL;
920}
921
922void rs780_dpm_print_power_state(struct radeon_device *rdev,
923 struct radeon_ps *rps)
924{
925 struct igp_ps *ps = rs780_get_ps(rps);
926
927 r600_dpm_print_class_info(rps->class, rps->class2);
928 r600_dpm_print_cap_info(rps->caps);
929 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
930 printk("\t\tpower level 0 sclk: %u vddc_index: %d\n",
931 ps->sclk_low, ps->min_voltage);
932 printk("\t\tpower level 1 sclk: %u vddc_index: %d\n",
933 ps->sclk_high, ps->max_voltage);
934 r600_dpm_print_ps_status(rdev, rps);
935}
936
937void rs780_dpm_fini(struct radeon_device *rdev)
938{
939 int i;
940
941 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
942 kfree(rdev->pm.dpm.ps[i].ps_priv);
943 }
944 kfree(rdev->pm.dpm.ps);
945 kfree(rdev->pm.dpm.priv);
946}
947
948u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low)
949{
950 struct igp_ps *requested_state = rs780_get_ps(rdev->pm.dpm.requested_ps);
951
952 if (low)
953 return requested_state->sclk_low;
954 else
955 return requested_state->sclk_high;
956}
957
958u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
959{
960 struct igp_power_info *pi = rs780_get_pi(rdev);
961
962 return pi->bootup_uma_clk;
963}
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.h b/drivers/gpu/drm/radeon/rs780_dpm.h
new file mode 100644
index 000000000000..47a40b14fa43
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780_dpm.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RS780_DPM_H__
24#define __RS780_DPM_H__
25
26enum rs780_vddc_level {
27 RS780_VDDC_LEVEL_UNKNOWN = 0,
28 RS780_VDDC_LEVEL_LOW = 1,
29 RS780_VDDC_LEVEL_HIGH = 2,
30};
31
32struct igp_power_info {
33 /* flags */
34 bool invert_pwm_required;
35 bool pwm_voltage_control;
36 bool voltage_control;
37 bool gfx_clock_gating;
38 /* stored values */
39 u32 system_config;
40 u32 bootup_uma_clk;
41 u16 max_voltage;
42 u16 min_voltage;
43 u16 boot_voltage;
44 u16 inter_voltage_low;
45 u16 inter_voltage_high;
46 u16 num_of_cycles_in_period;
47 /* variable */
48 int crtc_id;
49 int refresh_rate;
50};
51
52struct igp_ps {
53 enum rs780_vddc_level min_voltage;
54 enum rs780_vddc_level max_voltage;
55 u32 sclk_low;
56 u32 sclk_high;
57 u32 flags;
58};
59
60#define RS780_CGFTV_DFLT 0x0303000f
61#define RS780_FBDIVTIMERVAL_DFLT 0x2710
62
63#define RS780_FVTHROTUTC0_DFLT 0x04010040
64#define RS780_FVTHROTUTC1_DFLT 0x04010040
65#define RS780_FVTHROTUTC2_DFLT 0x04010040
66#define RS780_FVTHROTUTC3_DFLT 0x04010040
67#define RS780_FVTHROTUTC4_DFLT 0x04010040
68
69#define RS780_FVTHROTDTC0_DFLT 0x04010040
70#define RS780_FVTHROTDTC1_DFLT 0x04010040
71#define RS780_FVTHROTDTC2_DFLT 0x04010040
72#define RS780_FVTHROTDTC3_DFLT 0x04010040
73#define RS780_FVTHROTDTC4_DFLT 0x04010040
74
75#define RS780_FVTHROTFBUSREG0_DFLT 0x00001001
76#define RS780_FVTHROTFBUSREG1_DFLT 0x00002002
77#define RS780_FVTHROTFBDSREG0_DFLT 0x00004001
78#define RS780_FVTHROTFBDSREG1_DFLT 0x00020010
79
80#define RS780_FVTHROTPWMUSREG0_DFLT 0x00002001
81#define RS780_FVTHROTPWMUSREG1_DFLT 0x00004003
82#define RS780_FVTHROTPWMDSREG0_DFLT 0x00002001
83#define RS780_FVTHROTPWMDSREG1_DFLT 0x00004003
84
85#define RS780_FVTHROTPWMFBDIVRANGEREG0_DFLT 0x37
86#define RS780_FVTHROTPWMFBDIVRANGEREG1_DFLT 0x4b
87#define RS780_FVTHROTPWMFBDIVRANGEREG2_DFLT 0x8b
88
89#define RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT 0x8b
90#define RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT 0x8c
91#define RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT 0xb5
92
93#define RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT 0x8d
94#define RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT 0x8e
95#define RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT 0xBa
96
97#define RS780_FVTHROTPWMRANGE0_GPIO_DFLT 0x1a
98#define RS780_FVTHROTPWMRANGE1_GPIO_DFLT 0x1a
99#define RS780_FVTHROTPWMRANGE2_GPIO_DFLT 0x0
100#define RS780_FVTHROTPWMRANGE3_GPIO_DFLT 0x0
101
102#define RS780_SLOWCLKFEEDBACKDIV_DFLT 110
103
104#define RS780_CGCLKGATING_DFLT 0x0000E204
105
106#define RS780_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
107#define RS780_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
108
109#endif
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h
new file mode 100644
index 000000000000..b1142ed1c628
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780d.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RS780D_H__
24#define __RS780D_H__
25
26#define CG_SPLL_FUNC_CNTL 0x600
27# define SPLL_RESET (1 << 0)
28# define SPLL_SLEEP (1 << 1)
29# define SPLL_REF_DIV(x) ((x) << 2)
30# define SPLL_REF_DIV_MASK (7 << 2)
31# define SPLL_FB_DIV(x) ((x) << 5)
32# define SPLL_FB_DIV_MASK (0xff << 2)
33# define SPLL_FB_DIV_SHIFT 2
34# define SPLL_PULSEEN (1 << 13)
35# define SPLL_PULSENUM(x) ((x) << 14)
36# define SPLL_PULSENUM_MASK (3 << 14)
37# define SPLL_SW_HILEN(x) ((x) << 16)
38# define SPLL_SW_HILEN_MASK (0xf << 16)
39# define SPLL_SW_LOLEN(x) ((x) << 20)
40# define SPLL_SW_LOLEN_MASK (0xf << 20)
41# define SPLL_DIVEN (1 << 24)
42# define SPLL_BYPASS_EN (1 << 25)
43# define SPLL_CHG_STATUS (1 << 29)
44# define SPLL_CTLREQ (1 << 30)
45# define SPLL_CTLACK (1 << 31)
46
47/* RS780/RS880 PM */
48#define FVTHROT_CNTRL_REG 0x3000
49#define DONT_WAIT_FOR_FBDIV_WRAP (1 << 0)
50#define MINIMUM_CIP(x) ((x) << 1)
51#define MINIMUM_CIP_SHIFT 1
52#define MINIMUM_CIP_MASK 0x1fffffe
53#define REFRESH_RATE_DIVISOR(x) ((x) << 25)
54#define REFRESH_RATE_DIVISOR_SHIFT 25
55#define REFRESH_RATE_DIVISOR_MASK (0x3 << 25)
56#define ENABLE_FV_THROT (1 << 27)
57#define ENABLE_FV_UPDATE (1 << 28)
58#define TREND_SEL_MODE (1 << 29)
59#define FORCE_TREND_SEL (1 << 30)
60#define ENABLE_FV_THROT_IO (1 << 31)
61#define FVTHROT_TARGET_REG 0x3004
62#define TARGET_IDLE_COUNT(x) ((x) << 0)
63#define TARGET_IDLE_COUNT_MASK 0xffffff
64#define TARGET_IDLE_COUNT_SHIFT 0
65#define FVTHROT_CB1 0x3008
66#define FVTHROT_CB2 0x300c
67#define FVTHROT_CB3 0x3010
68#define FVTHROT_CB4 0x3014
69#define FVTHROT_UTC0 0x3018
70#define FVTHROT_UTC1 0x301c
71#define FVTHROT_UTC2 0x3020
72#define FVTHROT_UTC3 0x3024
73#define FVTHROT_UTC4 0x3028
74#define FVTHROT_DTC0 0x302c
75#define FVTHROT_DTC1 0x3030
76#define FVTHROT_DTC2 0x3034
77#define FVTHROT_DTC3 0x3038
78#define FVTHROT_DTC4 0x303c
79#define FVTHROT_FBDIV_REG0 0x3040
80#define MIN_FEEDBACK_DIV(x) ((x) << 0)
81#define MIN_FEEDBACK_DIV_MASK 0xfff
82#define MIN_FEEDBACK_DIV_SHIFT 0
83#define MAX_FEEDBACK_DIV(x) ((x) << 12)
84#define MAX_FEEDBACK_DIV_MASK (0xfff << 12)
85#define MAX_FEEDBACK_DIV_SHIFT 12
86#define FVTHROT_FBDIV_REG1 0x3044
87#define MAX_FEEDBACK_STEP(x) ((x) << 0)
88#define MAX_FEEDBACK_STEP_MASK 0xfff
89#define MAX_FEEDBACK_STEP_SHIFT 0
90#define STARTING_FEEDBACK_DIV(x) ((x) << 12)
91#define STARTING_FEEDBACK_DIV_MASK (0xfff << 12)
92#define STARTING_FEEDBACK_DIV_SHIFT 12
93#define FORCE_FEEDBACK_DIV (1 << 24)
94#define FVTHROT_FBDIV_REG2 0x3048
95#define FORCED_FEEDBACK_DIV(x) ((x) << 0)
96#define FORCED_FEEDBACK_DIV_MASK 0xfff
97#define FORCED_FEEDBACK_DIV_SHIFT 0
98#define FB_DIV_TIMER_VAL(x) ((x) << 12)
99#define FB_DIV_TIMER_VAL_MASK (0xffff << 12)
100#define FB_DIV_TIMER_VAL_SHIFT 12
101#define FVTHROT_FB_US_REG0 0x304c
102#define FVTHROT_FB_US_REG1 0x3050
103#define FVTHROT_FB_DS_REG0 0x3054
104#define FVTHROT_FB_DS_REG1 0x3058
105#define FVTHROT_PWM_CTRL_REG0 0x305c
106#define STARTING_PWM_HIGHTIME(x) ((x) << 0)
107#define STARTING_PWM_HIGHTIME_MASK 0xfff
108#define STARTING_PWM_HIGHTIME_SHIFT 0
109#define NUMBER_OF_CYCLES_IN_PERIOD(x) ((x) << 12)
110#define NUMBER_OF_CYCLES_IN_PERIOD_MASK (0xfff << 12)
111#define NUMBER_OF_CYCLES_IN_PERIOD_SHIFT 12
112#define FORCE_STARTING_PWM_HIGHTIME (1 << 24)
113#define INVERT_PWM_WAVEFORM (1 << 25)
114#define FVTHROT_PWM_CTRL_REG1 0x3060
115#define MIN_PWM_HIGHTIME(x) ((x) << 0)
116#define MIN_PWM_HIGHTIME_MASK 0xfff
117#define MIN_PWM_HIGHTIME_SHIFT 0
118#define MAX_PWM_HIGHTIME(x) ((x) << 12)
119#define MAX_PWM_HIGHTIME_MASK (0xfff << 12)
120#define MAX_PWM_HIGHTIME_SHIFT 12
121#define FVTHROT_PWM_US_REG0 0x3064
122#define FVTHROT_PWM_US_REG1 0x3068
123#define FVTHROT_PWM_DS_REG0 0x306c
124#define FVTHROT_PWM_DS_REG1 0x3070
125#define FVTHROT_STATUS_REG0 0x3074
126#define CURRENT_FEEDBACK_DIV_MASK 0xfff
127#define CURRENT_FEEDBACK_DIV_SHIFT 0
128#define FVTHROT_STATUS_REG1 0x3078
129#define FVTHROT_STATUS_REG2 0x307c
130#define CG_INTGFX_MISC 0x3080
131#define FVTHROT_VBLANK_SEL (1 << 9)
132#define FVTHROT_PWM_FEEDBACK_DIV_REG1 0x308c
133#define RANGE0_PWM_FEEDBACK_DIV(x) ((x) << 0)
134#define RANGE0_PWM_FEEDBACK_DIV_MASK 0xfff
135#define RANGE0_PWM_FEEDBACK_DIV_SHIFT 0
136#define RANGE_PWM_FEEDBACK_DIV_EN (1 << 12)
137#define FVTHROT_PWM_FEEDBACK_DIV_REG2 0x3090
138#define RANGE1_PWM_FEEDBACK_DIV(x) ((x) << 0)
139#define RANGE1_PWM_FEEDBACK_DIV_MASK 0xfff
140#define RANGE1_PWM_FEEDBACK_DIV_SHIFT 0
141#define RANGE2_PWM_FEEDBACK_DIV(x) ((x) << 12)
142#define RANGE2_PWM_FEEDBACK_DIV_MASK (0xfff << 12)
143#define RANGE2_PWM_FEEDBACK_DIV_SHIFT 12
144#define FVTHROT_PWM_FEEDBACK_DIV_REG3 0x3094
145#define RANGE0_PWM(x) ((x) << 0)
146#define RANGE0_PWM_MASK 0xfff
147#define RANGE0_PWM_SHIFT 0
148#define RANGE1_PWM(x) ((x) << 12)
149#define RANGE1_PWM_MASK (0xfff << 12)
150#define RANGE1_PWM_SHIFT 12
151#define FVTHROT_PWM_FEEDBACK_DIV_REG4 0x3098
152#define RANGE2_PWM(x) ((x) << 0)
153#define RANGE2_PWM_MASK 0xfff
154#define RANGE2_PWM_SHIFT 0
155#define RANGE3_PWM(x) ((x) << 12)
156#define RANGE3_PWM_MASK (0xfff << 12)
157#define RANGE3_PWM_SHIFT 12
158#define FVTHROT_SLOW_CLK_FEEDBACK_DIV_REG1 0x30ac
159#define RANGE0_SLOW_CLK_FEEDBACK_DIV(x) ((x) << 0)
160#define RANGE0_SLOW_CLK_FEEDBACK_DIV_MASK 0xfff
161#define RANGE0_SLOW_CLK_FEEDBACK_DIV_SHIFT 0
162#define RANGE_SLOW_CLK_FEEDBACK_DIV_EN (1 << 12)
163
164#define GFX_MACRO_BYPASS_CNTL 0x30c0
165#define SPLL_BYPASS_CNTL (1 << 0)
166#define UPLL_BYPASS_CNTL (1 << 1)
167
168#endif
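
The register header above follows the usual radeon pattern: a FIELD(x) value macro, a FIELD_MASK, and read-modify-write through WREG32_P(reg, val, ~FIELD_MASK). A stand-alone sketch of those semantics under that assumption (the real macro lives in radeon.h; the array here is a stand-in for the MMIO aperture):

#include <stdint.h>

static uint32_t mmio[0x1000];

static uint32_t RREG32(uint32_t reg)		{ return mmio[reg >> 2]; }
static void WREG32(uint32_t reg, uint32_t v)	{ mmio[reg >> 2] = v; }

/* Keep the bits selected by 'mask', merge 'val' into the remainder;
 * callers pass ~FIELD_MASK, as throughout the patch. */
static void WREG32_P(uint32_t reg, uint32_t val, uint32_t mask)
{
	uint32_t tmp = RREG32(reg);

	tmp &= mask;
	tmp |= (val & ~mask);
	WREG32(reg, tmp);
}

#define FVTHROT_FBDIV_REG1		0x3044
#define STARTING_FEEDBACK_DIV(x)	((x) << 12)
#define STARTING_FEEDBACK_DIV_MASK	(0xfff << 12)

int main(void)
{
	/* Mirrors rs780_preset_starting_fbdiv(): program the divider
	 * field without disturbing neighbouring bits. */
	WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(0x37),
		 ~STARTING_FEEDBACK_DIV_MASK);
	return 0;
}
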
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 21c7d7b26e55..8ea1573ae820 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -937,13 +937,16 @@ struct rv515_watermark {
 };
 
 static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 					 struct radeon_crtc *crtc,
-					 struct rv515_watermark *wm)
+					 struct rv515_watermark *wm,
+					 bool low)
 {
 	struct drm_display_mode *mode = &crtc->base.mode;
 	fixed20_12 a, b, c;
 	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
 	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+	fixed20_12 sclk;
+	u32 selected_sclk;
 
 	if (!crtc->base.enabled) {
 		/* FIXME: wouldn't it better to set priority mark to maximum */
@@ -951,6 +954,18 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 		return;
 	}
 
+	/* rv6xx, rv7xx */
+	if ((rdev->family >= CHIP_RV610) &&
+	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		selected_sclk = radeon_dpm_get_sclk(rdev, low);
+	else
+		selected_sclk = rdev->pm.current_sclk;
+
+	/* sclk in Mhz */
+	a.full = dfixed_const(100);
+	sclk.full = dfixed_const(selected_sclk);
+	sclk.full = dfixed_div(sclk, a);
+
 	if (crtc->vsc.full > dfixed_const(2))
 		wm->num_line_pair.full = dfixed_const(2);
 	else
@@ -1016,7 +1031,7 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	 * sclk = system clock(Mhz)
 	 */
 	a.full = dfixed_const(600 * 1000);
-	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+	chunk_time.full = dfixed_div(a, sclk);
 	read_delay_latency.full = dfixed_const(1000);
 
 	/* Determine the worst case latency
@@ -1077,152 +1092,177 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	}
 }
 
-void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+static void rv515_compute_mode_priority(struct radeon_device *rdev,
+					struct rv515_watermark *wm0,
+					struct rv515_watermark *wm1,
+					struct drm_display_mode *mode0,
+					struct drm_display_mode *mode1,
+					u32 *d1mode_priority_a_cnt,
+					u32 *d2mode_priority_a_cnt)
 {
-	struct drm_display_mode *mode0 = NULL;
-	struct drm_display_mode *mode1 = NULL;
-	struct rv515_watermark wm0;
-	struct rv515_watermark wm1;
-	u32 tmp;
-	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
-	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
 	fixed20_12 priority_mark02, priority_mark12, fill_rate;
 	fixed20_12 a, b;
 
-	if (rdev->mode_info.crtcs[0]->base.enabled)
-		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
-	if (rdev->mode_info.crtcs[1]->base.enabled)
-		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
-	rs690_line_buffer_adjust(rdev, mode0, mode1);
-
-	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
-	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
-
-	tmp = wm0.lb_request_fifo_depth;
-	tmp |= wm1.lb_request_fifo_depth << 16;
-	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+	*d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	*d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
 
 	if (mode0 && mode1) {
-		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
 		else
-			a.full = wm0.num_line_pair.full;
-		if (dfixed_trunc(wm1.dbpp) > 64)
-			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+			a.full = wm0->num_line_pair.full;
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
 		else
-			b.full = wm1.num_line_pair.full;
+			b.full = wm1->num_line_pair.full;
 		a.full += b.full;
-		fill_rate.full = dfixed_div(wm0.sclk, a);
-		if (wm0.consumption_rate.full > fill_rate.full) {
-			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm0.active_time);
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark02.full = dfixed_div(a, b);
 		}
-		if (wm1.consumption_rate.full > fill_rate.full) {
-			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm1.active_time);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark12.full = dfixed_div(a, b);
 		}
-		if (wm0.priority_mark.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
 		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
-		if (wm0.priority_mark_max.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark_max.full;
-		if (wm1.priority_mark.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark.full;
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
 		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
-		if (wm1.priority_mark_max.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark_max.full;
-		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
-		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2) {
-			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
-			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		}
 	} else if (mode0) {
-		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
 		else
-			a.full = wm0.num_line_pair.full;
-		fill_rate.full = dfixed_div(wm0.sclk, a);
-		if (wm0.consumption_rate.full > fill_rate.full) {
-			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = wm0->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			b.full = dfixed_const(16);
 			priority_mark02.full = dfixed_div(a, b);
 		}
-		if (wm0.priority_mark.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
 		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
-		if (wm0.priority_mark_max.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark_max.full;
-		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
 		if (rdev->disp_priority == 2)
-			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 	} else if (mode1) {
-		if (dfixed_trunc(wm1.dbpp) > 64)
-			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
 		else
-			a.full = wm1.num_line_pair.full;
-		fill_rate.full = dfixed_div(wm1.sclk, a);
-		if (wm1.consumption_rate.full > fill_rate.full) {
-			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = wm1->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1->sclk, a);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark12.full = dfixed_div(a, b);
 		}
-		if (wm1.priority_mark.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
 		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
-		if (wm1.priority_mark_max.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark_max.full;
-		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
1218 if (rdev->disp_priority == 2) 1222 if (rdev->disp_priority == 2)
1219 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; 1223 *d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1220 } 1224 }
1225}
1226
1227void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1228{
1229 struct drm_display_mode *mode0 = NULL;
1230 struct drm_display_mode *mode1 = NULL;
1231 struct rv515_watermark wm0_high, wm0_low;
1232 struct rv515_watermark wm1_high, wm1_low;
1233 u32 tmp;
1234 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
1235 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
1236
1237 if (rdev->mode_info.crtcs[0]->base.enabled)
1238 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1239 if (rdev->mode_info.crtcs[1]->base.enabled)
1240 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1241 rs690_line_buffer_adjust(rdev, mode0, mode1);
1242
1243 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
1244 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
1245
1246 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false);
1247 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false);
1248
1249 tmp = wm0_high.lb_request_fifo_depth;
1250 tmp |= wm1_high.lb_request_fifo_depth << 16;
1251 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
1252
1253 rv515_compute_mode_priority(rdev,
1254 &wm0_high, &wm1_high,
1255 mode0, mode1,
1256 &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
1257 rv515_compute_mode_priority(rdev,
1258 &wm0_low, &wm1_low,
1259 mode0, mode1,
1260 &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
1221 1261
1222 WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); 1262 WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
1223 WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); 1263 WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
1224 WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); 1264 WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
1225 WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); 1265 WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
1226} 1266}
1227 1267
1228void rv515_bandwidth_update(struct radeon_device *rdev) 1268void rv515_bandwidth_update(struct radeon_device *rdev)
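/*
 * The signature of the new helper, inferred from its body and the call
 * sites above (the hunk containing the declaration falls outside this
 * excerpt):
 *
 *	static void rv515_compute_mode_priority(struct radeon_device *rdev,
 *						struct rv515_watermark *wm0,
 *						struct rv515_watermark *wm1,
 *						struct drm_display_mode *mode0,
 *						struct drm_display_mode *mode1,
 *						u32 *d1mode_priority_a_cnt,
 *						u32 *d2mode_priority_a_cnt);
 *
 * Taking the watermarks by pointer (hence the "." to "->" churn) and
 * returning the counts through output parameters lets the same math run
 * twice: once with the high-clock watermarks for the A registers and
 * once with the low-clock watermarks for the B registers.
 */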
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
new file mode 100644
index 000000000000..8303de267ee5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -0,0 +1,2085 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv6xxd.h"
28#include "r600_dpm.h"
29#include "rv6xx_dpm.h"
30#include "atom.h"
31#include <linux/seq_file.h>
32
33static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
34 u32 unscaled_count, u32 unit);
35
36static struct rv6xx_ps *rv6xx_get_ps(struct radeon_ps *rps)
37{
38 struct rv6xx_ps *ps = rps->ps_priv;
39
40 return ps;
41}
42
43static struct rv6xx_power_info *rv6xx_get_pi(struct radeon_device *rdev)
44{
45 struct rv6xx_power_info *pi = rdev->pm.dpm.priv;
46
47 return pi;
48}
49
50static void rv6xx_force_pcie_gen1(struct radeon_device *rdev)
51{
52 u32 tmp;
53 int i;
54
55 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
56	tmp &= ~LC_GEN2_EN;
57 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
58
59 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
60 tmp |= LC_INITIATE_LINK_SPEED_CHANGE;
61 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
62
63 for (i = 0; i < rdev->usec_timeout; i++) {
64 if (!(RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE))
65 break;
66 udelay(1);
67 }
68
69 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
70 tmp &= ~LC_INITIATE_LINK_SPEED_CHANGE;
71 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
72}
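/*
 * rv6xx_force_pcie_gen1() above uses the driver's usual bounded-poll
 * idiom: request the change, then spin on a status bit for at most
 * rdev->usec_timeout iterations of udelay(1). The skeleton, with reg
 * and bit as stand-ins:
 *
 *	for (i = 0; i < rdev->usec_timeout; i++) {
 *		if (!(RREG32_PCIE_PORT(reg) & bit))
 *			break;
 *		udelay(1);
 *	}
 *
 * The loop gives up silently on timeout; the speed change is treated
 * as best effort.
 */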
73
74static void rv6xx_enable_pcie_gen2_support(struct radeon_device *rdev)
75{
76 u32 tmp;
77
78 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
79
80 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
81 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
82 tmp |= LC_GEN2_EN;
83 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
84 }
85}
86
87static void rv6xx_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
88 bool enable)
89{
90 u32 tmp;
91
92 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
93 if (enable)
94 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
95 else
96 tmp |= LC_HW_VOLTAGE_IF_CONTROL(0);
97 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
98}
99
100static void rv6xx_enable_l0s(struct radeon_device *rdev)
101{
102 u32 tmp;
103
104 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
105 tmp |= LC_L0S_INACTIVITY(3);
106 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
107}
108
109static void rv6xx_enable_l1(struct radeon_device *rdev)
110{
111 u32 tmp;
112
113 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
114 tmp &= ~LC_L1_INACTIVITY_MASK;
115 tmp |= LC_L1_INACTIVITY(4);
116 tmp &= ~LC_PMI_TO_L1_DIS;
117 tmp &= ~LC_ASPM_TO_L1_DIS;
118 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
119}
120
121static void rv6xx_enable_pll_sleep_in_l1(struct radeon_device *rdev)
122{
123 u32 tmp;
124
125 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
126 tmp |= LC_L1_INACTIVITY(8);
127 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
128
129 /* NOTE, this is a PCIE indirect reg, not PCIE PORT */
130 tmp = RREG32_PCIE(PCIE_P_CNTL);
131 tmp |= P_PLL_PWRDN_IN_L1L23;
132 tmp &= ~P_PLL_BUF_PDNB;
133 tmp &= ~P_PLL_PDNB;
134 tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
135 WREG32_PCIE(PCIE_P_CNTL, tmp);
136}
137
138static int rv6xx_convert_clock_to_stepping(struct radeon_device *rdev,
139 u32 clock, struct rv6xx_sclk_stepping *step)
140{
141 int ret;
142 struct atom_clock_dividers dividers;
143
144 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
145 clock, false, &dividers);
146 if (ret)
147 return ret;
148
149 if (dividers.enable_post_div)
150 step->post_divider = 2 + (dividers.post_div & 0xF) + (dividers.post_div >> 4);
151 else
152 step->post_divider = 1;
153
154 step->vco_frequency = clock * step->post_divider;
155
156 return 0;
157}
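/*
 * Decoding example for the stepping conversion above: AtomBIOS packs
 * the post divider into one byte, low half in bits [3:0] and high half
 * in bits [7:4], with an implicit +2. A post_div byte of 0x21 (hi 2,
 * lo 1) therefore decodes to 2 + 1 + 2 = 5, and a target engine clock
 * of 60000 (driver clocks are in 10 kHz units, so 600 MHz) implies
 * step->vco_frequency = 60000 * 5 = 300000, i.e. a 3 GHz VCO.
 */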
158
159static void rv6xx_output_stepping(struct radeon_device *rdev,
160 u32 step_index, struct rv6xx_sclk_stepping *step)
161{
162 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
163 u32 ref_clk = rdev->clock.spll.reference_freq;
164 u32 fb_divider;
165 u32 spll_step_count = rv6xx_scale_count_given_unit(rdev,
166 R600_SPLLSTEPTIME_DFLT *
167 pi->spll_ref_div,
168 R600_SPLLSTEPUNIT_DFLT);
169
170 r600_engine_clock_entry_enable(rdev, step_index, true);
171 r600_engine_clock_entry_enable_pulse_skipping(rdev, step_index, false);
172
173 if (step->post_divider == 1)
174 r600_engine_clock_entry_enable_post_divider(rdev, step_index, false);
175 else {
176 u32 lo_len = (step->post_divider - 2) / 2;
177 u32 hi_len = step->post_divider - 2 - lo_len;
178
179 r600_engine_clock_entry_enable_post_divider(rdev, step_index, true);
180 r600_engine_clock_entry_set_post_divider(rdev, step_index, (hi_len << 4) | lo_len);
181 }
182
183 fb_divider = ((step->vco_frequency * pi->spll_ref_div) / ref_clk) >>
184 pi->fb_div_scale;
185
186 r600_engine_clock_entry_set_reference_divider(rdev, step_index,
187 pi->spll_ref_div - 1);
188 r600_engine_clock_entry_set_feedback_divider(rdev, step_index, fb_divider);
189 r600_engine_clock_entry_set_step_time(rdev, step_index, spll_step_count);
190
191}
192
193static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev,
194 struct rv6xx_sclk_stepping *cur,
195 bool increasing_vco, u32 step_size)
196{
197 struct rv6xx_sclk_stepping next;
198
199 next.post_divider = cur->post_divider;
200
201 if (increasing_vco)
202 next.vco_frequency = (cur->vco_frequency * (100 + step_size)) / 100;
203 else
204 next.vco_frequency = (cur->vco_frequency * 100 + 99 + step_size) / (100 + step_size);
205
206 return next;
207}
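/*
 * The VCO walk moves in multiplicative steps of step_size percent.
 * Going up: new = old * (100 + s) / 100. Going down it inverts the
 * same ratio with a round-up bias so repeated steps cannot stall just
 * short of the target: new = (old * 100 + 99 + s) / (100 + s). For
 * s = 20 and old = 300000 that is 360000 upward, and downward
 * (30000000 + 119) / 120 = 250000, which multiplied back by 1.2 lands
 * exactly on 300000.
 */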
208
209static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
210 struct rv6xx_sclk_stepping *cur,
211 struct rv6xx_sclk_stepping *target)
212{
213 return (cur->post_divider > target->post_divider) &&
214 ((cur->vco_frequency * target->post_divider) <=
215 (target->vco_frequency * (cur->post_divider - 1)));
216}
217
218static struct rv6xx_sclk_stepping rv6xx_next_post_div_step(struct radeon_device *rdev,
219 struct rv6xx_sclk_stepping *cur,
220 struct rv6xx_sclk_stepping *target)
221{
222 struct rv6xx_sclk_stepping next = *cur;
223
224 while (rv6xx_can_step_post_div(rdev, &next, target))
225 next.post_divider--;
226
227 return next;
228}
229
230static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,
231 struct rv6xx_sclk_stepping *cur,
232 struct rv6xx_sclk_stepping *target,
233 bool increasing_vco)
234{
235 return (increasing_vco && (cur->vco_frequency >= target->vco_frequency)) ||
236 (!increasing_vco && (cur->vco_frequency <= target->vco_frequency));
237}
238
239static void rv6xx_generate_steps(struct radeon_device *rdev,
240 u32 low, u32 high,
241 u32 start_index, u8 *end_index)
242{
243 struct rv6xx_sclk_stepping cur;
244 struct rv6xx_sclk_stepping target;
245 bool increasing_vco;
246 u32 step_index = start_index;
247
248 rv6xx_convert_clock_to_stepping(rdev, low, &cur);
249 rv6xx_convert_clock_to_stepping(rdev, high, &target);
250
251 rv6xx_output_stepping(rdev, step_index++, &cur);
252
253 increasing_vco = (target.vco_frequency >= cur.vco_frequency);
254
255 if (target.post_divider > cur.post_divider)
256 cur.post_divider = target.post_divider;
257
258 while (1) {
259 struct rv6xx_sclk_stepping next;
260
261 if (rv6xx_can_step_post_div(rdev, &cur, &target))
262 next = rv6xx_next_post_div_step(rdev, &cur, &target);
263 else
264 next = rv6xx_next_vco_step(rdev, &cur, increasing_vco, R600_VCOSTEPPCT_DFLT);
265
266 if (rv6xx_reached_stepping_target(rdev, &next, &target, increasing_vco)) {
267 struct rv6xx_sclk_stepping tiny =
268 rv6xx_next_vco_step(rdev, &target, !increasing_vco, R600_ENDINGVCOSTEPPCT_DFLT);
269 tiny.post_divider = next.post_divider;
270
271 if (!rv6xx_reached_stepping_target(rdev, &tiny, &cur, !increasing_vco))
272 rv6xx_output_stepping(rdev, step_index++, &tiny);
273
274 if ((next.post_divider != target.post_divider) &&
275 (next.vco_frequency != target.vco_frequency)) {
276 struct rv6xx_sclk_stepping final_vco;
277
278 final_vco.vco_frequency = target.vco_frequency;
279 final_vco.post_divider = next.post_divider;
280
281 rv6xx_output_stepping(rdev, step_index++, &final_vco);
282 }
283
284 rv6xx_output_stepping(rdev, step_index++, &target);
285 break;
286 } else
287 rv6xx_output_stepping(rdev, step_index++, &next);
288
289 cur = next;
290 }
291
292 *end_index = (u8)step_index - 1;
293
294}
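/*
 * rv6xx_generate_steps() in outline: convert low and high sclk into
 * (vco_frequency, post_divider) pairs, emit the starting step, then
 * loop, either stepping the post divider down (when that alone gets
 * closer to the target, per rv6xx_can_step_post_div()) or nudging the
 * VCO by R600_VCOSTEPPCT_DFLT percent, writing one engine-clock table
 * entry per iteration. Once the next step would reach or pass the
 * target it emits an optional small R600_ENDINGVCOSTEPPCT_DFLT step, a
 * VCO-only correction if the post divider still differs, and finally
 * the target itself; *end_index reports the last entry written.
 */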
295
296static void rv6xx_generate_single_step(struct radeon_device *rdev,
297 u32 clock, u32 index)
298{
299 struct rv6xx_sclk_stepping step;
300
301 rv6xx_convert_clock_to_stepping(rdev, clock, &step);
302 rv6xx_output_stepping(rdev, index, &step);
303}
304
305static void rv6xx_invalidate_intermediate_steps_range(struct radeon_device *rdev,
306 u32 start_index, u32 end_index)
307{
308 u32 step_index;
309
310 for (step_index = start_index + 1; step_index < end_index; step_index++)
311 r600_engine_clock_entry_enable(rdev, step_index, false);
312}
313
314static void rv6xx_set_engine_spread_spectrum_clk_s(struct radeon_device *rdev,
315 u32 index, u32 clk_s)
316{
317 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
318 CLKS(clk_s), ~CLKS_MASK);
319}
320
321static void rv6xx_set_engine_spread_spectrum_clk_v(struct radeon_device *rdev,
322 u32 index, u32 clk_v)
323{
324 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
325 CLKV(clk_v), ~CLKV_MASK);
326}
327
328static void rv6xx_enable_engine_spread_spectrum(struct radeon_device *rdev,
329 u32 index, bool enable)
330{
331 if (enable)
332 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
333 SSEN, ~SSEN);
334 else
335 WREG32_P(CG_SPLL_SPREAD_SPECTRUM_LOW + (index * 4),
336 0, ~SSEN);
337}
338
339static void rv6xx_set_memory_spread_spectrum_clk_s(struct radeon_device *rdev,
340 u32 clk_s)
341{
342 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, CLKS(clk_s), ~CLKS_MASK);
343}
344
345static void rv6xx_set_memory_spread_spectrum_clk_v(struct radeon_device *rdev,
346 u32 clk_v)
347{
348 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, CLKV(clk_v), ~CLKV_MASK);
349}
350
351static void rv6xx_enable_memory_spread_spectrum(struct radeon_device *rdev,
352 bool enable)
353{
354 if (enable)
355 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, SSEN, ~SSEN);
356 else
357 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);
358}
359
360static void rv6xx_enable_dynamic_spread_spectrum(struct radeon_device *rdev,
361 bool enable)
362{
363 if (enable)
364 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
365 else
366 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
367}
368
369static void rv6xx_memory_clock_entry_enable_post_divider(struct radeon_device *rdev,
370 u32 index, bool enable)
371{
372 if (enable)
373 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
374 LEVEL0_MPLL_DIV_EN, ~LEVEL0_MPLL_DIV_EN);
375 else
376 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4), 0, ~LEVEL0_MPLL_DIV_EN);
377}
378
379static void rv6xx_memory_clock_entry_set_post_divider(struct radeon_device *rdev,
380 u32 index, u32 divider)
381{
382 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
383 LEVEL0_MPLL_POST_DIV(divider), ~LEVEL0_MPLL_POST_DIV_MASK);
384}
385
386static void rv6xx_memory_clock_entry_set_feedback_divider(struct radeon_device *rdev,
387 u32 index, u32 divider)
388{
389 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4), LEVEL0_MPLL_FB_DIV(divider),
390 ~LEVEL0_MPLL_FB_DIV_MASK);
391}
392
393static void rv6xx_memory_clock_entry_set_reference_divider(struct radeon_device *rdev,
394 u32 index, u32 divider)
395{
396 WREG32_P(MPLL_FREQ_LEVEL_0 + (index * 4),
397 LEVEL0_MPLL_REF_DIV(divider), ~LEVEL0_MPLL_REF_DIV_MASK);
398}
399
400static void rv6xx_vid_response_set_brt(struct radeon_device *rdev, u32 rt)
401{
402 WREG32_P(VID_RT, BRT(rt), ~BRT_MASK);
403}
404
405static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device *rdev)
406{
407 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
408}
409
410static u64 rv6xx_clocks_per_unit(u32 unit)
411{
412 u64 tmp = 1 << (2 * unit);
413
414 return tmp;
415}
416
417static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
418 u32 unscaled_count, u32 unit)
419{
420 u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
421
422 return (unscaled_count + count_per_unit - 1) / count_per_unit;
423}
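/*
 * One "unit" is 4^unit clocks (1 << (2 * unit)), and the scaling is
 * plain ceiling division. Example: unit = 4 gives 256 clocks per unit,
 * so an unscaled count of 1000 becomes (1000 + 255) / 256 = 4 units.
 */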
424
425static u32 rv6xx_compute_count_for_delay(struct radeon_device *rdev,
426 u32 delay_us, u32 unit)
427{
428 u32 ref_clk = rdev->clock.spll.reference_freq;
429
430 return rv6xx_scale_count_given_unit(rdev, delay_us * (ref_clk / 100), unit);
431}
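/*
 * The ref_clk / 100 factor assumes reference_freq is kept in the
 * driver's usual 10 kHz units, making it the number of reference
 * cycles per microsecond: a 27 MHz reference is stored as 2700, so a
 * delay of delay_us microseconds is delay_us * 27 raw cycles before
 * being rounded up into 4^unit-clock units.
 */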
432
433static void rv6xx_calculate_engine_speed_stepping_parameters(struct radeon_device *rdev,
434 struct rv6xx_ps *state)
435{
436 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
437
438 pi->hw.sclks[R600_POWER_LEVEL_LOW] =
439 state->low.sclk;
440 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM] =
441 state->medium.sclk;
442 pi->hw.sclks[R600_POWER_LEVEL_HIGH] =
443 state->high.sclk;
444
445 pi->hw.low_sclk_index = R600_POWER_LEVEL_LOW;
446 pi->hw.medium_sclk_index = R600_POWER_LEVEL_MEDIUM;
447 pi->hw.high_sclk_index = R600_POWER_LEVEL_HIGH;
448}
449
450static void rv6xx_calculate_memory_clock_stepping_parameters(struct radeon_device *rdev,
451 struct rv6xx_ps *state)
452{
453 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
454
455 pi->hw.mclks[R600_POWER_LEVEL_CTXSW] =
456 state->high.mclk;
457 pi->hw.mclks[R600_POWER_LEVEL_HIGH] =
458 state->high.mclk;
459 pi->hw.mclks[R600_POWER_LEVEL_MEDIUM] =
460 state->medium.mclk;
461 pi->hw.mclks[R600_POWER_LEVEL_LOW] =
462 state->low.mclk;
463
464 pi->hw.high_mclk_index = R600_POWER_LEVEL_HIGH;
465
466 if (state->high.mclk == state->medium.mclk)
467 pi->hw.medium_mclk_index =
468 pi->hw.high_mclk_index;
469 else
470 pi->hw.medium_mclk_index = R600_POWER_LEVEL_MEDIUM;
471
472
473 if (state->medium.mclk == state->low.mclk)
474 pi->hw.low_mclk_index =
475 pi->hw.medium_mclk_index;
476 else
477 pi->hw.low_mclk_index = R600_POWER_LEVEL_LOW;
478}
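/*
 * The index collapsing above lets power levels that share a memory
 * clock share one hardware entry: for a state with high/medium/low
 * mclks of 800/800/700 MHz, medium reuses the high entry and only low
 * gets its own, so one fewer MPLL level has to be programmed (and
 * stepped through) later.
 */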
479
480static void rv6xx_calculate_voltage_stepping_parameters(struct radeon_device *rdev,
481 struct rv6xx_ps *state)
482{
483 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
484
485 pi->hw.vddc[R600_POWER_LEVEL_CTXSW] = state->high.vddc;
486 pi->hw.vddc[R600_POWER_LEVEL_HIGH] = state->high.vddc;
487 pi->hw.vddc[R600_POWER_LEVEL_MEDIUM] = state->medium.vddc;
488 pi->hw.vddc[R600_POWER_LEVEL_LOW] = state->low.vddc;
489
490 pi->hw.backbias[R600_POWER_LEVEL_CTXSW] =
491 (state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
492 pi->hw.backbias[R600_POWER_LEVEL_HIGH] =
493 (state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
494 pi->hw.backbias[R600_POWER_LEVEL_MEDIUM] =
495 (state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
496 pi->hw.backbias[R600_POWER_LEVEL_LOW] =
497 (state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? true : false;
498
499 pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH] =
500 (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
501 pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM] =
502 (state->medium.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
503 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW] =
504 (state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? true : false;
505
506 pi->hw.high_vddc_index = R600_POWER_LEVEL_HIGH;
507
508 if ((state->high.vddc == state->medium.vddc) &&
509 ((state->high.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
510 (state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
511 pi->hw.medium_vddc_index =
512 pi->hw.high_vddc_index;
513 else
514 pi->hw.medium_vddc_index = R600_POWER_LEVEL_MEDIUM;
515
516 if ((state->medium.vddc == state->low.vddc) &&
517 ((state->medium.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ==
518 (state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE)))
519 pi->hw.low_vddc_index =
520 pi->hw.medium_vddc_index;
521 else
522		pi->hw.low_vddc_index = R600_POWER_LEVEL_LOW;
523}
524
525static inline u32 rv6xx_calculate_vco_frequency(u32 ref_clock,
526 struct atom_clock_dividers *dividers,
527 u32 fb_divider_scale)
528{
529 return ref_clock * ((dividers->fb_div & ~1) << fb_divider_scale) /
530 (dividers->ref_div + 1);
531}
532
533static inline u32 rv6xx_calculate_spread_spectrum_clk_v(u32 vco_freq, u32 ref_freq,
534 u32 ss_rate, u32 ss_percent,
535 u32 fb_divider_scale)
536{
537 u32 fb_divider = vco_freq / ref_freq;
538
539 return (ss_percent * ss_rate * 4 * (fb_divider * fb_divider) /
540 (5375 * ((vco_freq * 10) / (4096 >> fb_divider_scale))));
541}
542
543static inline u32 rv6xx_calculate_spread_spectrum_clk_s(u32 ss_rate, u32 ref_freq)
544{
545 return (((ref_freq * 10) / (ss_rate * 2)) - 1) / 4;
546}
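/*
 * Reading the two helpers above: clk_s sets the spread-spectrum
 * modulation period, derived from the post-reference-divider clock
 * over twice the SS rate, while clk_v sets the modulation depth,
 * scaling with ss_percent and with the square of the effective
 * feedback divider. The exact register units are not spelled out here;
 * what matters structurally is that both are per-PLL values, so the
 * engine (SPLL) and memory (MPLL) paths below each pass their own
 * ref_div and VCO frequency.
 */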
547
548static void rv6xx_program_engine_spread_spectrum(struct radeon_device *rdev,
549 u32 clock, enum r600_power_level level)
550{
551 u32 ref_clk = rdev->clock.spll.reference_freq;
552 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
553 struct atom_clock_dividers dividers;
554 struct radeon_atom_ss ss;
555 u32 vco_freq, clk_v, clk_s;
556
557 rv6xx_enable_engine_spread_spectrum(rdev, level, false);
558
559 if (clock && pi->sclk_ss) {
560 if (radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, clock, false, &dividers) == 0) {
561 vco_freq = rv6xx_calculate_vco_frequency(ref_clk, &dividers,
562 pi->fb_div_scale);
563
564 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
565 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
566 clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
567 (ref_clk / (dividers.ref_div + 1)),
568 ss.rate,
569 ss.percentage,
570 pi->fb_div_scale);
571
572 clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
573 (ref_clk / (dividers.ref_div + 1)));
574
575 rv6xx_set_engine_spread_spectrum_clk_v(rdev, level, clk_v);
576 rv6xx_set_engine_spread_spectrum_clk_s(rdev, level, clk_s);
577 rv6xx_enable_engine_spread_spectrum(rdev, level, true);
578 }
579 }
580 }
581}
582
583static void rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(struct radeon_device *rdev)
584{
585 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
586
587 rv6xx_program_engine_spread_spectrum(rdev,
588 pi->hw.sclks[R600_POWER_LEVEL_HIGH],
589 R600_POWER_LEVEL_HIGH);
590
591 rv6xx_program_engine_spread_spectrum(rdev,
592 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM],
593 R600_POWER_LEVEL_MEDIUM);
594
595}
596
597static int rv6xx_program_mclk_stepping_entry(struct radeon_device *rdev,
598 u32 entry, u32 clock)
599{
600 struct atom_clock_dividers dividers;
601
602 if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM, clock, false, &dividers))
603 return -EINVAL;
604
605
606 rv6xx_memory_clock_entry_set_reference_divider(rdev, entry, dividers.ref_div);
607 rv6xx_memory_clock_entry_set_feedback_divider(rdev, entry, dividers.fb_div);
608 rv6xx_memory_clock_entry_set_post_divider(rdev, entry, dividers.post_div);
609
610 if (dividers.enable_post_div)
611 rv6xx_memory_clock_entry_enable_post_divider(rdev, entry, true);
612 else
613 rv6xx_memory_clock_entry_enable_post_divider(rdev, entry, false);
614
615 return 0;
616}
617
618static void rv6xx_program_mclk_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
619{
620 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
621 int i;
622
623 for (i = 1; i < R600_PM_NUMBER_OF_MCLKS; i++) {
624 if (pi->hw.mclks[i])
625 rv6xx_program_mclk_stepping_entry(rdev, i,
626 pi->hw.mclks[i]);
627 }
628}
629
630static void rv6xx_find_memory_clock_with_highest_vco(struct radeon_device *rdev,
631 u32 requested_memory_clock,
632 u32 ref_clk,
633 struct atom_clock_dividers *dividers,
634 u32 *vco_freq)
635{
636 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
637 struct atom_clock_dividers req_dividers;
638 u32 vco_freq_temp;
639
640 if (radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
641 requested_memory_clock, false, &req_dividers) == 0) {
642 vco_freq_temp = rv6xx_calculate_vco_frequency(ref_clk, &req_dividers,
643 pi->fb_div_scale);
644
645 if (vco_freq_temp > *vco_freq) {
646 *dividers = req_dividers;
647 *vco_freq = vco_freq_temp;
648 }
649 }
650}
651
652static void rv6xx_program_mclk_spread_spectrum_parameters(struct radeon_device *rdev)
653{
654 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
655 u32 ref_clk = rdev->clock.mpll.reference_freq;
656 struct atom_clock_dividers dividers;
657 struct radeon_atom_ss ss;
658 u32 vco_freq = 0, clk_v, clk_s;
659
660 rv6xx_enable_memory_spread_spectrum(rdev, false);
661
662 if (pi->mclk_ss) {
663 rv6xx_find_memory_clock_with_highest_vco(rdev,
664 pi->hw.mclks[pi->hw.high_mclk_index],
665 ref_clk,
666 &dividers,
667 &vco_freq);
668
669 rv6xx_find_memory_clock_with_highest_vco(rdev,
670 pi->hw.mclks[pi->hw.medium_mclk_index],
671 ref_clk,
672 &dividers,
673 &vco_freq);
674
675 rv6xx_find_memory_clock_with_highest_vco(rdev,
676 pi->hw.mclks[pi->hw.low_mclk_index],
677 ref_clk,
678 &dividers,
679 &vco_freq);
680
681 if (vco_freq) {
682 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
683 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
684 clk_v = rv6xx_calculate_spread_spectrum_clk_v(vco_freq,
685 (ref_clk / (dividers.ref_div + 1)),
686 ss.rate,
687 ss.percentage,
688 pi->fb_div_scale);
689
690 clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate,
691 (ref_clk / (dividers.ref_div + 1)));
692
693 rv6xx_set_memory_spread_spectrum_clk_v(rdev, clk_v);
694 rv6xx_set_memory_spread_spectrum_clk_s(rdev, clk_s);
695 rv6xx_enable_memory_spread_spectrum(rdev, true);
696 }
697 }
698 }
699}
700
701static int rv6xx_program_voltage_stepping_entry(struct radeon_device *rdev,
702 u32 entry, u16 voltage)
703{
704 u32 mask, set_pins;
705 int ret;
706
707 ret = radeon_atom_get_voltage_gpio_settings(rdev, voltage,
708 SET_VOLTAGE_TYPE_ASIC_VDDC,
709 &set_pins, &mask);
710 if (ret)
711 return ret;
712
713 r600_voltage_control_program_voltages(rdev, entry, set_pins);
714
715 return 0;
716}
717
718static void rv6xx_program_voltage_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
719{
720 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
721 int i;
722
723 for (i = 1; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++)
724 rv6xx_program_voltage_stepping_entry(rdev, i,
725 pi->hw.vddc[i]);
726
727}
728
729static void rv6xx_program_backbias_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
730{
731 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
732
733 if (pi->hw.backbias[1])
734 WREG32_P(VID_UPPER_GPIO_CNTL, MEDIUM_BACKBIAS_VALUE, ~MEDIUM_BACKBIAS_VALUE);
735 else
736 WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~MEDIUM_BACKBIAS_VALUE);
737
738 if (pi->hw.backbias[2])
739 WREG32_P(VID_UPPER_GPIO_CNTL, HIGH_BACKBIAS_VALUE, ~HIGH_BACKBIAS_VALUE);
740 else
741 WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~HIGH_BACKBIAS_VALUE);
742}
743
744static void rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(struct radeon_device *rdev)
745{
746 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
747
748 rv6xx_program_engine_spread_spectrum(rdev,
749 pi->hw.sclks[R600_POWER_LEVEL_LOW],
750 R600_POWER_LEVEL_LOW);
751}
752
753static void rv6xx_program_mclk_stepping_parameters_lowest_entry(struct radeon_device *rdev)
754{
755 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
756
757 if (pi->hw.mclks[0])
758 rv6xx_program_mclk_stepping_entry(rdev, 0,
759 pi->hw.mclks[0]);
760}
761
762static void rv6xx_program_voltage_stepping_parameters_lowest_entry(struct radeon_device *rdev)
763{
764 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
765
766 rv6xx_program_voltage_stepping_entry(rdev, 0,
767 pi->hw.vddc[0]);
768
769}
770
771static void rv6xx_program_backbias_stepping_parameters_lowest_entry(struct radeon_device *rdev)
772{
773 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
774
775 if (pi->hw.backbias[0])
776 WREG32_P(VID_UPPER_GPIO_CNTL, LOW_BACKBIAS_VALUE, ~LOW_BACKBIAS_VALUE);
777 else
778 WREG32_P(VID_UPPER_GPIO_CNTL, 0, ~LOW_BACKBIAS_VALUE);
779}
780
781static u32 calculate_memory_refresh_rate(struct radeon_device *rdev,
782 u32 engine_clock)
783{
784 u32 dram_rows, dram_refresh_rate;
785 u32 tmp;
786
787 tmp = (RREG32(RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
788 dram_rows = 1 << (tmp + 10);
789 dram_refresh_rate = 1 << ((RREG32(MC_SEQ_RESERVE_M) & 0x3) + 3);
790
791 return ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
792}
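/*
 * Worked example for calculate_memory_refresh_rate(): with
 * engine_clock = 50000 (10 kHz units, 500 MHz), a NOOFROWS field of 3
 * (8192 rows) and MC_SEQ_RESERVE_M low bits of 2 (refresh rate
 * 1 << 5 = 32), the result is
 * ((50000 * 10) * 32 / 8192 - 32) / 64 = (1953 - 32) / 64 = 30.
 */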
793
794static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
795{
796 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
797 u32 sqm_ratio;
798 u32 arb_refresh_rate;
799 u32 high_clock;
800
801 if (pi->hw.sclks[R600_POWER_LEVEL_HIGH] <
802 (pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40))
803 high_clock = pi->hw.sclks[R600_POWER_LEVEL_HIGH];
804 else
805 high_clock =
806 pi->hw.sclks[R600_POWER_LEVEL_LOW] * 0xFF / 0x40;
807
808 radeon_atom_set_engine_dram_timings(rdev, high_clock, 0);
809
810 sqm_ratio = (STATE0(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_LOW]) |
811 STATE1(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_MEDIUM]) |
812 STATE2(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]) |
813 STATE3(64 * high_clock / pi->hw.sclks[R600_POWER_LEVEL_HIGH]));
814 WREG32(SQM_RATIO, sqm_ratio);
815
816 arb_refresh_rate =
817 (POWERMODE0(calculate_memory_refresh_rate(rdev,
818 pi->hw.sclks[R600_POWER_LEVEL_LOW])) |
819 POWERMODE1(calculate_memory_refresh_rate(rdev,
820 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
821 POWERMODE2(calculate_memory_refresh_rate(rdev,
822 pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
823 POWERMODE3(calculate_memory_refresh_rate(rdev,
824 pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
825 WREG32(ARB_RFSH_RATE, arb_refresh_rate);
826}
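/*
 * The high_clock clamp above keeps the SQM ratios in range: each
 * STATEn field holds 64 * high_clock / sclk[n], and capping high_clock
 * at low_sclk * 0xFF / 0x40 keeps the largest ratio,
 * 64 * high_clock / low_sclk, at or below 0xFF -- presumably the
 * field width. E.g. low = 20000 and high = 100000 (10 kHz units)
 * clamps high_clock to 20000 * 255 / 64 = 79687, so STATE0 is
 * programmed with 254 instead of overflowing at 320.
 */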
827
828static void rv6xx_program_mpll_timing_parameters(struct radeon_device *rdev)
829{
830 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
831
832 r600_set_mpll_lock_time(rdev, R600_MPLLLOCKTIME_DFLT *
833 pi->mpll_ref_div);
834 r600_set_mpll_reset_time(rdev, R600_MPLLRESETTIME_DFLT);
835}
836
837static void rv6xx_program_bsp(struct radeon_device *rdev)
838{
839 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
840 u32 ref_clk = rdev->clock.spll.reference_freq;
841
842 r600_calculate_u_and_p(R600_ASI_DFLT,
843 ref_clk, 16,
844 &pi->bsp,
845 &pi->bsu);
846
847 r600_set_bsp(rdev, pi->bsu, pi->bsp);
848}
849
850static void rv6xx_program_at(struct radeon_device *rdev)
851{
852 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
853
854 r600_set_at(rdev,
855 (pi->hw.rp[0] * pi->bsp) / 200,
856 (pi->hw.rp[1] * pi->bsp) / 200,
857 (pi->hw.lp[2] * pi->bsp) / 200,
858 (pi->hw.lp[1] * pi->bsp) / 200);
859}
860
861static void rv6xx_program_git(struct radeon_device *rdev)
862{
863 r600_set_git(rdev, R600_GICST_DFLT);
864}
865
866static void rv6xx_program_tp(struct radeon_device *rdev)
867{
868 int i;
869
870 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
871 r600_set_tc(rdev, i, r600_utc[i], r600_dtc[i]);
872
873 r600_select_td(rdev, R600_TD_DFLT);
874}
875
876static void rv6xx_program_vc(struct radeon_device *rdev)
877{
878 r600_set_vrc(rdev, R600_VRC_DFLT);
879}
880
881static void rv6xx_clear_vc(struct radeon_device *rdev)
882{
883 r600_set_vrc(rdev, 0);
884}
885
886static void rv6xx_program_tpp(struct radeon_device *rdev)
887{
888 r600_set_tpu(rdev, R600_TPU_DFLT);
889 r600_set_tpc(rdev, R600_TPC_DFLT);
890}
891
892static void rv6xx_program_sstp(struct radeon_device *rdev)
893{
894 r600_set_sstu(rdev, R600_SSTU_DFLT);
895 r600_set_sst(rdev, R600_SST_DFLT);
896}
897
898static void rv6xx_program_fcp(struct radeon_device *rdev)
899{
900 r600_set_fctu(rdev, R600_FCTU_DFLT);
901 r600_set_fct(rdev, R600_FCT_DFLT);
902}
903
904static void rv6xx_program_vddc3d_parameters(struct radeon_device *rdev)
905{
906 r600_set_vddc3d_oorsu(rdev, R600_VDDC3DOORSU_DFLT);
907 r600_set_vddc3d_oorphc(rdev, R600_VDDC3DOORPHC_DFLT);
908 r600_set_vddc3d_oorsdc(rdev, R600_VDDC3DOORSDC_DFLT);
909 r600_set_ctxcgtt3d_rphc(rdev, R600_CTXCGTT3DRPHC_DFLT);
910 r600_set_ctxcgtt3d_rsdc(rdev, R600_CTXCGTT3DRSDC_DFLT);
911}
912
913static void rv6xx_program_voltage_timing_parameters(struct radeon_device *rdev)
914{
915 u32 rt;
916
917 r600_vid_rt_set_vru(rdev, R600_VRU_DFLT);
918
919 r600_vid_rt_set_vrt(rdev,
920 rv6xx_compute_count_for_delay(rdev,
921 rdev->pm.dpm.voltage_response_time,
922 R600_VRU_DFLT));
923
924 rt = rv6xx_compute_count_for_delay(rdev,
925 rdev->pm.dpm.backbias_response_time,
926 R600_VRU_DFLT);
927
928 rv6xx_vid_response_set_brt(rdev, (rt + 0x1F) >> 5);
929}
930
931static void rv6xx_program_engine_speed_parameters(struct radeon_device *rdev)
932{
933 r600_vid_rt_set_ssu(rdev, R600_SPLLSTEPUNIT_DFLT);
934 rv6xx_enable_engine_feedback_and_reference_sync(rdev);
935}
936
937static u64 rv6xx_get_master_voltage_mask(struct radeon_device *rdev)
938{
939 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
940 u64 master_mask = 0;
941 int i;
942
943 for (i = 0; i < R600_PM_NUMBER_OF_VOLTAGE_LEVELS; i++) {
944 u32 tmp_mask, tmp_set_pins;
945 int ret;
946
947 ret = radeon_atom_get_voltage_gpio_settings(rdev,
948 pi->hw.vddc[i],
949 SET_VOLTAGE_TYPE_ASIC_VDDC,
950 &tmp_set_pins, &tmp_mask);
951
952 if (ret == 0)
953 master_mask |= tmp_mask;
954 }
955
956 return master_mask;
957}
958
959static void rv6xx_program_voltage_gpio_pins(struct radeon_device *rdev)
960{
961 r600_voltage_control_enable_pins(rdev,
962 rv6xx_get_master_voltage_mask(rdev));
963}
964
965static void rv6xx_enable_static_voltage_control(struct radeon_device *rdev,
966 struct radeon_ps *new_ps,
967 bool enable)
968{
969 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
970
971 if (enable)
972 radeon_atom_set_voltage(rdev,
973 new_state->low.vddc,
974 SET_VOLTAGE_TYPE_ASIC_VDDC);
975 else
976 r600_voltage_control_deactivate_static_control(rdev,
977 rv6xx_get_master_voltage_mask(rdev));
978}
979
980static void rv6xx_enable_display_gap(struct radeon_device *rdev, bool enable)
981{
982 if (enable) {
983 u32 tmp = (DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
984 DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM) |
985 DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
986 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
987 VBI_TIMER_COUNT(0x3FFF) |
988 VBI_TIMER_UNIT(7));
989 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
990
991 WREG32_P(MCLK_PWRMGT_CNTL, USE_DISPLAY_GAP, ~USE_DISPLAY_GAP);
992 } else
993 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~USE_DISPLAY_GAP);
994}
995
996static void rv6xx_program_power_level_enter_state(struct radeon_device *rdev)
997{
998 r600_power_level_set_enter_index(rdev, R600_POWER_LEVEL_MEDIUM);
999}
1000
1001static void rv6xx_calculate_t(u32 l_f, u32 h_f, int h,
1002 int d_l, int d_r, u8 *l, u8 *r)
1003{
1004 int a_n, a_d, h_r, l_r;
1005
1006 h_r = d_l;
1007 l_r = 100 - d_r;
1008
1009 a_n = (int)h_f * d_l + (int)l_f * (h - d_r);
1010 a_d = (int)l_f * l_r + (int)h_f * h_r;
1011
1012 if (a_d != 0) {
1013 *l = d_l - h_r * a_n / a_d;
1014 *r = d_r + l_r * a_n / a_d;
1015 }
1016}
1017
1018static void rv6xx_calculate_ap(struct radeon_device *rdev,
1019 struct rv6xx_ps *state)
1020{
1021 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1022
1023 pi->hw.lp[0] = 0;
1024 pi->hw.rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS - 1]
1025 = 100;
1026
1027 rv6xx_calculate_t(state->low.sclk,
1028 state->medium.sclk,
1029 R600_AH_DFLT,
1030 R600_LMP_DFLT,
1031 R600_RLP_DFLT,
1032 &pi->hw.lp[1],
1033 &pi->hw.rp[0]);
1034
1035 rv6xx_calculate_t(state->medium.sclk,
1036 state->high.sclk,
1037 R600_AH_DFLT,
1038 R600_LHP_DFLT,
1039 R600_RMP_DFLT,
1040 &pi->hw.lp[2],
1041 &pi->hw.rp[1]);
1042
1043}
1044
1045static void rv6xx_calculate_stepping_parameters(struct radeon_device *rdev,
1046 struct radeon_ps *new_ps)
1047{
1048 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1049
1050 rv6xx_calculate_engine_speed_stepping_parameters(rdev, new_state);
1051 rv6xx_calculate_memory_clock_stepping_parameters(rdev, new_state);
1052 rv6xx_calculate_voltage_stepping_parameters(rdev, new_state);
1053 rv6xx_calculate_ap(rdev, new_state);
1054}
1055
1056static void rv6xx_program_stepping_parameters_except_lowest_entry(struct radeon_device *rdev)
1057{
1058 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1059
1060 rv6xx_program_mclk_stepping_parameters_except_lowest_entry(rdev);
1061 if (pi->voltage_control)
1062 rv6xx_program_voltage_stepping_parameters_except_lowest_entry(rdev);
1063 rv6xx_program_backbias_stepping_parameters_except_lowest_entry(rdev);
1064 rv6xx_program_sclk_spread_spectrum_parameters_except_lowest_entry(rdev);
1065 rv6xx_program_mclk_spread_spectrum_parameters(rdev);
1066 rv6xx_program_memory_timing_parameters(rdev);
1067}
1068
1069static void rv6xx_program_stepping_parameters_lowest_entry(struct radeon_device *rdev)
1070{
1071 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1072
1073 rv6xx_program_mclk_stepping_parameters_lowest_entry(rdev);
1074 if (pi->voltage_control)
1075 rv6xx_program_voltage_stepping_parameters_lowest_entry(rdev);
1076 rv6xx_program_backbias_stepping_parameters_lowest_entry(rdev);
1077 rv6xx_program_sclk_spread_spectrum_parameters_lowest_entry(rdev);
1078}
1079
1080static void rv6xx_program_power_level_low(struct radeon_device *rdev)
1081{
1082 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1083
1084 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW,
1085 pi->hw.low_vddc_index);
1086 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW,
1087 pi->hw.low_mclk_index);
1088 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW,
1089 pi->hw.low_sclk_index);
1090 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
1091 R600_DISPLAY_WATERMARK_LOW);
1092 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
1093 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
1094}
1095
1096static void rv6xx_program_power_level_low_to_lowest_state(struct radeon_device *rdev)
1097{
1098 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1099
1100 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_LOW, 0);
1101 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
1102 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_LOW, 0);
1103
1104 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_LOW,
1105 R600_DISPLAY_WATERMARK_LOW);
1106
1107 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_LOW,
1108 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
1109
1110}
1111
1112static void rv6xx_program_power_level_medium(struct radeon_device *rdev)
1113{
1114 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1115
1116 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM,
1117 pi->hw.medium_vddc_index);
1118 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
1119 pi->hw.medium_mclk_index);
1120 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
1121 pi->hw.medium_sclk_index);
1122 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
1123 R600_DISPLAY_WATERMARK_LOW);
1124 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
1125 pi->hw.pcie_gen2[R600_POWER_LEVEL_MEDIUM]);
1126}
1127
1128static void rv6xx_program_power_level_medium_for_transition(struct radeon_device *rdev)
1129{
1130 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1131
1132 rv6xx_program_mclk_stepping_entry(rdev,
1133 R600_POWER_LEVEL_CTXSW,
1134 pi->hw.mclks[pi->hw.low_mclk_index]);
1135
1136 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_MEDIUM, 1);
1137
1138 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
1139 R600_POWER_LEVEL_CTXSW);
1140 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_MEDIUM,
1141 pi->hw.medium_sclk_index);
1142
1143 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_MEDIUM,
1144 R600_DISPLAY_WATERMARK_LOW);
1145
1146 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);
1147
1148 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_MEDIUM,
1149 pi->hw.pcie_gen2[R600_POWER_LEVEL_LOW]);
1150}
1151
1152static void rv6xx_program_power_level_high(struct radeon_device *rdev)
1153{
1154 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1155
1156 r600_power_level_set_voltage_index(rdev, R600_POWER_LEVEL_HIGH,
1157 pi->hw.high_vddc_index);
1158 r600_power_level_set_mem_clock_index(rdev, R600_POWER_LEVEL_HIGH,
1159 pi->hw.high_mclk_index);
1160 r600_power_level_set_eng_clock_index(rdev, R600_POWER_LEVEL_HIGH,
1161 pi->hw.high_sclk_index);
1162
1163 r600_power_level_set_watermark_id(rdev, R600_POWER_LEVEL_HIGH,
1164 R600_DISPLAY_WATERMARK_HIGH);
1165
1166 r600_power_level_set_pcie_gen2(rdev, R600_POWER_LEVEL_HIGH,
1167 pi->hw.pcie_gen2[R600_POWER_LEVEL_HIGH]);
1168}
1169
1170static void rv6xx_enable_backbias(struct radeon_device *rdev, bool enable)
1171{
1172 if (enable)
1173 WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL,
1174 ~(BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
1175 else
1176 WREG32_P(GENERAL_PWRMGT, 0,
1177 ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN | BACKBIAS_DPM_CNTL));
1178}
1179
1180static void rv6xx_program_display_gap(struct radeon_device *rdev)
1181{
1182 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1183
1184 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1185 if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
1186 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1187 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1188 } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
1189 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1190 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1191 } else {
1192 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1193 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1194 }
1195 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1196}
1197
1198static void rv6xx_set_sw_voltage_to_safe(struct radeon_device *rdev,
1199 struct radeon_ps *new_ps,
1200 struct radeon_ps *old_ps)
1201{
1202 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1203 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1204 u16 safe_voltage;
1205
1206 safe_voltage = (new_state->low.vddc >= old_state->low.vddc) ?
1207 new_state->low.vddc : old_state->low.vddc;
1208
1209 rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
1210 safe_voltage);
1211
1212 WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
1213 ~SW_GPIO_INDEX_MASK);
1214}
1215
1216static void rv6xx_set_sw_voltage_to_low(struct radeon_device *rdev,
1217 struct radeon_ps *old_ps)
1218{
1219 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1220
1221 rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
1222 old_state->low.vddc);
1223
1224 WREG32_P(GENERAL_PWRMGT, SW_GPIO_INDEX(R600_POWER_LEVEL_CTXSW),
1225 ~SW_GPIO_INDEX_MASK);
1226}
1227
1228static void rv6xx_set_safe_backbias(struct radeon_device *rdev,
1229 struct radeon_ps *new_ps,
1230 struct radeon_ps *old_ps)
1231{
1232 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1233 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1234
1235 if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) &&
1236 (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE))
1237 WREG32_P(GENERAL_PWRMGT, BACKBIAS_VALUE, ~BACKBIAS_VALUE);
1238 else
1239 WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_VALUE);
1240}
1241
1242static void rv6xx_set_safe_pcie_gen2(struct radeon_device *rdev,
1243 struct radeon_ps *new_ps,
1244 struct radeon_ps *old_ps)
1245{
1246 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1247 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1248
1249 if ((new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) !=
1250 (old_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
1251 rv6xx_force_pcie_gen1(rdev);
1252}
1253
1254static void rv6xx_enable_dynamic_voltage_control(struct radeon_device *rdev,
1255 bool enable)
1256{
1257 if (enable)
1258 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
1259 else
1260 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
1261}
1262
1263static void rv6xx_enable_dynamic_backbias_control(struct radeon_device *rdev,
1264 bool enable)
1265{
1266 if (enable)
1267 WREG32_P(GENERAL_PWRMGT, BACKBIAS_DPM_CNTL, ~BACKBIAS_DPM_CNTL);
1268 else
1269 WREG32_P(GENERAL_PWRMGT, 0, ~BACKBIAS_DPM_CNTL);
1270}
1271
1272static int rv6xx_step_sw_voltage(struct radeon_device *rdev,
1273 u16 initial_voltage,
1274 u16 target_voltage)
1275{
1276 u16 current_voltage;
1277 u16 true_target_voltage;
1278 u16 voltage_step;
1279 int signed_voltage_step;
1280
1281 if ((radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
1282 &voltage_step)) ||
1283 (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
1284 initial_voltage, &current_voltage)) ||
1285 (radeon_atom_round_to_true_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
1286 target_voltage, &true_target_voltage)))
1287 return -EINVAL;
1288
1289 if (true_target_voltage < current_voltage)
1290 signed_voltage_step = -(int)voltage_step;
1291 else
1292 signed_voltage_step = voltage_step;
1293
1294 while (current_voltage != true_target_voltage) {
1295 current_voltage += signed_voltage_step;
1296 rv6xx_program_voltage_stepping_entry(rdev, R600_POWER_LEVEL_CTXSW,
1297 current_voltage);
1298 msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
1299 }
1300
1301 return 0;
1302}
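/*
 * Stepping example for rv6xx_step_sw_voltage(): both endpoints are
 * first rounded to voltages the table can actually produce, so the
 * distance is a whole number of steps. Going from 1000 mV to 1050 mV
 * with a 25 mV step (illustrative values; the real step comes from
 * radeon_atom_get_voltage_step()) programs the CTXSW entry twice, at
 * 1025 and 1050, and after each write sleeps
 * (voltage_response_time + 999) / 1000 ms, i.e. the response time
 * rounded up to whole milliseconds, so the regulator settles between
 * steps.
 */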
1303
1304static int rv6xx_step_voltage_if_increasing(struct radeon_device *rdev,
1305 struct radeon_ps *new_ps,
1306 struct radeon_ps *old_ps)
1307{
1308 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1309 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1310
1311 if (new_state->low.vddc > old_state->low.vddc)
1312 return rv6xx_step_sw_voltage(rdev,
1313 old_state->low.vddc,
1314 new_state->low.vddc);
1315
1316 return 0;
1317}
1318
1319static int rv6xx_step_voltage_if_decreasing(struct radeon_device *rdev,
1320 struct radeon_ps *new_ps,
1321 struct radeon_ps *old_ps)
1322{
1323 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1324 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1325
1326 if (new_state->low.vddc < old_state->low.vddc)
1327 return rv6xx_step_sw_voltage(rdev,
1328 old_state->low.vddc,
1329 new_state->low.vddc);
1330 else
1331 return 0;
1332}
1333
1334static void rv6xx_enable_high(struct radeon_device *rdev)
1335{
1336 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1337
1338 if ((pi->restricted_levels < 1) ||
1339 (pi->restricted_levels == 3))
1340 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
1341}
1342
1343static void rv6xx_enable_medium(struct radeon_device *rdev)
1344{
1345 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1346
1347 if (pi->restricted_levels < 2)
1348 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
1349}
1350
1351static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1352{
1353 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1354 bool want_thermal_protection;
1355 enum radeon_dpm_event_src dpm_event_src;
1356
1357 switch (sources) {
1358 case 0:
1359 default:
1360 want_thermal_protection = false;
1361 break;
1362 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1363 want_thermal_protection = true;
1364 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1365 break;
1366
1367 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1368 want_thermal_protection = true;
1369 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1370 break;
1371
1372 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1373 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1374 want_thermal_protection = true;
1375 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1376 break;
1377 }
1378
1379 if (want_thermal_protection) {
1380 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
1381 if (pi->thermal_protection)
1382 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
1383 } else {
1384 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
1385 }
1386}
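/*
 * sources is a bitmask of enum radeon_dpm_auto_throttle_src bits, so
 * the switch above decodes three populated cases: thermal only maps
 * the event source to the on-die (digital) sensor, external only to
 * the external sensor, and both bits together to
 * RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL (the spelling comes from the
 * shared r600 definitions). For instance, after
 *
 *	rv6xx_enable_auto_throttle_source(rdev,
 *			RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 *
 * sources == (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL) and thermal
 * protection is armed on the internal sensor; an empty mask leaves
 * THERMAL_PROTECTION_DIS set.
 */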
1387
1388static void rv6xx_enable_auto_throttle_source(struct radeon_device *rdev,
1389 enum radeon_dpm_auto_throttle_src source,
1390 bool enable)
1391{
1392 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1393
1394 if (enable) {
1395 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1396 pi->active_auto_throttle_sources |= 1 << source;
1397 rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1398 }
1399 } else {
1400 if (pi->active_auto_throttle_sources & (1 << source)) {
1401 pi->active_auto_throttle_sources &= ~(1 << source);
1402 rv6xx_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1403 }
1404 }
1405}
1406
1407
1408static void rv6xx_enable_thermal_protection(struct radeon_device *rdev,
1409 bool enable)
1410{
1411 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1412
1413 if (pi->active_auto_throttle_sources)
1414 r600_enable_thermal_protection(rdev, enable);
1415}
1416
1417static void rv6xx_generate_transition_stepping(struct radeon_device *rdev,
1418 struct radeon_ps *new_ps,
1419 struct radeon_ps *old_ps)
1420{
1421 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1422 struct rv6xx_ps *old_state = rv6xx_get_ps(old_ps);
1423 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1424
1425 rv6xx_generate_steps(rdev,
1426 old_state->low.sclk,
1427 new_state->low.sclk,
1428 0, &pi->hw.medium_sclk_index);
1429}
1430
1431static void rv6xx_generate_low_step(struct radeon_device *rdev,
1432 struct radeon_ps *new_ps)
1433{
1434 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1435 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1436
1437 pi->hw.low_sclk_index = 0;
1438 rv6xx_generate_single_step(rdev,
1439 new_state->low.sclk,
1440 0);
1441}
1442
1443static void rv6xx_invalidate_intermediate_steps(struct radeon_device *rdev)
1444{
1445 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1446
1447 rv6xx_invalidate_intermediate_steps_range(rdev, 0,
1448 pi->hw.medium_sclk_index);
1449}
1450
1451static void rv6xx_generate_stepping_table(struct radeon_device *rdev,
1452 struct radeon_ps *new_ps)
1453{
1454 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1455 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1456
1457 pi->hw.low_sclk_index = 0;
1458
1459 rv6xx_generate_steps(rdev,
1460 new_state->low.sclk,
1461 new_state->medium.sclk,
1462 0,
1463 &pi->hw.medium_sclk_index);
1464 rv6xx_generate_steps(rdev,
1465 new_state->medium.sclk,
1466 new_state->high.sclk,
1467 pi->hw.medium_sclk_index,
1468 &pi->hw.high_sclk_index);
1469}
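/*
 * Note the chaining: the first rv6xx_generate_steps() call fills
 * entries [0 .. medium_sclk_index] ramping low -> medium, and the
 * second starts at medium_sclk_index and extends the same table up to
 * high_sclk_index, so the result is one contiguous ramp with the three
 * operating points at indices low_sclk_index (0), medium_sclk_index
 * and high_sclk_index.
 */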
1470
1471static void rv6xx_enable_spread_spectrum(struct radeon_device *rdev,
1472 bool enable)
1473{
1474 if (enable)
1475 rv6xx_enable_dynamic_spread_spectrum(rdev, true);
1476 else {
1477 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_LOW, false);
1478 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_MEDIUM, false);
1479 rv6xx_enable_engine_spread_spectrum(rdev, R600_POWER_LEVEL_HIGH, false);
1480 rv6xx_enable_dynamic_spread_spectrum(rdev, false);
1481 rv6xx_enable_memory_spread_spectrum(rdev, false);
1482 }
1483}
1484
1485static void rv6xx_reset_lvtm_data_sync(struct radeon_device *rdev)
1486{
1487 if (ASIC_IS_DCE3(rdev))
1488 WREG32_P(DCE3_LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
1489 else
1490 WREG32_P(LVTMA_DATA_SYNCHRONIZATION, LVTMA_PFREQCHG, ~LVTMA_PFREQCHG);
1491}
1492
1493static void rv6xx_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1494 struct radeon_ps *new_ps,
1495 bool enable)
1496{
1497 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1498
1499 if (enable) {
1500 rv6xx_enable_bif_dynamic_pcie_gen2(rdev, true);
1501 rv6xx_enable_pcie_gen2_support(rdev);
1502 r600_enable_dynamic_pcie_gen2(rdev, true);
1503 } else {
1504 if (!(new_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2))
1505 rv6xx_force_pcie_gen1(rdev);
1506 rv6xx_enable_bif_dynamic_pcie_gen2(rdev, false);
1507 r600_enable_dynamic_pcie_gen2(rdev, false);
1508 }
1509}
1510
1511static void rv6xx_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
1512 struct radeon_ps *new_ps,
1513 struct radeon_ps *old_ps)
1514{
1515 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1516 struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
1517
1518 if ((new_ps->vclk == old_ps->vclk) &&
1519 (new_ps->dclk == old_ps->dclk))
1520 return;
1521
1522 if (new_state->high.sclk >= current_state->high.sclk)
1523 return;
1524
1525 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1526}
1527
1528static void rv6xx_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
1529 struct radeon_ps *new_ps,
1530 struct radeon_ps *old_ps)
1531{
1532 struct rv6xx_ps *new_state = rv6xx_get_ps(new_ps);
1533 struct rv6xx_ps *current_state = rv6xx_get_ps(old_ps);
1534
1535 if ((new_ps->vclk == old_ps->vclk) &&
1536 (new_ps->dclk == old_ps->dclk))
1537 return;
1538
1539 if (new_state->high.sclk < current_state->high.sclk)
1540 return;
1541
1542 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1543}
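/*
 * The before/after pair above orders UVD (vclk/dclk) reprogramming
 * against the engine clock switch: when the new state lowers the high
 * sclk, UVD clocks are set before the switch; when it raises the high
 * sclk, they are set after. Either way the UVD clocks only change
 * while the engine clock sits at the higher of the two operating
 * points, and both helpers bail out early when vclk and dclk are
 * unchanged.
 */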
1544
1545int rv6xx_dpm_enable(struct radeon_device *rdev)
1546{
1547 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1548 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1549 int ret;
1550
1551 if (r600_dynamicpm_enabled(rdev))
1552 return -EINVAL;
1553
1554 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1555 rv6xx_enable_backbias(rdev, true);
1556
1557 if (pi->dynamic_ss)
1558 rv6xx_enable_spread_spectrum(rdev, true);
1559
1560 rv6xx_program_mpll_timing_parameters(rdev);
1561 rv6xx_program_bsp(rdev);
1562 rv6xx_program_git(rdev);
1563 rv6xx_program_tp(rdev);
1564 rv6xx_program_tpp(rdev);
1565 rv6xx_program_sstp(rdev);
1566 rv6xx_program_fcp(rdev);
1567 rv6xx_program_vddc3d_parameters(rdev);
1568 rv6xx_program_voltage_timing_parameters(rdev);
1569 rv6xx_program_engine_speed_parameters(rdev);
1570
1571 rv6xx_enable_display_gap(rdev, true);
1572 if (pi->display_gap == false)
1573 rv6xx_enable_display_gap(rdev, false);
1574
1575 rv6xx_program_power_level_enter_state(rdev);
1576
1577 rv6xx_calculate_stepping_parameters(rdev, boot_ps);
1578
1579 if (pi->voltage_control)
1580 rv6xx_program_voltage_gpio_pins(rdev);
1581
1582 rv6xx_generate_stepping_table(rdev, boot_ps);
1583
1584 rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
1585 rv6xx_program_stepping_parameters_lowest_entry(rdev);
1586
1587 rv6xx_program_power_level_low(rdev);
1588 rv6xx_program_power_level_medium(rdev);
1589 rv6xx_program_power_level_high(rdev);
1590 rv6xx_program_vc(rdev);
1591 rv6xx_program_at(rdev);
1592
1593 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
1594 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
1595 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
1596
1597 if (rdev->irq.installed &&
1598 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1599 ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1600 if (ret)
1601 return ret;
1602 rdev->irq.dpm_thermal = true;
1603 radeon_irq_set(rdev);
1604 }
1605
1606 rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
1607
1608 r600_start_dpm(rdev);
1609
1610 if (pi->voltage_control)
1611 rv6xx_enable_static_voltage_control(rdev, boot_ps, false);
1612
1613 if (pi->dynamic_pcie_gen2)
1614 rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, true);
1615
1616 if (pi->gfx_clock_gating)
1617 r600_gfx_clockgating_enable(rdev, true);
1618
1619 return 0;
1620}
1621
1622void rv6xx_dpm_disable(struct radeon_device *rdev)
1623{
1624 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1625 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1626
1627 if (!r600_dynamicpm_enabled(rdev))
1628 return;
1629
1630 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
1631 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
1632 rv6xx_enable_display_gap(rdev, false);
1633 rv6xx_clear_vc(rdev);
1634 r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
1635
1636 if (pi->thermal_protection)
1637 r600_enable_thermal_protection(rdev, false);
1638
1639 r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
1640 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
1641 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
1642
1643 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1644 rv6xx_enable_backbias(rdev, false);
1645
1646 rv6xx_enable_spread_spectrum(rdev, false);
1647
1648 if (pi->voltage_control)
1649 rv6xx_enable_static_voltage_control(rdev, boot_ps, true);
1650
1651 if (pi->dynamic_pcie_gen2)
1652 rv6xx_enable_dynamic_pcie_gen2(rdev, boot_ps, false);
1653
1654 if (rdev->irq.installed &&
1655 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1656 rdev->irq.dpm_thermal = false;
1657 radeon_irq_set(rdev);
1658 }
1659
1660 if (pi->gfx_clock_gating)
1661 r600_gfx_clockgating_enable(rdev, false);
1662
1663 r600_stop_dpm(rdev);
1664}
1665
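/*
 * Switch power states in stages: park on the current LOW level, build a
 * transition step in the MEDIUM slot, move voltage/backbias/PCIe to safe
 * values, hop LOW -> MEDIUM -> new LOW, then regenerate and re-enable all
 * three levels for the new state.
 */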
1666int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1667{
1668 struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
1669 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
1670 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
1671 int ret;
1672
1673 rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1674
1675 rv6xx_clear_vc(rdev);
1676 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
1677 r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
1678
1679 if (pi->thermal_protection)
1680 r600_enable_thermal_protection(rdev, false);
1681
1682 r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
1683 r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
1684 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
1685
1686 rv6xx_generate_transition_stepping(rdev, new_ps, old_ps);
1687 rv6xx_program_power_level_medium_for_transition(rdev);
1688
1689 if (pi->voltage_control) {
1690 rv6xx_set_sw_voltage_to_safe(rdev, new_ps, old_ps);
1691 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1692 rv6xx_set_sw_voltage_to_low(rdev, old_ps);
1693 }
1694
1695 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1696 rv6xx_set_safe_backbias(rdev, new_ps, old_ps);
1697
1698 if (pi->dynamic_pcie_gen2)
1699 rv6xx_set_safe_pcie_gen2(rdev, new_ps, old_ps);
1700
1701 if (pi->voltage_control)
1702 rv6xx_enable_dynamic_voltage_control(rdev, false);
1703
1704 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1705 rv6xx_enable_dynamic_backbias_control(rdev, false);
1706
1707 if (pi->voltage_control) {
1708 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1709 rv6xx_step_voltage_if_increasing(rdev, new_ps, old_ps);
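/* voltage_response_time is presumably in us; +999 rounds up to whole ms for msleep() */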
1710 msleep((rdev->pm.dpm.voltage_response_time + 999) / 1000);
1711 }
1712
1713 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
1714 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
1715 r600_wait_for_power_level_unequal(rdev, R600_POWER_LEVEL_LOW);
1716
1717 rv6xx_generate_low_step(rdev, new_ps);
1718 rv6xx_invalidate_intermediate_steps(rdev);
1719 rv6xx_calculate_stepping_parameters(rdev, new_ps);
1720 rv6xx_program_stepping_parameters_lowest_entry(rdev);
1721 rv6xx_program_power_level_low_to_lowest_state(rdev);
1722
1723 r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
1724 r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
1725 r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
1726
1727 if (pi->voltage_control) {
1728 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) {
1729 ret = rv6xx_step_voltage_if_decreasing(rdev, new_ps, old_ps);
1730 if (ret)
1731 return ret;
1732 }
1733 rv6xx_enable_dynamic_voltage_control(rdev, true);
1734 }
1735
1736 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1737 rv6xx_enable_dynamic_backbias_control(rdev, true);
1738
1739 if (pi->dynamic_pcie_gen2)
1740 rv6xx_enable_dynamic_pcie_gen2(rdev, new_ps, true);
1741
1742 rv6xx_reset_lvtm_data_sync(rdev);
1743
1744 rv6xx_generate_stepping_table(rdev, new_ps);
1745 rv6xx_program_stepping_parameters_except_lowest_entry(rdev);
1746 rv6xx_program_power_level_low(rdev);
1747 rv6xx_program_power_level_medium(rdev);
1748 rv6xx_program_power_level_high(rdev);
1749 rv6xx_enable_medium(rdev);
1750 rv6xx_enable_high(rdev);
1751
1752 if (pi->thermal_protection)
1753 rv6xx_enable_thermal_protection(rdev, true);
1754 rv6xx_program_vc(rdev);
1755 rv6xx_program_at(rdev);
1756
1757 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1758
1759 return 0;
1760}
1761
1762void rv6xx_setup_asic(struct radeon_device *rdev)
1763{
1764 r600_enable_acpi_pm(rdev);
1765
1766 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
1767 rv6xx_enable_l0s(rdev);
1768 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
1769 rv6xx_enable_l1(rdev);
1770 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
1771 rv6xx_enable_pll_sleep_in_l1(rdev);
1772}
1773
1774void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
1775{
1776 rv6xx_program_display_gap(rdev);
1777}
1778
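/*
 * The ATOM powerplay table exists in several revisions; these unions let
 * the parser overlay whichever layout the VBIOS provides.
 */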
1779union power_info {
1780 struct _ATOM_POWERPLAY_INFO info;
1781 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1782 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1783 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
1784 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
1785 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
1786};
1787
1788union pplib_clock_info {
1789 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
1790 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
1791 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
1792 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
1793};
1794
1795union pplib_power_state {
1796 struct _ATOM_PPLIB_STATE v1;
1797 struct _ATOM_PPLIB_STATE_V2 v2;
1798};
1799
1800static void rv6xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
1801 struct radeon_ps *rps,
1802 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
1803{
1804 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1805 rps->class = le16_to_cpu(non_clock_info->usClassification);
1806 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
1807
1808 if (r600_is_uvd_state(rps->class, rps->class2)) {
1809 rps->vclk = RV6XX_DEFAULT_VCLK_FREQ;
1810 rps->dclk = RV6XX_DEFAULT_DCLK_FREQ;
1811 } else {
1812 rps->vclk = 0;
1813 rps->dclk = 0;
1814 }
1815
1816 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
1817 rdev->pm.dpm.boot_ps = rps;
1818 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1819 rdev->pm.dpm.uvd_ps = rps;
1820}
1821
1822static void rv6xx_parse_pplib_clock_info(struct radeon_device *rdev,
1823 struct radeon_ps *rps, int index,
1824 union pplib_clock_info *clock_info)
1825{
1826 struct rv6xx_ps *ps = rv6xx_get_ps(rps);
1827 u32 sclk, mclk;
1828 u16 vddc;
1829 struct rv6xx_pl *pl;
1830
1831 switch (index) {
1832 case 0:
1833 pl = &ps->low;
1834 break;
1835 case 1:
1836 pl = &ps->medium;
1837 break;
1838 case 2:
1839 default:
1840 pl = &ps->high;
1841 break;
1842 }
1843
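/* clocks are stored as a 16-bit low word plus an 8-bit high byte, in 10 kHz units */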
1844 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
1845 sclk |= clock_info->r600.ucEngineClockHigh << 16;
1846 mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
1847 mclk |= clock_info->r600.ucMemoryClockHigh << 16;
1848
1849 pl->mclk = mclk;
1850 pl->sclk = sclk;
1851 pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
1852 pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
1853
1854 /* patch up vddc if necessary */
1855 if (pl->vddc == 0xff01) {
1856 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
1857 pl->vddc = vddc;
1858 }
1859
1860 /* fix up pcie gen2 */
1861 if (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) {
1862 if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV630)) {
1863 if (pl->vddc < 1100)
1864 pl->flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
1865 }
1866 }
1867
1868 /* patch up boot state */
1869 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1870 u16 vddc, vddci, mvdd;
1871 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
1872 pl->mclk = rdev->clock.default_mclk;
1873 pl->sclk = rdev->clock.default_sclk;
1874 pl->vddc = vddc;
1875 }
1876}
1877
1878static int rv6xx_parse_power_table(struct radeon_device *rdev)
1879{
1880 struct radeon_mode_info *mode_info = &rdev->mode_info;
1881 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1882 union pplib_power_state *power_state;
1883 int i, j;
1884 union pplib_clock_info *clock_info;
1885 union power_info *power_info;
1886 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1887 u16 data_offset;
1888 u8 frev, crev;
1889 struct rv6xx_ps *ps;
1890
1891 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1892 &frev, &crev, &data_offset))
1893 return -EINVAL;
1894 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1895
1896 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
1897 power_info->pplib.ucNumStates, GFP_KERNEL);
1898 if (!rdev->pm.dpm.ps)
1899 return -ENOMEM;
1900 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1901 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1902 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1903
1904 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
1905 power_state = (union pplib_power_state *)
1906 (mode_info->atom_context->bios + data_offset +
1907 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
1908 i * power_info->pplib.ucStateEntrySize);
1909 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1910 (mode_info->atom_context->bios + data_offset +
1911 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
1912 (power_state->v1.ucNonClockStateIndex *
1913 power_info->pplib.ucNonClockSize));
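/* ucStateEntrySize counts the non-clock entry plus one clock index per level */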
1914 if (power_info->pplib.ucStateEntrySize - 1) {
1915 ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
1916 if (ps == NULL) {
1917 kfree(rdev->pm.dpm.ps);
1918 return -ENOMEM;
1919 }
1920 rdev->pm.dpm.ps[i].ps_priv = ps;
1921 rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1922 non_clock_info);
1923 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
1924 clock_info = (union pplib_clock_info *)
1925 (mode_info->atom_context->bios + data_offset +
1926 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
1927 (power_state->v1.ucClockStateIndices[j] *
1928 power_info->pplib.ucClockInfoSize));
1929 rv6xx_parse_pplib_clock_info(rdev,
1930 &rdev->pm.dpm.ps[i], j,
1931 clock_info);
1932 }
1933 }
1934 }
1935 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
1936 return 0;
1937}
1938
1939int rv6xx_dpm_init(struct radeon_device *rdev)
1940{
1941 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
1942 uint16_t data_offset, size;
1943 uint8_t frev, crev;
1944 struct atom_clock_dividers dividers;
1945 struct rv6xx_power_info *pi;
1946 int ret;
1947
1948 pi = kzalloc(sizeof(struct rv6xx_power_info), GFP_KERNEL);
1949 if (pi == NULL)
1950 return -ENOMEM;
1951 rdev->pm.dpm.priv = pi;
1952
1953 ret = rv6xx_parse_power_table(rdev);
1954 if (ret)
1955 return ret;
1956
1957 if (rdev->pm.dpm.voltage_response_time == 0)
1958 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
1959 if (rdev->pm.dpm.backbias_response_time == 0)
1960 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
1961
1962 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1963 0, false, &dividers);
1964 if (ret)
1965 pi->spll_ref_div = R600_REFERENCEDIVIDER_DFLT;
1966 else
1967 pi->spll_ref_div = dividers.ref_div + 1;
1968
1969 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
1970 0, false, &dividers);
1971 if (ret)
1972 pi->mpll_ref_div = R600_REFERENCEDIVIDER_DFLT;
1973 else
1974 pi->mpll_ref_div = dividers.ref_div + 1;
1975
1976 if (rdev->family >= CHIP_RV670)
1977 pi->fb_div_scale = 1;
1978 else
1979 pi->fb_div_scale = 0;
1980
1981 pi->voltage_control =
1982 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
1983
1984 pi->gfx_clock_gating = true;
1985
1986 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
1987 &frev, &crev, &data_offset)) {
1988 pi->sclk_ss = true;
1989 pi->mclk_ss = true;
1990 pi->dynamic_ss = true;
1991 } else {
1992 pi->sclk_ss = false;
1993 pi->mclk_ss = false;
1994 pi->dynamic_ss = false;
1995 }
1996
1997 pi->dynamic_pcie_gen2 = true;
1998
1999 if (pi->gfx_clock_gating &&
2000 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2001 pi->thermal_protection = true;
2002 else
2003 pi->thermal_protection = false;
2004
2005 pi->display_gap = true;
2006
2007 return 0;
2008}
2009
2010void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
2011 struct radeon_ps *rps)
2012{
2013 struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2014 struct rv6xx_pl *pl;
2015
2016 r600_dpm_print_class_info(rps->class, rps->class2);
2017 r600_dpm_print_cap_info(rps->caps);
2018 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2019 pl = &ps->low;
2020 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n",
2021 pl->sclk, pl->mclk, pl->vddc);
2022 pl = &ps->medium;
2023 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n",
2024 pl->sclk, pl->mclk, pl->vddc);
2025 pl = &ps->high;
2026 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n",
2027 pl->sclk, pl->mclk, pl->vddc);
2028 r600_dpm_print_ps_status(rdev, rps);
2029}
2030
2031void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2032 struct seq_file *m)
2033{
2034 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2035 struct rv6xx_ps *ps = rv6xx_get_ps(rps);
2036 struct rv6xx_pl *pl;
2037 u32 current_index =
2038 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2039 CURRENT_PROFILE_INDEX_SHIFT;
2040
2041 if (current_index > 2) {
2042 seq_printf(m, "invalid dpm profile %d\n", current_index);
2043 } else {
2044 if (current_index == 0)
2045 pl = &ps->low;
2046 else if (current_index == 1)
2047 pl = &ps->medium;
2048 else /* current_index == 2 */
2049 pl = &ps->high;
2050 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2051 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
2052 current_index, pl->sclk, pl->mclk, pl->vddc);
2053 }
2054}
2055
2056void rv6xx_dpm_fini(struct radeon_device *rdev)
2057{
2058 int i;
2059
2060 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2061 kfree(rdev->pm.dpm.ps[i].ps_priv);
2062 }
2063 kfree(rdev->pm.dpm.ps);
2064 kfree(rdev->pm.dpm.priv);
2065}
2066
2067u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low)
2068{
2069 struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
2070
2071 if (low)
2072 return requested_state->low.sclk;
2073 else
2074 return requested_state->high.sclk;
2075}
2076
2077u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
2078{
2079 struct rv6xx_ps *requested_state = rv6xx_get_ps(rdev->pm.dpm.requested_ps);
2080
2081 if (low)
2082 return requested_state->low.mclk;
2083 else
2084 return requested_state->high.mclk;
2085}
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.h b/drivers/gpu/drm/radeon/rv6xx_dpm.h
new file mode 100644
index 000000000000..8035d53ebea6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#ifndef __RV6XX_DPM_H__
26#define __RV6XX_DPM_H__
27
28#include "r600_dpm.h"
29
30/* Represents a single SCLK step. */
31struct rv6xx_sclk_stepping
32{
33 u32 vco_frequency;
34 u32 post_divider;
35};
36
37struct rv6xx_pm_hw_state {
38 u32 sclks[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
39 u32 mclks[R600_PM_NUMBER_OF_MCLKS];
40 u16 vddc[R600_PM_NUMBER_OF_VOLTAGE_LEVELS];
41 bool backbias[R600_PM_NUMBER_OF_VOLTAGE_LEVELS];
42 bool pcie_gen2[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
43 u8 high_sclk_index;
44 u8 medium_sclk_index;
45 u8 low_sclk_index;
46 u8 high_mclk_index;
47 u8 medium_mclk_index;
48 u8 low_mclk_index;
49 u8 high_vddc_index;
50 u8 medium_vddc_index;
51 u8 low_vddc_index;
52 u8 rp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
53 u8 lp[R600_PM_NUMBER_OF_ACTIVITY_LEVELS];
54};
55
56struct rv6xx_power_info {
57 /* flags */
58 bool voltage_control;
59 bool sclk_ss;
60 bool mclk_ss;
61 bool dynamic_ss;
62 bool dynamic_pcie_gen2;
63 bool thermal_protection;
64 bool display_gap;
65 bool gfx_clock_gating;
66 /* clk values */
67 u32 fb_div_scale;
68 u32 spll_ref_div;
69 u32 mpll_ref_div;
70 u32 bsu;
71 u32 bsp;
72 /* */
73 u32 active_auto_throttle_sources;
74 /* current power state */
75 u32 restricted_levels;
76 struct rv6xx_pm_hw_state hw;
77};
78
79struct rv6xx_pl {
80 u32 sclk;
81 u32 mclk;
82 u16 vddc;
83 u32 flags;
84};
85
86struct rv6xx_ps {
87 struct rv6xx_pl high;
88 struct rv6xx_pl medium;
89 struct rv6xx_pl low;
90};
91
92#define RV6XX_DEFAULT_VCLK_FREQ 40000 /* in 10 kHz units */
93#define RV6XX_DEFAULT_DCLK_FREQ 30000 /* in 10 kHz units */
94
95#endif
diff --git a/drivers/gpu/drm/radeon/rv6xxd.h b/drivers/gpu/drm/radeon/rv6xxd.h
new file mode 100644
index 000000000000..34e86f90b431
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv6xxd.h
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef RV6XXD_H
24#define RV6XXD_H
25
26/* RV6xx power management */
27#define SPLL_CNTL_MODE 0x60c
28# define SPLL_DIV_SYNC (1 << 5)
29
30#define GENERAL_PWRMGT 0x618
31# define GLOBAL_PWRMGT_EN (1 << 0)
32# define STATIC_PM_EN (1 << 1)
33# define MOBILE_SU (1 << 2)
34# define THERMAL_PROTECTION_DIS (1 << 3)
35# define THERMAL_PROTECTION_TYPE (1 << 4)
36# define ENABLE_GEN2PCIE (1 << 5)
37# define SW_GPIO_INDEX(x) ((x) << 6)
38# define SW_GPIO_INDEX_MASK (3 << 6)
39# define LOW_VOLT_D2_ACPI (1 << 8)
40# define LOW_VOLT_D3_ACPI (1 << 9)
41# define VOLT_PWRMGT_EN (1 << 10)
42# define BACKBIAS_PAD_EN (1 << 16)
43# define BACKBIAS_VALUE (1 << 17)
44# define BACKBIAS_DPM_CNTL (1 << 18)
45# define DYN_SPREAD_SPECTRUM_EN (1 << 21)
46
47#define MCLK_PWRMGT_CNTL 0x624
48# define MPLL_PWRMGT_OFF (1 << 0)
49# define YCLK_TURNOFF (1 << 1)
50# define MPLL_TURNOFF (1 << 2)
51# define SU_MCLK_USE_BCLK (1 << 3)
52# define DLL_READY (1 << 4)
53# define MC_BUSY (1 << 5)
54# define MC_INT_CNTL (1 << 7)
55# define MRDCKA_SLEEP (1 << 8)
56# define MRDCKB_SLEEP (1 << 9)
57# define MRDCKC_SLEEP (1 << 10)
58# define MRDCKD_SLEEP (1 << 11)
59# define MRDCKE_SLEEP (1 << 12)
60# define MRDCKF_SLEEP (1 << 13)
61# define MRDCKG_SLEEP (1 << 14)
62# define MRDCKH_SLEEP (1 << 15)
63# define MRDCKA_RESET (1 << 16)
64# define MRDCKB_RESET (1 << 17)
65# define MRDCKC_RESET (1 << 18)
66# define MRDCKD_RESET (1 << 19)
67# define MRDCKE_RESET (1 << 20)
68# define MRDCKF_RESET (1 << 21)
69# define MRDCKG_RESET (1 << 22)
70# define MRDCKH_RESET (1 << 23)
71# define DLL_READY_READ (1 << 24)
72# define USE_DISPLAY_GAP (1 << 25)
73# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
74# define USE_DISPLAY_GAP_CTXSW (1 << 27)
75# define MPLL_TURNOFF_D2 (1 << 28)
76# define USE_DISPLAY_URGENT_CTXSW (1 << 29)
77
78#define MPLL_FREQ_LEVEL_0 0x6e8
79# define LEVEL0_MPLL_POST_DIV(x) ((x) << 0)
80# define LEVEL0_MPLL_POST_DIV_MASK (0xff << 0)
81# define LEVEL0_MPLL_FB_DIV(x) ((x) << 8)
82# define LEVEL0_MPLL_FB_DIV_MASK (0xfff << 8)
83# define LEVEL0_MPLL_REF_DIV(x) ((x) << 20)
84# define LEVEL0_MPLL_REF_DIV_MASK (0x3f << 20)
85# define LEVEL0_MPLL_DIV_EN (1 << 28)
86# define LEVEL0_DLL_BYPASS (1 << 29)
87# define LEVEL0_DLL_RESET (1 << 30)
88
89#define VID_RT 0x6f8
90# define VID_CRT(x) ((x) << 0)
91# define VID_CRT_MASK (0x1fff << 0)
92# define VID_CRTU(x) ((x) << 13)
93# define VID_CRTU_MASK (7 << 13)
94# define SSTU(x) ((x) << 16)
95# define SSTU_MASK (7 << 16)
96# define VID_SWT(x) ((x) << 19)
97# define VID_SWT_MASK (0x1f << 19)
98# define BRT(x) ((x) << 24)
99# define BRT_MASK (0xff << 24)
100
101#define TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
102# define TARGET_PROFILE_INDEX_MASK (3 << 0)
103# define TARGET_PROFILE_INDEX_SHIFT 0
104# define CURRENT_PROFILE_INDEX_MASK (3 << 2)
105# define CURRENT_PROFILE_INDEX_SHIFT 2
106# define DYN_PWR_ENTER_INDEX(x) ((x) << 4)
107# define DYN_PWR_ENTER_INDEX_MASK (3 << 4)
108# define DYN_PWR_ENTER_INDEX_SHIFT 4
109# define CURR_MCLK_INDEX_MASK (3 << 6)
110# define CURR_MCLK_INDEX_SHIFT 6
111# define CURR_SCLK_INDEX_MASK (0x1f << 8)
112# define CURR_SCLK_INDEX_SHIFT 8
113# define CURR_VID_INDEX_MASK (3 << 13)
114# define CURR_VID_INDEX_SHIFT 13
115
116#define VID_UPPER_GPIO_CNTL 0x740
117# define CTXSW_UPPER_GPIO_VALUES(x) ((x) << 0)
118# define CTXSW_UPPER_GPIO_VALUES_MASK (7 << 0)
119# define HIGH_UPPER_GPIO_VALUES(x) ((x) << 3)
120# define HIGH_UPPER_GPIO_VALUES_MASK (7 << 3)
121# define MEDIUM_UPPER_GPIO_VALUES(x) ((x) << 6)
122# define MEDIUM_UPPER_GPIO_VALUES_MASK (7 << 6)
123# define LOW_UPPER_GPIO_VALUES(x) ((x) << 9)
124# define LOW_UPPER_GPIO_VALUES_MASK (7 << 9)
125# define CTXSW_BACKBIAS_VALUE (1 << 12)
126# define HIGH_BACKBIAS_VALUE (1 << 13)
127# define MEDIUM_BACKBIAS_VALUE (1 << 14)
128# define LOW_BACKBIAS_VALUE (1 << 15)
129
130#define CG_DISPLAY_GAP_CNTL 0x7dc
131# define DISP1_GAP(x) ((x) << 0)
132# define DISP1_GAP_MASK (3 << 0)
133# define DISP2_GAP(x) ((x) << 2)
134# define DISP2_GAP_MASK (3 << 2)
135# define VBI_TIMER_COUNT(x) ((x) << 4)
136# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
137# define VBI_TIMER_UNIT(x) ((x) << 20)
138# define VBI_TIMER_UNIT_MASK (7 << 20)
139# define DISP1_GAP_MCHG(x) ((x) << 24)
140# define DISP1_GAP_MCHG_MASK (3 << 24)
141# define DISP2_GAP_MCHG(x) ((x) << 26)
142# define DISP2_GAP_MCHG_MASK (3 << 26)
143
144#define CG_THERMAL_CTRL 0x7f0
145# define DPM_EVENT_SRC(x) ((x) << 0)
146# define DPM_EVENT_SRC_MASK (7 << 0)
147# define THERM_INC_CLK (1 << 3)
148# define TOFFSET(x) ((x) << 4)
149# define TOFFSET_MASK (0xff << 4)
150# define DIG_THERM_DPM(x) ((x) << 12)
151# define DIG_THERM_DPM_MASK (0xff << 12)
152# define CTF_SEL(x) ((x) << 20)
153# define CTF_SEL_MASK (7 << 20)
154# define CTF_PAD_POLARITY (1 << 23)
155# define CTF_PAD_EN (1 << 24)
156
157#define CG_SPLL_SPREAD_SPECTRUM_LOW 0x820
158# define SSEN (1 << 0)
159# define CLKS(x) ((x) << 3)
160# define CLKS_MASK (0xff << 3)
161# define CLKS_SHIFT 3
162# define CLKV(x) ((x) << 11)
163# define CLKV_MASK (0x7ff << 11)
164# define CLKV_SHIFT 11
165#define CG_MPLL_SPREAD_SPECTRUM 0x830
166
167#define CITF_CNTL 0x200c
168# define BLACKOUT_RD (1 << 0)
169# define BLACKOUT_WR (1 << 1)
170
171#define RAMCFG 0x2408
172#define NOOFBANK_SHIFT 0
173#define NOOFBANK_MASK 0x00000001
174#define NOOFRANK_SHIFT 1
175#define NOOFRANK_MASK 0x00000002
176#define NOOFROWS_SHIFT 2
177#define NOOFROWS_MASK 0x0000001C
178#define NOOFCOLS_SHIFT 5
179#define NOOFCOLS_MASK 0x00000060
180#define CHANSIZE_SHIFT 7
181#define CHANSIZE_MASK 0x00000080
182#define BURSTLENGTH_SHIFT 8
183#define BURSTLENGTH_MASK 0x00000100
184#define CHANSIZE_OVERRIDE (1 << 10)
185
186#define SQM_RATIO 0x2424
187# define STATE0(x) ((x) << 0)
188# define STATE0_MASK (0xff << 0)
189# define STATE1(x) ((x) << 8)
190# define STATE1_MASK (0xff << 8)
191# define STATE2(x) ((x) << 16)
192# define STATE2_MASK (0xff << 16)
193# define STATE3(x) ((x) << 24)
194# define STATE3_MASK (0xff << 24)
195
196#define ARB_RFSH_CNTL 0x2460
197# define ENABLE (1 << 0)
198#define ARB_RFSH_RATE 0x2464
199# define POWERMODE0(x) ((x) << 0)
200# define POWERMODE0_MASK (0xff << 0)
201# define POWERMODE1(x) ((x) << 8)
202# define POWERMODE1_MASK (0xff << 8)
203# define POWERMODE2(x) ((x) << 16)
204# define POWERMODE2_MASK (0xff << 16)
205# define POWERMODE3(x) ((x) << 24)
206# define POWERMODE3_MASK (0xff << 24)
207
208#define MC_SEQ_DRAM 0x2608
209# define CKE_DYN (1 << 12)
210
211#define MC_SEQ_CMD 0x26c4
212
213#define MC_SEQ_RESERVE_S 0x2890
214#define MC_SEQ_RESERVE_M 0x2894
215
216#define LVTMA_DATA_SYNCHRONIZATION 0x7adc
217# define LVTMA_PFREQCHG (1 << 8)
218#define DCE3_LVTMA_DATA_SYNCHRONIZATION 0x7f98
219
220/* PCIE indirect regs */
221#define PCIE_P_CNTL 0x40
222# define P_PLL_PWRDN_IN_L1L23 (1 << 3)
223# define P_PLL_BUF_PDNB (1 << 4)
224# define P_PLL_PDNB (1 << 9)
225# define P_ALLOW_PRX_FRONTEND_SHUTOFF (1 << 12)
226/* PCIE PORT indirect regs */
227#define PCIE_LC_CNTL 0xa0
228# define LC_L0S_INACTIVITY(x) ((x) << 8)
229# define LC_L0S_INACTIVITY_MASK (0xf << 8)
230# define LC_L0S_INACTIVITY_SHIFT 8
231# define LC_L1_INACTIVITY(x) ((x) << 12)
232# define LC_L1_INACTIVITY_MASK (0xf << 12)
233# define LC_L1_INACTIVITY_SHIFT 12
234# define LC_PMI_TO_L1_DIS (1 << 16)
235# define LC_ASPM_TO_L1_DIS (1 << 24)
236#define PCIE_LC_SPEED_CNTL 0xa4
237# define LC_GEN2_EN (1 << 0)
238# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 7)
239# define LC_CURRENT_DATA_RATE (1 << 11)
240# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
241# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
242# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
243# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
244# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
245
246#endif
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
new file mode 100644
index 000000000000..3f5e1cf138ba
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -0,0 +1,508 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv730d.h"
28#include "r600_dpm.h"
29#include "rv770_dpm.h"
30#include "atom.h"
31
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
38struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
39
40int rv730_populate_sclk_value(struct radeon_device *rdev,
41 u32 engine_clock,
42 RV770_SMC_SCLK_VALUE *sclk)
43{
44 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
45 struct atom_clock_dividers dividers;
46 u32 spll_func_cntl = pi->clk_regs.rv730.cg_spll_func_cntl;
47 u32 spll_func_cntl_2 = pi->clk_regs.rv730.cg_spll_func_cntl_2;
48 u32 spll_func_cntl_3 = pi->clk_regs.rv730.cg_spll_func_cntl_3;
49 u32 cg_spll_spread_spectrum = pi->clk_regs.rv730.cg_spll_spread_spectrum;
50 u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv730.cg_spll_spread_spectrum_2;
51 u64 tmp;
52 u32 reference_clock = rdev->clock.spll.reference_freq;
53 u32 reference_divider, post_divider;
54 u32 fbdiv;
55 int ret;
56
57 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
58 engine_clock, false, &dividers);
59 if (ret)
60 return ret;
61
62 reference_divider = 1 + dividers.ref_div;
63
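/* the encoded post divider packs high/low pulse lengths into one byte; total division is hi + lo + 2 */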
64 if (dividers.enable_post_div)
65 post_divider = ((dividers.post_div >> 4) & 0xf) +
66 (dividers.post_div & 0xf) + 2;
67 else
68 post_divider = 1;
69
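/* fractional feedback divider: eng_clk * ref_div * post_div / ref_clk, scaled by 2^14 (16384) */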
70 tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
71 do_div(tmp, reference_clock);
72 fbdiv = (u32) tmp;
73
74 /* set up registers */
75 if (dividers.enable_post_div)
76 spll_func_cntl |= SPLL_DIVEN;
77 else
78 spll_func_cntl &= ~SPLL_DIVEN;
79 spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
80 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
81 spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
82 spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);
83
84 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
85 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
86
87 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
88 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
89 spll_func_cntl_3 |= SPLL_DITHEN;
90
91 if (pi->sclk_ss) {
92 struct radeon_atom_ss ss;
93 u32 vco_freq = engine_clock * post_divider;
94
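/* derive the spread-spectrum step (CLK_S) and depth (CLK_V) from the table's rate and percentage */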
95 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
96 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
97 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
98 u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);
99
100 cg_spll_spread_spectrum &= ~CLK_S_MASK;
101 cg_spll_spread_spectrum |= CLK_S(clk_s);
102 cg_spll_spread_spectrum |= SSEN;
103
104 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
105 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
106 }
107 }
108
109 sclk->sclk_value = cpu_to_be32(engine_clock);
110 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
111 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
112 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
113 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
114 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
115
116 return 0;
117}
118
119int rv730_populate_mclk_value(struct radeon_device *rdev,
120 u32 engine_clock, u32 memory_clock,
121 LPRV7XX_SMC_MCLK_VALUE mclk)
122{
123 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
124 u32 mclk_pwrmgt_cntl = pi->clk_regs.rv730.mclk_pwrmgt_cntl;
125 u32 dll_cntl = pi->clk_regs.rv730.dll_cntl;
126 u32 mpll_func_cntl = pi->clk_regs.rv730.mpll_func_cntl;
127 u32 mpll_func_cntl_2 = pi->clk_regs.rv730.mpll_func_cntl2;
128 u32 mpll_func_cntl_3 = pi->clk_regs.rv730.mpll_func_cntl3;
129 u32 mpll_ss = pi->clk_regs.rv730.mpll_ss;
130 u32 mpll_ss2 = pi->clk_regs.rv730.mpll_ss2;
131 struct atom_clock_dividers dividers;
132 u32 post_divider, reference_divider;
133 int ret;
134
135 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
136 memory_clock, false, &dividers);
137 if (ret)
138 return ret;
139
140 reference_divider = dividers.ref_div + 1;
141
142 if (dividers.enable_post_div)
143 post_divider = ((dividers.post_div >> 4) & 0xf) +
144 (dividers.post_div & 0xf) + 2;
145 else
146 post_divider = 1;
147
148 /* setup the registers */
149 if (dividers.enable_post_div)
150 mpll_func_cntl |= MPLL_DIVEN;
151 else
152 mpll_func_cntl &= ~MPLL_DIVEN;
153
154 mpll_func_cntl &= ~(MPLL_REF_DIV_MASK | MPLL_HILEN_MASK | MPLL_LOLEN_MASK);
155 mpll_func_cntl |= MPLL_REF_DIV(dividers.ref_div);
156 mpll_func_cntl |= MPLL_HILEN((dividers.post_div >> 4) & 0xf);
157 mpll_func_cntl |= MPLL_LOLEN(dividers.post_div & 0xf);
158
159 mpll_func_cntl_3 &= ~MPLL_FB_DIV_MASK;
160 mpll_func_cntl_3 |= MPLL_FB_DIV(dividers.fb_div);
161 if (dividers.enable_dithen)
162 mpll_func_cntl_3 |= MPLL_DITHEN;
163 else
164 mpll_func_cntl_3 &= ~MPLL_DITHEN;
165
166 if (pi->mclk_ss) {
167 struct radeon_atom_ss ss;
168 u32 vco_freq = memory_clock * post_divider;
169
170 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
171 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
172 u32 reference_clock = rdev->clock.mpll.reference_freq;
173 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
174 u32 clk_v = ss.percentage * dividers.fb_div / (clk_s * 10000);
175
176 mpll_ss &= ~CLK_S_MASK;
177 mpll_ss |= CLK_S(clk_s);
178 mpll_ss |= SSEN;
179
180 mpll_ss2 &= ~CLK_V_MASK;
181 mpll_ss2 |= CLK_V(clk_v);
182 }
183 }
184
185
186 mclk->mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
187 mclk->mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);
188 mclk->mclk730.mclk_value = cpu_to_be32(memory_clock);
189 mclk->mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
190 mclk->mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
191 mclk->mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
192 mclk->mclk730.vMPLL_SS = cpu_to_be32(mpll_ss);
193 mclk->mclk730.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
194
195 return 0;
196}
197
198void rv730_read_clock_registers(struct radeon_device *rdev)
199{
200 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
201
202 pi->clk_regs.rv730.cg_spll_func_cntl =
203 RREG32(CG_SPLL_FUNC_CNTL);
204 pi->clk_regs.rv730.cg_spll_func_cntl_2 =
205 RREG32(CG_SPLL_FUNC_CNTL_2);
206 pi->clk_regs.rv730.cg_spll_func_cntl_3 =
207 RREG32(CG_SPLL_FUNC_CNTL_3);
208 pi->clk_regs.rv730.cg_spll_spread_spectrum =
209 RREG32(CG_SPLL_SPREAD_SPECTRUM);
210 pi->clk_regs.rv730.cg_spll_spread_spectrum_2 =
211 RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
212
213 pi->clk_regs.rv730.mclk_pwrmgt_cntl =
214 RREG32(TCI_MCLK_PWRMGT_CNTL);
215 pi->clk_regs.rv730.dll_cntl =
216 RREG32(TCI_DLL_CNTL);
217 pi->clk_regs.rv730.mpll_func_cntl =
218 RREG32(CG_MPLL_FUNC_CNTL);
219 pi->clk_regs.rv730.mpll_func_cntl2 =
220 RREG32(CG_MPLL_FUNC_CNTL_2);
221 pi->clk_regs.rv730.mpll_func_cntl3 =
222 RREG32(CG_MPLL_FUNC_CNTL_3);
223 pi->clk_regs.rv730.mpll_ss =
224 RREG32(CG_TCI_MPLL_SPREAD_SPECTRUM);
225 pi->clk_regs.rv730.mpll_ss2 =
226 RREG32(CG_TCI_MPLL_SPREAD_SPECTRUM_2);
227}
228
229int rv730_populate_smc_acpi_state(struct radeon_device *rdev,
230 RV770_SMC_STATETABLE *table)
231{
232 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
233 u32 mpll_func_cntl = 0;
234 u32 mpll_func_cntl_2 = 0;
235 u32 mpll_func_cntl_3 = 0;
236 u32 mclk_pwrmgt_cntl;
237 u32 dll_cntl;
238 u32 spll_func_cntl;
239 u32 spll_func_cntl_2;
240 u32 spll_func_cntl_3;
241
242 table->ACPIState = table->initialState;
243 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
244
245 if (pi->acpi_vddc) {
246 rv770_populate_vddc_value(rdev, pi->acpi_vddc,
247 &table->ACPIState.levels[0].vddc);
248 table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ?
249 pi->acpi_pcie_gen2 : 0;
250 table->ACPIState.levels[0].gen2XSP =
251 pi->acpi_pcie_gen2;
252 } else {
253 rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
254 &table->ACPIState.levels[0].vddc);
255 table->ACPIState.levels[0].gen2PCIE = 0;
256 }
257
258 mpll_func_cntl = pi->clk_regs.rv730.mpll_func_cntl;
259 mpll_func_cntl_2 = pi->clk_regs.rv730.mpll_func_cntl2;
260 mpll_func_cntl_3 = pi->clk_regs.rv730.mpll_func_cntl3;
261
262 mpll_func_cntl |= MPLL_RESET | MPLL_BYPASS_EN;
263 mpll_func_cntl &= ~MPLL_SLEEP;
264
265 mpll_func_cntl_2 &= ~MCLK_MUX_SEL_MASK;
266 mpll_func_cntl_2 |= MCLK_MUX_SEL(1);
267
268 mclk_pwrmgt_cntl = (MRDCKA_RESET |
269 MRDCKB_RESET |
270 MRDCKC_RESET |
271 MRDCKD_RESET |
272 MRDCKE_RESET |
273 MRDCKF_RESET |
274 MRDCKG_RESET |
275 MRDCKH_RESET |
276 MRDCKA_SLEEP |
277 MRDCKB_SLEEP |
278 MRDCKC_SLEEP |
279 MRDCKD_SLEEP |
280 MRDCKE_SLEEP |
281 MRDCKF_SLEEP |
282 MRDCKG_SLEEP |
283 MRDCKH_SLEEP);
284
285 dll_cntl = 0xff000000;
286
287 spll_func_cntl = pi->clk_regs.rv730.cg_spll_func_cntl;
288 spll_func_cntl_2 = pi->clk_regs.rv730.cg_spll_func_cntl_2;
289 spll_func_cntl_3 = pi->clk_regs.rv730.cg_spll_func_cntl_3;
290
291 spll_func_cntl |= SPLL_RESET | SPLL_BYPASS_EN;
292 spll_func_cntl &= ~SPLL_SLEEP;
293
294 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
295 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
296
297 table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
298 table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2);
299 table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3);
300 table->ACPIState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
301 table->ACPIState.levels[0].mclk.mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl);
302
303 table->ACPIState.levels[0].mclk.mclk730.mclk_value = 0;
304
305 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
306 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
307 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
308
309 table->ACPIState.levels[0].sclk.sclk_value = 0;
310
311 rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
312
313 table->ACPIState.levels[1] = table->ACPIState.levels[0];
314 table->ACPIState.levels[2] = table->ACPIState.levels[0];
315
316 return 0;
317}
318
319int rv730_populate_smc_initial_state(struct radeon_device *rdev,
320 struct radeon_ps *radeon_state,
321 RV770_SMC_STATETABLE *table)
322{
323 struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
324 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
325 u32 a_t;
326
327 table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL =
328 cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl);
329 table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 =
330 cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl2);
331 table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 =
332 cpu_to_be32(pi->clk_regs.rv730.mpll_func_cntl3);
333 table->initialState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL =
334 cpu_to_be32(pi->clk_regs.rv730.mclk_pwrmgt_cntl);
335 table->initialState.levels[0].mclk.mclk730.vDLL_CNTL =
336 cpu_to_be32(pi->clk_regs.rv730.dll_cntl);
337 table->initialState.levels[0].mclk.mclk730.vMPLL_SS =
338 cpu_to_be32(pi->clk_regs.rv730.mpll_ss);
339 table->initialState.levels[0].mclk.mclk730.vMPLL_SS2 =
340 cpu_to_be32(pi->clk_regs.rv730.mpll_ss2);
341
342 table->initialState.levels[0].mclk.mclk730.mclk_value =
343 cpu_to_be32(initial_state->low.mclk);
344
345 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
346 cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl);
347 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
348 cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl_2);
349 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
350 cpu_to_be32(pi->clk_regs.rv730.cg_spll_func_cntl_3);
351 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
352 cpu_to_be32(pi->clk_regs.rv730.cg_spll_spread_spectrum);
353 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
354 cpu_to_be32(pi->clk_regs.rv730.cg_spll_spread_spectrum_2);
355
356 table->initialState.levels[0].sclk.sclk_value =
357 cpu_to_be32(initial_state->low.sclk);
358
359 table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
360
361 table->initialState.levels[0].seqValue =
362 rv770_get_seq_value(rdev, &initial_state->low);
363
364 rv770_populate_vddc_value(rdev,
365 initial_state->low.vddc,
366 &table->initialState.levels[0].vddc);
367 rv770_populate_initial_mvdd_value(rdev,
368 &table->initialState.levels[0].mvdd);
369
370 a_t = CG_R(0xffff) | CG_L(0);
371
372 table->initialState.levels[0].aT = cpu_to_be32(a_t);
373
374 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
375
376 if (pi->boot_in_gen2)
377 table->initialState.levels[0].gen2PCIE = 1;
378 else
379 table->initialState.levels[0].gen2PCIE = 0;
380 if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
381 table->initialState.levels[0].gen2XSP = 1;
382 else
383 table->initialState.levels[0].gen2XSP = 0;
384
385 table->initialState.levels[1] = table->initialState.levels[0];
386 table->initialState.levels[2] = table->initialState.levels[0];
387
388 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
389
390 return 0;
391}
392
393void rv730_program_memory_timing_parameters(struct radeon_device *rdev,
394 struct radeon_ps *radeon_state)
395{
396 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
397 u32 arb_refresh_rate = 0;
398 u32 dram_timing = 0;
399 u32 dram_timing2 = 0;
400 u32 old_dram_timing = 0;
401 u32 old_dram_timing2 = 0;
402
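/*
 * The ATOM timing method only writes the live MC_ARB_DRAM_TIMING
 * registers, so program each level's timings there, copy the result
 * into that level's shadow set (_3/_2/_1), then restore the boot
 * values afterwards.
 */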
403 arb_refresh_rate = RREG32(MC_ARB_RFSH_RATE) &
404 ~(POWERMODE1_MASK | POWERMODE2_MASK | POWERMODE3_MASK);
405 arb_refresh_rate |=
406 (POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
407 POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
408 POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk)));
409 WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);
410
411 /* save the boot dram timings */
412 old_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
413 old_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
414
415 radeon_atom_set_engine_dram_timings(rdev,
416 state->high.sclk,
417 state->high.mclk);
418
419 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
420 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
421
422 WREG32(MC_ARB_DRAM_TIMING_3, dram_timing);
423 WREG32(MC_ARB_DRAM_TIMING2_3, dram_timing2);
424
425 radeon_atom_set_engine_dram_timings(rdev,
426 state->medium.sclk,
427 state->medium.mclk);
428
429 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
430 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
431
432 WREG32(MC_ARB_DRAM_TIMING_2, dram_timing);
433 WREG32(MC_ARB_DRAM_TIMING2_2, dram_timing2);
434
435 radeon_atom_set_engine_dram_timings(rdev,
436 state->low.sclk,
437 state->low.mclk);
438
439 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
440 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
441
442 WREG32(MC_ARB_DRAM_TIMING_1, dram_timing);
443 WREG32(MC_ARB_DRAM_TIMING2_1, dram_timing2);
444
445 /* restore the boot dram timings */
446 WREG32(MC_ARB_DRAM_TIMING, old_dram_timing);
447 WREG32(MC_ARB_DRAM_TIMING2, old_dram_timing2);
448
449}
450
451void rv730_start_dpm(struct radeon_device *rdev)
452{
453 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
454
455 WREG32_P(TCI_MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
456
457 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
458}
459
460void rv730_stop_dpm(struct radeon_device *rdev)
461{
462 PPSMC_Result result;
463
464 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
465
466 if (result != PPSMC_Result_OK)
467 DRM_ERROR("Could not force DPM to low\n");
468
469 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
470
471 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
472
473 WREG32_P(TCI_MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
474}
475
476void rv730_program_dcodt(struct radeon_device *rdev, bool use_dcodt)
477{
478 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
479 u32 i = use_dcodt ? 0 : 1;
480 u32 mc4_io_pad_cntl;
481
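/* odt_value_*[0] holds the fixed DCODT-on values, [1] the pad settings sampled at init by rv730_get_odt_values() */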
482 mc4_io_pad_cntl = RREG32(MC4_IO_DQ_PAD_CNTL_D0_I0);
483 mc4_io_pad_cntl &= 0xFFFFFF00;
484 mc4_io_pad_cntl |= pi->odt_value_0[i];
485 WREG32(MC4_IO_DQ_PAD_CNTL_D0_I0, mc4_io_pad_cntl);
486 WREG32(MC4_IO_DQ_PAD_CNTL_D0_I1, mc4_io_pad_cntl);
487
488 mc4_io_pad_cntl = RREG32(MC4_IO_QS_PAD_CNTL_D0_I0);
489 mc4_io_pad_cntl &= 0xFFFFFF00;
490 mc4_io_pad_cntl |= pi->odt_value_1[i];
491 WREG32(MC4_IO_QS_PAD_CNTL_D0_I0, mc4_io_pad_cntl);
492 WREG32(MC4_IO_QS_PAD_CNTL_D0_I1, mc4_io_pad_cntl);
493}
494
495void rv730_get_odt_values(struct radeon_device *rdev)
496{
497 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
498 u32 mc4_io_pad_cntl;
499
500 pi->odt_value_0[0] = (u8)0;
501 pi->odt_value_1[0] = (u8)0x80;
502
503 mc4_io_pad_cntl = RREG32(MC4_IO_DQ_PAD_CNTL_D0_I0);
504 pi->odt_value_0[1] = (u8)(mc4_io_pad_cntl & 0xff);
505
506 mc4_io_pad_cntl = RREG32(MC4_IO_QS_PAD_CNTL_D0_I0);
507 pi->odt_value_1[1] = (u8)(mc4_io_pad_cntl & 0xff);
508}
diff --git a/drivers/gpu/drm/radeon/rv730d.h b/drivers/gpu/drm/radeon/rv730d.h
new file mode 100644
index 000000000000..f0a7954fb1cb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv730d.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef RV730_H
24#define RV730_H
25
26#define CG_SPLL_FUNC_CNTL 0x600
27#define SPLL_RESET (1 << 0)
28#define SPLL_SLEEP (1 << 1)
29#define SPLL_DIVEN (1 << 2)
30#define SPLL_BYPASS_EN (1 << 3)
31#define SPLL_REF_DIV(x) ((x) << 4)
32#define SPLL_REF_DIV_MASK (0x3f << 4)
33#define SPLL_HILEN(x) ((x) << 12)
34#define SPLL_HILEN_MASK (0xf << 12)
35#define SPLL_LOLEN(x) ((x) << 16)
36#define SPLL_LOLEN_MASK (0xf << 16)
37#define CG_SPLL_FUNC_CNTL_2 0x604
38#define SCLK_MUX_SEL(x) ((x) << 0)
39#define SCLK_MUX_SEL_MASK (0x1ff << 0)
40#define CG_SPLL_FUNC_CNTL_3 0x608
41#define SPLL_FB_DIV(x) ((x) << 0)
42#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
43#define SPLL_DITHEN (1 << 28)
44
45#define CG_MPLL_FUNC_CNTL 0x624
46#define MPLL_RESET (1 << 0)
47#define MPLL_SLEEP (1 << 1)
48#define MPLL_DIVEN (1 << 2)
49#define MPLL_BYPASS_EN (1 << 3)
50#define MPLL_REF_DIV(x) ((x) << 4)
51#define MPLL_REF_DIV_MASK (0x3f << 4)
52#define MPLL_HILEN(x) ((x) << 12)
53#define MPLL_HILEN_MASK (0xf << 12)
54#define MPLL_LOLEN(x) ((x) << 16)
55#define MPLL_LOLEN_MASK (0xf << 16)
56#define CG_MPLL_FUNC_CNTL_2 0x628
57#define MCLK_MUX_SEL(x) ((x) << 0)
58#define MCLK_MUX_SEL_MASK (0x1ff << 0)
59#define CG_MPLL_FUNC_CNTL_3 0x62c
60#define MPLL_FB_DIV(x) ((x) << 0)
61#define MPLL_FB_DIV_MASK (0x3ffffff << 0)
62#define MPLL_DITHEN (1 << 28)
63
64#define CG_TCI_MPLL_SPREAD_SPECTRUM 0x634
65#define CG_TCI_MPLL_SPREAD_SPECTRUM_2 0x638
66#define GENERAL_PWRMGT 0x63c
67# define GLOBAL_PWRMGT_EN (1 << 0)
68# define STATIC_PM_EN (1 << 1)
69# define THERMAL_PROTECTION_DIS (1 << 2)
70# define THERMAL_PROTECTION_TYPE (1 << 3)
71# define ENABLE_GEN2PCIE (1 << 4)
72# define ENABLE_GEN2XSP (1 << 5)
73# define SW_SMIO_INDEX(x) ((x) << 6)
74# define SW_SMIO_INDEX_MASK (3 << 6)
75# define LOW_VOLT_D2_ACPI (1 << 8)
76# define LOW_VOLT_D3_ACPI (1 << 9)
77# define VOLT_PWRMGT_EN (1 << 10)
78# define BACKBIAS_PAD_EN (1 << 18)
79# define BACKBIAS_VALUE (1 << 19)
80# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
81# define AC_DC_SW (1 << 24)
82
83#define SCLK_PWRMGT_CNTL 0x644
84# define SCLK_PWRMGT_OFF (1 << 0)
85# define SCLK_LOW_D1 (1 << 1)
86# define FIR_RESET (1 << 4)
87# define FIR_FORCE_TREND_SEL (1 << 5)
88# define FIR_TREND_MODE (1 << 6)
89# define DYN_GFX_CLK_OFF_EN (1 << 7)
90# define GFX_CLK_FORCE_ON (1 << 8)
91# define GFX_CLK_REQUEST_OFF (1 << 9)
92# define GFX_CLK_FORCE_OFF (1 << 10)
93# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
94# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
95# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
96
97#define TCI_MCLK_PWRMGT_CNTL 0x648
98# define MPLL_PWRMGT_OFF (1 << 5)
99# define DLL_READY (1 << 6)
100# define MC_INT_CNTL (1 << 7)
101# define MRDCKA_SLEEP (1 << 8)
102# define MRDCKB_SLEEP (1 << 9)
103# define MRDCKC_SLEEP (1 << 10)
104# define MRDCKD_SLEEP (1 << 11)
105# define MRDCKE_SLEEP (1 << 12)
106# define MRDCKF_SLEEP (1 << 13)
107# define MRDCKG_SLEEP (1 << 14)
108# define MRDCKH_SLEEP (1 << 15)
109# define MRDCKA_RESET (1 << 16)
110# define MRDCKB_RESET (1 << 17)
111# define MRDCKC_RESET (1 << 18)
112# define MRDCKD_RESET (1 << 19)
113# define MRDCKE_RESET (1 << 20)
114# define MRDCKF_RESET (1 << 21)
115# define MRDCKG_RESET (1 << 22)
116# define MRDCKH_RESET (1 << 23)
117# define DLL_READY_READ (1 << 24)
118# define USE_DISPLAY_GAP (1 << 25)
119# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
120# define MPLL_TURNOFF_D2 (1 << 28)
121#define TCI_DLL_CNTL 0x64c
122
123#define CG_PG_CNTL 0x858
124# define PWRGATE_ENABLE (1 << 0)
125
126#define CG_AT 0x6d4
127#define CG_R(x) ((x) << 0)
128#define CG_R_MASK (0xffff << 0)
129#define CG_L(x) ((x) << 16)
130#define CG_L_MASK (0xffff << 16)
131
132#define CG_SPLL_SPREAD_SPECTRUM 0x790
133#define SSEN (1 << 0)
134#define CLK_S(x) ((x) << 4)
135#define CLK_S_MASK (0xfff << 4)
136#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
137#define CLK_V(x) ((x) << 0)
138#define CLK_V_MASK (0x3ffffff << 0)
139
140#define MC_ARB_DRAM_TIMING 0x2774
141#define MC_ARB_DRAM_TIMING2 0x2778
142
143#define MC_ARB_RFSH_RATE 0x27b0
144#define POWERMODE0(x) ((x) << 0)
145#define POWERMODE0_MASK (0xff << 0)
146#define POWERMODE1(x) ((x) << 8)
147#define POWERMODE1_MASK (0xff << 8)
148#define POWERMODE2(x) ((x) << 16)
149#define POWERMODE2_MASK (0xff << 16)
150#define POWERMODE3(x) ((x) << 24)
151#define POWERMODE3_MASK (0xff << 24)
152
153#define MC_ARB_DRAM_TIMING_1 0x27f0
154#define MC_ARB_DRAM_TIMING_2 0x27f4
155#define MC_ARB_DRAM_TIMING_3 0x27f8
156#define MC_ARB_DRAM_TIMING2_1 0x27fc
157#define MC_ARB_DRAM_TIMING2_2 0x2800
158#define MC_ARB_DRAM_TIMING2_3 0x2804
159
160#define MC4_IO_DQ_PAD_CNTL_D0_I0 0x2978
161#define MC4_IO_DQ_PAD_CNTL_D0_I1 0x297c
162#define MC4_IO_QS_PAD_CNTL_D0_I0 0x2980
163#define MC4_IO_QS_PAD_CNTL_D0_I1 0x2984
164
165#endif
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
new file mode 100644
index 000000000000..c4c8da501da8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -0,0 +1,416 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv740d.h"
28#include "r600_dpm.h"
29#include "rv770_dpm.h"
30#include "atom.h"
31
32struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
33
34u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
35{
36 u32 ref = 0;
37
38 switch (encoded_ref) {
39 case 0:
40 ref = 1;
41 break;
42 case 16:
43 ref = 2;
44 break;
45 case 17:
46 ref = 3;
47 break;
48 case 18:
49 ref = 2;
50 break;
51 case 19:
52 ref = 3;
53 break;
54 case 20:
55 ref = 4;
56 break;
57 case 21:
58 ref = 5;
59 break;
60 default:
61 DRM_ERROR("Invalid encoded Reference Divider\n");
62 ref = 0;
63 break;
64 }
65
66 return ref;
67}
68
69struct dll_speed_setting {
70 u16 min;
71 u16 max;
72 u32 dll_speed;
73};
74
75static struct dll_speed_setting dll_speed_table[16] =
76{
77 { 270, 320, 0x0f },
78 { 240, 270, 0x0e },
79 { 200, 240, 0x0d },
80 { 180, 200, 0x0c },
81 { 160, 180, 0x0b },
82 { 140, 160, 0x0a },
83 { 120, 140, 0x09 },
84 { 110, 120, 0x08 },
85 { 95, 110, 0x07 },
86 { 85, 95, 0x06 },
87 { 78, 85, 0x05 },
88 { 70, 78, 0x04 },
89 { 65, 70, 0x03 },
90 { 60, 65, 0x02 },
91 { 42, 60, 0x01 },
92 { 00, 42, 0x00 }
93};
94
95u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock)
96{
97 int i;
98 u32 factor;
99 u16 data_rate;
100
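/* GDDR5 transfers four data words per memory clock vs two for DDR; with memory_clock in 10 kHz units this yields a data rate the table below can index */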
101 if (is_gddr5)
102 factor = 4;
103 else
104 factor = 2;
105
106 data_rate = (u16)(memory_clock * factor / 1000);
107
108 if (data_rate < dll_speed_table[0].max) {
109 for (i = 0; i < 16; i++) {
110 if (data_rate > dll_speed_table[i].min &&
111 data_rate <= dll_speed_table[i].max)
112 return dll_speed_table[i].dll_speed;
113 }
114 }
115
116 DRM_DEBUG_KMS("Target MCLK greater than largest MCLK in DLL speed table\n");
117
118 return 0x0f;
119}
120
121int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
122 RV770_SMC_SCLK_VALUE *sclk)
123{
124 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
125 struct atom_clock_dividers dividers;
126 u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
127 u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
128 u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
129 u32 cg_spll_spread_spectrum = pi->clk_regs.rv770.cg_spll_spread_spectrum;
130 u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
131 u64 tmp;
132 u32 reference_clock = rdev->clock.spll.reference_freq;
133 u32 reference_divider;
134 u32 fbdiv;
135 int ret;
136
137 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
138 engine_clock, false, &dividers);
139 if (ret)
140 return ret;
141
142 reference_divider = 1 + dividers.ref_div;
143
144 tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
145 do_div(tmp, reference_clock);
146 fbdiv = (u32) tmp;
147
148 spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
149 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
150 spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
151
152 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
153 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
154
155 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
156 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
157 spll_func_cntl_3 |= SPLL_DITHEN;
158
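	/*
	 * Optional engine spread spectrum: clk_s sets the modulation
	 * period from the post-ref-divider clock, clk_v the depth as a
	 * fraction of fbdiv (the 10000 divisor implies ss.percentage
	 * is in units of 0.01%).
	 */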
159 if (pi->sclk_ss) {
160 struct radeon_atom_ss ss;
161 u32 vco_freq = engine_clock * dividers.post_div;
162
163 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
164 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
165 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
166 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
167
168 cg_spll_spread_spectrum &= ~CLK_S_MASK;
169 cg_spll_spread_spectrum |= CLK_S(clk_s);
170 cg_spll_spread_spectrum |= SSEN;
171
172 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
173 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
174 }
175 }
176
177 sclk->sclk_value = cpu_to_be32(engine_clock);
178 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
179 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
180 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
181 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
182 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
183
184 return 0;
185}
186
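/*
 * Build the SMC MCLK register image.  The AD (address) MPLL is always
 * programmed; for GDDR5 the DQ (data) MPLL gets identical dividers,
 * and the DLL speed field is refreshed from the table above.
 */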
187int rv740_populate_mclk_value(struct radeon_device *rdev,
188 u32 engine_clock, u32 memory_clock,
189 RV7XX_SMC_MCLK_VALUE *mclk)
190{
191 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
192 u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
193 u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
194 u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
195 u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
196 u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
197 u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
198 u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
199 u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
200 struct atom_clock_dividers dividers;
201 u32 ibias;
202 u32 dll_speed;
203 int ret;
204
205 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
206 memory_clock, false, &dividers);
207 if (ret)
208 return ret;
209
210 ibias = rv770_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
211
212 mpll_ad_func_cntl &= ~(CLKR_MASK |
213 YCLK_POST_DIV_MASK |
214 CLKF_MASK |
215 CLKFRAC_MASK |
216 IBIAS_MASK);
217 mpll_ad_func_cntl |= CLKR(dividers.ref_div);
218 mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
219 mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
220 mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
221 mpll_ad_func_cntl |= IBIAS(ibias);
222
223 if (dividers.vco_mode)
224 mpll_ad_func_cntl_2 |= VCO_MODE;
225 else
226 mpll_ad_func_cntl_2 &= ~VCO_MODE;
227
228 if (pi->mem_gddr5) {
229 mpll_dq_func_cntl &= ~(CLKR_MASK |
230 YCLK_POST_DIV_MASK |
231 CLKF_MASK |
232 CLKFRAC_MASK |
233 IBIAS_MASK);
234 mpll_dq_func_cntl |= CLKR(dividers.ref_div);
235 mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
236 mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
237 mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
238 mpll_dq_func_cntl |= IBIAS(ibias);
239
240 if (dividers.vco_mode)
241 mpll_dq_func_cntl_2 |= VCO_MODE;
242 else
243 mpll_dq_func_cntl_2 &= ~VCO_MODE;
244 }
245
246 if (pi->mclk_ss) {
247 struct radeon_atom_ss ss;
248 u32 vco_freq = memory_clock * dividers.post_div;
249
250 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
251 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
252 u32 reference_clock = rdev->clock.mpll.reference_freq;
253 u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
254 u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
255 u32 clk_v = 0x40000 * ss.percentage *
256 (dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);
257
258 mpll_ss1 &= ~CLKV_MASK;
259 mpll_ss1 |= CLKV(clk_v);
260
261 mpll_ss2 &= ~CLKS_MASK;
262 mpll_ss2 |= CLKS(clk_s);
263 }
264 }
265
266 dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
267 memory_clock);
268
269 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
270 mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
271
272 mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
273 mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
274 mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
275 mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
276 mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
277 mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
278 mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
279 mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
280 mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
281
282 return 0;
283}
284
285void rv740_read_clock_registers(struct radeon_device *rdev)
286{
287 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
288
289 pi->clk_regs.rv770.cg_spll_func_cntl =
290 RREG32(CG_SPLL_FUNC_CNTL);
291 pi->clk_regs.rv770.cg_spll_func_cntl_2 =
292 RREG32(CG_SPLL_FUNC_CNTL_2);
293 pi->clk_regs.rv770.cg_spll_func_cntl_3 =
294 RREG32(CG_SPLL_FUNC_CNTL_3);
295 pi->clk_regs.rv770.cg_spll_spread_spectrum =
296 RREG32(CG_SPLL_SPREAD_SPECTRUM);
297 pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
298 RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
299
300 pi->clk_regs.rv770.mpll_ad_func_cntl =
301 RREG32(MPLL_AD_FUNC_CNTL);
302 pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
303 RREG32(MPLL_AD_FUNC_CNTL_2);
304 pi->clk_regs.rv770.mpll_dq_func_cntl =
305 RREG32(MPLL_DQ_FUNC_CNTL);
306 pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
307 RREG32(MPLL_DQ_FUNC_CNTL_2);
308 pi->clk_regs.rv770.mclk_pwrmgt_cntl =
309 RREG32(MCLK_PWRMGT_CNTL);
310 pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
311 pi->clk_regs.rv770.mpll_ss1 = RREG32(MPLL_SS1);
312 pi->clk_regs.rv770.mpll_ss2 = RREG32(MPLL_SS2);
313}
314
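/*
 * The ACPI state is the deepest idle state: VDDC falls back to the
 * ACPI (or minimum table) voltage, both MPLLs are held in reset with
 * every memory DLL bypassed, and the SPLL is bypassed and slept.
 */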
315int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
316 RV770_SMC_STATETABLE *table)
317{
318 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
319 u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
320 u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
321 u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
322 u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
323 u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
324 u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
325 u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
326 u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
327 u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
328
329 table->ACPIState = table->initialState;
330
331 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
332
333 if (pi->acpi_vddc) {
334 rv770_populate_vddc_value(rdev, pi->acpi_vddc,
335 &table->ACPIState.levels[0].vddc);
336 table->ACPIState.levels[0].gen2PCIE =
337 pi->pcie_gen2 ?
338 pi->acpi_pcie_gen2 : 0;
339 table->ACPIState.levels[0].gen2XSP =
340 pi->acpi_pcie_gen2;
341 } else {
342 rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
343 &table->ACPIState.levels[0].vddc);
344 table->ACPIState.levels[0].gen2PCIE = 0;
345 }
346
347 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
348
349 mpll_dq_func_cntl_2 |= BYPASS | BIAS_GEN_PDNB | RESET_EN;
350
351 mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
352 MRDCKA1_RESET |
353 MRDCKB0_RESET |
354 MRDCKB1_RESET |
355 MRDCKC0_RESET |
356 MRDCKC1_RESET |
357 MRDCKD0_RESET |
358 MRDCKD1_RESET);
359
360 dll_cntl |= (MRDCKA0_BYPASS |
361 MRDCKA1_BYPASS |
362 MRDCKB0_BYPASS |
363 MRDCKB1_BYPASS |
364 MRDCKC0_BYPASS |
365 MRDCKC1_BYPASS |
366 MRDCKD0_BYPASS |
367 MRDCKD1_BYPASS);
368
369 spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
370
371 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
372 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
373
374 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
375 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
376 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
377 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
378 table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
379 table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
380
381 table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
382
383 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
384 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
385 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
386
387 table->ACPIState.levels[0].sclk.sclk_value = 0;
388
389 table->ACPIState.levels[1] = table->ACPIState.levels[0];
390 table->ACPIState.levels[2] = table->ACPIState.levels[0];
391
392 rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
393
394 return 0;
395}
396
397void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
398 bool enable)
399{
400 if (enable)
401 WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
402 else
403 WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
404}
405
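/*
 * Strobe-mode frequency ratio: index the 100-475 MHz range (clock in
 * 10 kHz units) in 25 MHz steps; clocks outside that window map to
 * index 0.
 */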
406u8 rv740_get_mclk_frequency_ratio(u32 memory_clock)
407{
408 u8 mc_para_index;
409
410 if ((memory_clock < 10000) || (memory_clock > 47500))
411 mc_para_index = 0x00;
412 else
413 mc_para_index = (u8)((memory_clock - 10000) / 2500);
414
415 return mc_para_index;
416}
diff --git a/drivers/gpu/drm/radeon/rv740d.h b/drivers/gpu/drm/radeon/rv740d.h
new file mode 100644
index 000000000000..fe5ab075dc17
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv740d.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef RV740_H
24#define RV740_H
25
26#define CG_SPLL_FUNC_CNTL 0x600
27#define SPLL_RESET (1 << 0)
28#define SPLL_SLEEP (1 << 1)
29#define SPLL_BYPASS_EN (1 << 3)
30#define SPLL_REF_DIV(x) ((x) << 4)
31#define SPLL_REF_DIV_MASK (0x3f << 4)
32#define SPLL_PDIV_A(x) ((x) << 20)
33#define SPLL_PDIV_A_MASK (0x7f << 20)
34#define CG_SPLL_FUNC_CNTL_2 0x604
35#define SCLK_MUX_SEL(x) ((x) << 0)
36#define SCLK_MUX_SEL_MASK (0x1ff << 0)
37#define CG_SPLL_FUNC_CNTL_3 0x608
38#define SPLL_FB_DIV(x) ((x) << 0)
39#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
40#define SPLL_DITHEN (1 << 28)
41
42#define MPLL_CNTL_MODE 0x61c
43#define SS_SSEN (1 << 24)
44
45#define MPLL_AD_FUNC_CNTL 0x624
46#define CLKF(x) ((x) << 0)
47#define CLKF_MASK (0x7f << 0)
48#define CLKR(x) ((x) << 7)
49#define CLKR_MASK (0x1f << 7)
50#define CLKFRAC(x) ((x) << 12)
51#define CLKFRAC_MASK (0x1f << 12)
52#define YCLK_POST_DIV(x) ((x) << 17)
53#define YCLK_POST_DIV_MASK (3 << 17)
54#define IBIAS(x) ((x) << 20)
55#define IBIAS_MASK (0x3ff << 20)
56#define RESET (1 << 30)
57#define PDNB (1 << 31)
58#define MPLL_AD_FUNC_CNTL_2 0x628
59#define BYPASS (1 << 19)
60#define BIAS_GEN_PDNB (1 << 24)
61#define RESET_EN (1 << 25)
62#define VCO_MODE (1 << 29)
63#define MPLL_DQ_FUNC_CNTL 0x62c
64#define MPLL_DQ_FUNC_CNTL_2 0x630
65
66#define MCLK_PWRMGT_CNTL 0x648
67#define DLL_SPEED(x) ((x) << 0)
68#define DLL_SPEED_MASK (0x1f << 0)
69# define MPLL_PWRMGT_OFF (1 << 5)
70# define DLL_READY (1 << 6)
71# define MC_INT_CNTL (1 << 7)
72# define MRDCKA0_SLEEP (1 << 8)
73# define MRDCKA1_SLEEP (1 << 9)
74# define MRDCKB0_SLEEP (1 << 10)
75# define MRDCKB1_SLEEP (1 << 11)
76# define MRDCKC0_SLEEP (1 << 12)
77# define MRDCKC1_SLEEP (1 << 13)
78# define MRDCKD0_SLEEP (1 << 14)
79# define MRDCKD1_SLEEP (1 << 15)
80# define MRDCKA0_RESET (1 << 16)
81# define MRDCKA1_RESET (1 << 17)
82# define MRDCKB0_RESET (1 << 18)
83# define MRDCKB1_RESET (1 << 19)
84# define MRDCKC0_RESET (1 << 20)
85# define MRDCKC1_RESET (1 << 21)
86# define MRDCKD0_RESET (1 << 22)
87# define MRDCKD1_RESET (1 << 23)
88# define DLL_READY_READ (1 << 24)
89# define USE_DISPLAY_GAP (1 << 25)
90# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
91# define MPLL_TURNOFF_D2 (1 << 28)
92#define DLL_CNTL 0x64c
93# define MRDCKA0_BYPASS (1 << 24)
94# define MRDCKA1_BYPASS (1 << 25)
95# define MRDCKB0_BYPASS (1 << 26)
96# define MRDCKB1_BYPASS (1 << 27)
97# define MRDCKC0_BYPASS (1 << 28)
98# define MRDCKC1_BYPASS (1 << 29)
99# define MRDCKD0_BYPASS (1 << 30)
100# define MRDCKD1_BYPASS (1 << 31)
101
102#define CG_SPLL_SPREAD_SPECTRUM 0x790
103#define SSEN (1 << 0)
104#define CLK_S(x) ((x) << 4)
105#define CLK_S_MASK (0xfff << 4)
106#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
107#define CLK_V(x) ((x) << 0)
108#define CLK_V_MASK (0x3ffffff << 0)
109
110#define MPLL_SS1 0x85c
111#define CLKV(x) ((x) << 0)
112#define CLKV_MASK (0x3ffffff << 0)
113#define MPLL_SS2 0x860
114#define CLKS(x) ((x) << 0)
115#define CLKS_MASK (0xfff << 0)
116
117#endif
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
new file mode 100644
index 000000000000..9af464d48eaa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -0,0 +1,2493 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "radeon.h"
27#include "rv770d.h"
28#include "r600_dpm.h"
29#include "rv770_dpm.h"
30#include "cypress_dpm.h"
31#include "atom.h"
32#include <linux/seq_file.h>
33
34#define MC_CG_ARB_FREQ_F0 0x0a
35#define MC_CG_ARB_FREQ_F1 0x0b
36#define MC_CG_ARB_FREQ_F2 0x0c
37#define MC_CG_ARB_FREQ_F3 0x0d
38
39#define MC_CG_SEQ_DRAMCONF_S0 0x05
40#define MC_CG_SEQ_DRAMCONF_S1 0x06
41
42#define PCIE_BUS_CLK 10000
43#define TCLK (PCIE_BUS_CLK / 10)
44
45#define SMC_RAM_END 0xC000
46
47struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps)
48{
49 struct rv7xx_ps *ps = rps->ps_priv;
50
51 return ps;
52}
53
54struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev)
55{
56 struct rv7xx_power_info *pi = rdev->pm.dpm.priv;
57
58 return pi;
59}
60
61struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev)
62{
63 struct evergreen_power_info *pi = rdev->pm.dpm.priv;
64
65 return pi;
66}
67
68static void rv770_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
69 bool enable)
70{
71 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
72 u32 tmp;
73
74 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
75 if (enable) {
76 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
77 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
78 tmp |= LC_GEN2_EN_STRAP;
79 } else {
80 if (!pi->boot_in_gen2) {
81 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
82 tmp &= ~LC_GEN2_EN_STRAP;
83 }
84 }
85 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
86 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
87 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
89}
90
91static void rv770_enable_l0s(struct radeon_device *rdev)
92{
93 u32 tmp;
94
95 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
96 tmp |= LC_L0S_INACTIVITY(3);
97 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
98}
99
100static void rv770_enable_l1(struct radeon_device *rdev)
101{
102 u32 tmp;
103
104 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
105 tmp &= ~LC_L1_INACTIVITY_MASK;
106 tmp |= LC_L1_INACTIVITY(4);
107 tmp &= ~LC_PMI_TO_L1_DIS;
108 tmp &= ~LC_ASPM_TO_L1_DIS;
109 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
110}
111
112static void rv770_enable_pll_sleep_in_l1(struct radeon_device *rdev)
113{
114 u32 tmp;
115
116 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
117 tmp |= LC_L1_INACTIVITY(8);
118 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
119
120 /* NOTE, this is a PCIE indirect reg, not PCIE PORT */
121 tmp = RREG32_PCIE(PCIE_P_CNTL);
122 tmp |= P_PLL_PWRDN_IN_L1L23;
123 tmp &= ~P_PLL_BUF_PDNB;
124 tmp &= ~P_PLL_PDNB;
125 tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
126 WREG32_PCIE(PCIE_P_CNTL, tmp);
127}
128
129static void rv770_gfx_clock_gating_enable(struct radeon_device *rdev,
130 bool enable)
131{
132 if (enable)
133 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
134 else {
135 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
136 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
137 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
138 RREG32(GB_TILING_CONFIG);
139 }
140}
141
142static void rv770_mg_clock_gating_enable(struct radeon_device *rdev,
143 bool enable)
144{
145 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
146
147 if (enable) {
148 u32 mgcg_cgtt_local0;
149
150 if (rdev->family == CHIP_RV770)
151 mgcg_cgtt_local0 = RV770_MGCGTTLOCAL0_DFLT;
152 else
153 mgcg_cgtt_local0 = RV7XX_MGCGTTLOCAL0_DFLT;
154
155 WREG32(CG_CGTT_LOCAL_0, mgcg_cgtt_local0);
156 WREG32(CG_CGTT_LOCAL_1, (RV770_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF));
157
158 if (pi->mgcgtssm)
159 WREG32(CGTS_SM_CTRL_REG, RV770_MGCGCGTSSMCTRL_DFLT);
160 } else {
161 WREG32(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
162 WREG32(CG_CGTT_LOCAL_1, 0xFFFFCFFF);
163 }
164}
165
166void rv770_restore_cgcg(struct radeon_device *rdev)
167{
168 bool dpm_en = false, cg_en = false;
169
170 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
171 dpm_en = true;
172 if (RREG32(SCLK_PWRMGT_CNTL) & DYN_GFX_CLK_OFF_EN)
173 cg_en = true;
174
175 if (dpm_en && !cg_en)
176 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
177}
178
179static void rv770_start_dpm(struct radeon_device *rdev)
180{
181 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
182
183 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
184
185 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
186}
187
188void rv770_stop_dpm(struct radeon_device *rdev)
189{
190 PPSMC_Result result;
191
192 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
193
194 if (result != PPSMC_Result_OK)
195 DRM_ERROR("Could not force DPM to low.\n");
196
197 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
198
199 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
200
201 WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
202}
203
204bool rv770_dpm_enabled(struct radeon_device *rdev)
205{
206 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
207 return true;
208 else
209 return false;
210}
211
212void rv770_enable_thermal_protection(struct radeon_device *rdev,
213 bool enable)
214{
215 if (enable)
216 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
217 else
218 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
219}
220
221void rv770_enable_acpi_pm(struct radeon_device *rdev)
222{
223 WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
224}
225
226u8 rv770_get_seq_value(struct radeon_device *rdev,
227 struct rv7xx_pl *pl)
228{
229 return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ?
230 MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1;
231}
232
233int rv770_read_smc_soft_register(struct radeon_device *rdev,
234 u16 reg_offset, u32 *value)
235{
236 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
237
238 return rv770_read_smc_sram_dword(rdev,
239 pi->soft_regs_start + reg_offset,
240 value, pi->sram_end);
241}
242
243int rv770_write_smc_soft_register(struct radeon_device *rdev,
244 u16 reg_offset, u32 value)
245{
246 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
247
248 return rv770_write_smc_sram_dword(rdev,
249 pi->soft_regs_start + reg_offset,
250 value, pi->sram_end);
251}
252
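/*
 * Compute the aT (transition) parameters for the three performance
 * levels.  l[] and r[] act as left/right switch thresholds on a 0-100
 * scale (l[0] = 0, r[2] = 100); the a_n/a_d ratios interpolate them
 * from adjacent levels' sclks, and bsp/pbsp scale the result into the
 * CG_R/CG_L fields.
 */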
253int rv770_populate_smc_t(struct radeon_device *rdev,
254 struct radeon_ps *radeon_state,
255 RV770_SMC_SWSTATE *smc_state)
256{
257 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
258 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
259 int i;
260 int a_n;
261 int a_d;
262 u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
263 u8 r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
264 u32 a_t;
265
266 l[0] = 0;
267 r[2] = 100;
268
269 a_n = (int)state->medium.sclk * pi->lmp +
270 (int)state->low.sclk * (R600_AH_DFLT - pi->rlp);
271 a_d = (int)state->low.sclk * (100 - (int)pi->rlp) +
272 (int)state->medium.sclk * pi->lmp;
273
274 l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d);
275 r[0] = (u8)(pi->rlp + (100 - (int)pi->rlp) * a_n / a_d);
276
277 a_n = (int)state->high.sclk * pi->lhp + (int)state->medium.sclk *
278 (R600_AH_DFLT - pi->rmp);
279 a_d = (int)state->medium.sclk * (100 - (int)pi->rmp) +
280 (int)state->high.sclk * pi->lhp;
281
282 l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d);
283 r[1] = (u8)(pi->rmp + (100 - (int)pi->rmp) * a_n / a_d);
284
285 for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) {
286 a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp / 200);
287 smc_state->levels[i].aT = cpu_to_be32(a_t);
288 }
289
290 a_t = CG_R(r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200) |
291 CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200);
292
293 smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].aT =
294 cpu_to_be32(a_t);
295
296 return 0;
297}
298
299int rv770_populate_smc_sp(struct radeon_device *rdev,
300 struct radeon_ps *radeon_state,
301 RV770_SMC_SWSTATE *smc_state)
302{
303 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
304 int i;
305
306 for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++)
307 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
308
309 smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].bSP =
310 cpu_to_be32(pi->psp);
311
312 return 0;
313}
314
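/*
 * Split the MPLL feedback divider into its integer (CLKF) and 3-bit
 * fractional (CLKFRAC) fields: feedback_divider8 holds the value
 * scaled by 8, so /8 and %8 recover the two parts.  fyclk is half
 * the data rate: memory_clock * 8 / 2 for GDDR5, * 4 / 2 otherwise.
 */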
315static void rv770_calculate_fractional_mpll_feedback_divider(u32 memory_clock,
316 u32 reference_clock,
317 bool gddr5,
318 struct atom_clock_dividers *dividers,
319 u32 *clkf,
320 u32 *clkfrac)
321{
322 u32 post_divider, reference_divider, feedback_divider8;
323 u32 fyclk;
324
325 if (gddr5)
326 fyclk = (memory_clock * 8) / 2;
327 else
328 fyclk = (memory_clock * 4) / 2;
329
330 post_divider = dividers->post_div;
331 reference_divider = dividers->ref_div;
332
333 feedback_divider8 =
334 (8 * fyclk * reference_divider * post_divider) / reference_clock;
335
336 *clkf = feedback_divider8 / 8;
337 *clkfrac = feedback_divider8 % 8;
338}
339
340static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
341{
342 int ret = 0;
343
344 switch (postdiv) {
345 case 1:
346 *encoded_postdiv = 0;
347 break;
348 case 2:
349 *encoded_postdiv = 1;
350 break;
351 case 4:
352 *encoded_postdiv = 2;
353 break;
354 case 8:
355 *encoded_postdiv = 3;
356 break;
357 case 16:
358 *encoded_postdiv = 4;
359 break;
360 default:
361 ret = -EINVAL;
362 break;
363 }
364
365 return ret;
366}
367
368u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
369{
370 if (clkf <= 0x10)
371 return 0x4B;
372 if (clkf <= 0x19)
373 return 0x5B;
374 if (clkf <= 0x21)
375 return 0x2B;
376 if (clkf <= 0x27)
377 return 0x6C;
378 if (clkf <= 0x31)
379 return 0x9D;
380 return 0xC6;
381}
382
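/*
 * rv770 variant of the MCLK setup.  ATOM reports ref_div in the 1-5
 * range; encoded_reference_dividers[ref_div - 1] re-encodes it to the
 * raw CLKR field values (0, 16, 17, 20, 21).
 */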
383static int rv770_populate_mclk_value(struct radeon_device *rdev,
384 u32 engine_clock, u32 memory_clock,
385 RV7XX_SMC_MCLK_VALUE *mclk)
386{
387 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
388 u8 encoded_reference_dividers[] = { 0, 16, 17, 20, 21 };
389 u32 mpll_ad_func_cntl =
390 pi->clk_regs.rv770.mpll_ad_func_cntl;
391 u32 mpll_ad_func_cntl_2 =
392 pi->clk_regs.rv770.mpll_ad_func_cntl_2;
393 u32 mpll_dq_func_cntl =
394 pi->clk_regs.rv770.mpll_dq_func_cntl;
395 u32 mpll_dq_func_cntl_2 =
396 pi->clk_regs.rv770.mpll_dq_func_cntl_2;
397 u32 mclk_pwrmgt_cntl =
398 pi->clk_regs.rv770.mclk_pwrmgt_cntl;
399 u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
400 struct atom_clock_dividers dividers;
401 u32 reference_clock = rdev->clock.mpll.reference_freq;
402 u32 clkf, clkfrac;
403 u32 postdiv_yclk;
404 u32 ibias;
405 int ret;
406
407 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
408 memory_clock, false, &dividers);
409 if (ret)
410 return ret;
411
412 if ((dividers.ref_div < 1) || (dividers.ref_div > 5))
413 return -EINVAL;
414
415 rv770_calculate_fractional_mpll_feedback_divider(memory_clock, reference_clock,
416 pi->mem_gddr5,
417 &dividers, &clkf, &clkfrac);
418
419 ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
420 if (ret)
421 return ret;
422
423 ibias = rv770_map_clkf_to_ibias(rdev, clkf);
424
425 mpll_ad_func_cntl &= ~(CLKR_MASK |
426 YCLK_POST_DIV_MASK |
427 CLKF_MASK |
428 CLKFRAC_MASK |
429 IBIAS_MASK);
430 mpll_ad_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
431 mpll_ad_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
432 mpll_ad_func_cntl |= CLKF(clkf);
433 mpll_ad_func_cntl |= CLKFRAC(clkfrac);
434 mpll_ad_func_cntl |= IBIAS(ibias);
435
436 if (dividers.vco_mode)
437 mpll_ad_func_cntl_2 |= VCO_MODE;
438 else
439 mpll_ad_func_cntl_2 &= ~VCO_MODE;
440
441 if (pi->mem_gddr5) {
442 rv770_calculate_fractional_mpll_feedback_divider(memory_clock,
443 reference_clock,
444 pi->mem_gddr5,
445 &dividers, &clkf, &clkfrac);
446
447 ibias = rv770_map_clkf_to_ibias(rdev, clkf);
448
449 ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
450 if (ret)
451 return ret;
452
453 mpll_dq_func_cntl &= ~(CLKR_MASK |
454 YCLK_POST_DIV_MASK |
455 CLKF_MASK |
456 CLKFRAC_MASK |
457 IBIAS_MASK);
458 mpll_dq_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
459 mpll_dq_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
460 mpll_dq_func_cntl |= CLKF(clkf);
461 mpll_dq_func_cntl |= CLKFRAC(clkfrac);
462 mpll_dq_func_cntl |= IBIAS(ibias);
463
464 if (dividers.vco_mode)
465 mpll_dq_func_cntl_2 |= VCO_MODE;
466 else
467 mpll_dq_func_cntl_2 &= ~VCO_MODE;
468 }
469
470 mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
471 mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
472 mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
473 mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
474 mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
475 mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
476 mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
477
478 return 0;
479}
480
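/*
 * rv770 variant of the SCLK setup.  Unlike rv740, the post divider is
 * split into HILEN/LOLEN nibbles; when enabled, the effective divide
 * ratio is hi + lo + 2, which also feeds the fbdiv calculation.
 */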
481static int rv770_populate_sclk_value(struct radeon_device *rdev,
482 u32 engine_clock,
483 RV770_SMC_SCLK_VALUE *sclk)
484{
485 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
486 struct atom_clock_dividers dividers;
487 u32 spll_func_cntl =
488 pi->clk_regs.rv770.cg_spll_func_cntl;
489 u32 spll_func_cntl_2 =
490 pi->clk_regs.rv770.cg_spll_func_cntl_2;
491 u32 spll_func_cntl_3 =
492 pi->clk_regs.rv770.cg_spll_func_cntl_3;
493 u32 cg_spll_spread_spectrum =
494 pi->clk_regs.rv770.cg_spll_spread_spectrum;
495 u32 cg_spll_spread_spectrum_2 =
496 pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
497 u64 tmp;
498 u32 reference_clock = rdev->clock.spll.reference_freq;
499 u32 reference_divider, post_divider;
500 u32 fbdiv;
501 int ret;
502
503 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
504 engine_clock, false, &dividers);
505 if (ret)
506 return ret;
507
508 reference_divider = 1 + dividers.ref_div;
509
510 if (dividers.enable_post_div)
511 post_divider = (0x0f & (dividers.post_div >> 4)) + (0x0f & dividers.post_div) + 2;
512 else
513 post_divider = 1;
514
515 tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
516 do_div(tmp, reference_clock);
517 fbdiv = (u32) tmp;
518
519 if (dividers.enable_post_div)
520 spll_func_cntl |= SPLL_DIVEN;
521 else
522 spll_func_cntl &= ~SPLL_DIVEN;
523 spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
524 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
525 spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
526 spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);
527
528 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
529 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
530
531 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
532 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
533 spll_func_cntl_3 |= SPLL_DITHEN;
534
535 if (pi->sclk_ss) {
536 struct radeon_atom_ss ss;
537 u32 vco_freq = engine_clock * post_divider;
538
539 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
540 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
541 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
542 u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);
543
544 cg_spll_spread_spectrum &= ~CLKS_MASK;
545 cg_spll_spread_spectrum |= CLKS(clk_s);
546 cg_spll_spread_spectrum |= SSEN;
547
548 cg_spll_spread_spectrum_2 &= ~CLKV_MASK;
549 cg_spll_spread_spectrum_2 |= CLKV(clk_v);
550 }
551 }
552
553 sclk->sclk_value = cpu_to_be32(engine_clock);
554 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
555 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
556 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
557 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
558 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
559
560 return 0;
561}
562
563int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
564 RV770_SMC_VOLTAGE_VALUE *voltage)
565{
566 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
567 int i;
568
569 if (!pi->voltage_control) {
570 voltage->index = 0;
571 voltage->value = 0;
572 return 0;
573 }
574
575 for (i = 0; i < pi->valid_vddc_entries; i++) {
576 if (vddc <= pi->vddc_table[i].vddc) {
577 voltage->index = pi->vddc_table[i].vddc_index;
578 voltage->value = cpu_to_be16(vddc);
579 break;
580 }
581 }
582
583 if (i == pi->valid_vddc_entries)
584 return -EINVAL;
585
586 return 0;
587}
588
589int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
590 RV770_SMC_VOLTAGE_VALUE *voltage)
591{
592 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
593
594 if (!pi->mvdd_control) {
595 voltage->index = MVDD_HIGH_INDEX;
596 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
597 return 0;
598 }
599
600 if (mclk <= pi->mvdd_split_frequency) {
601 voltage->index = MVDD_LOW_INDEX;
602 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
603 } else {
604 voltage->index = MVDD_HIGH_INDEX;
605 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
606 }
607
608 return 0;
609}
610
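/*
 * Translate one driver performance level into its SMC form: PCIe
 * gen2 flags and display watermark first, then the family-specific
 * SCLK/MCLK programming (rv740 additionally derives strobe mode and
 * EDC flags from its mclk thresholds), and finally VDDC/MVDD.
 */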
611static int rv770_convert_power_level_to_smc(struct radeon_device *rdev,
612 struct rv7xx_pl *pl,
613 RV770_SMC_HW_PERFORMANCE_LEVEL *level,
614 u8 watermark_level)
615{
616 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
617 int ret;
618
619 level->gen2PCIE = pi->pcie_gen2 ?
620 ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
621 level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
622 level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
623 level->displayWatermark = watermark_level;
624
625 if (rdev->family == CHIP_RV740)
626 ret = rv740_populate_sclk_value(rdev, pl->sclk,
627 &level->sclk);
628 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
629 ret = rv730_populate_sclk_value(rdev, pl->sclk,
630 &level->sclk);
631 else
632 ret = rv770_populate_sclk_value(rdev, pl->sclk,
633 &level->sclk);
634 if (ret)
635 return ret;
636
637 if (rdev->family == CHIP_RV740) {
638 if (pi->mem_gddr5) {
639 if (pl->mclk <= pi->mclk_strobe_mode_threshold)
640 level->strobeMode =
641 rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10;
642 else
643 level->strobeMode = 0;
644
645 if (pl->mclk > pi->mclk_edc_enable_threshold)
646 level->mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
647 else
648 level->mcFlags = 0;
649 }
650 ret = rv740_populate_mclk_value(rdev, pl->sclk,
651 pl->mclk, &level->mclk);
652 } else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
653 ret = rv730_populate_mclk_value(rdev, pl->sclk,
654 pl->mclk, &level->mclk);
655 else
656 ret = rv770_populate_mclk_value(rdev, pl->sclk,
657 pl->mclk, &level->mclk);
658 if (ret)
659 return ret;
660
661 ret = rv770_populate_vddc_value(rdev, pl->vddc,
662 &level->vddc);
663 if (ret)
664 return ret;
665
666 ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
667
668 return ret;
669}
670
671static int rv770_convert_power_state_to_smc(struct radeon_device *rdev,
672 struct radeon_ps *radeon_state,
673 RV770_SMC_SWSTATE *smc_state)
674{
675 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
676 int ret;
677
678 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
679 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
680
681 ret = rv770_convert_power_level_to_smc(rdev,
682 &state->low,
683 &smc_state->levels[0],
684 PPSMC_DISPLAY_WATERMARK_LOW);
685 if (ret)
686 return ret;
687
688 ret = rv770_convert_power_level_to_smc(rdev,
689 &state->medium,
690 &smc_state->levels[1],
691 PPSMC_DISPLAY_WATERMARK_LOW);
692 if (ret)
693 return ret;
694
695 ret = rv770_convert_power_level_to_smc(rdev,
696 &state->high,
697 &smc_state->levels[2],
698 PPSMC_DISPLAY_WATERMARK_HIGH);
699 if (ret)
700 return ret;
701
702 smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
703 smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
704 smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
705
706 smc_state->levels[0].seqValue = rv770_get_seq_value(rdev,
707 &state->low);
708 smc_state->levels[1].seqValue = rv770_get_seq_value(rdev,
709 &state->medium);
710 smc_state->levels[2].seqValue = rv770_get_seq_value(rdev,
711 &state->high);
712
713 rv770_populate_smc_sp(rdev, radeon_state, smc_state);
714
715 return rv770_populate_smc_t(rdev, radeon_state, smc_state);
717}
718
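/*
 * Derive the MC arbiter refresh rate for a given engine clock.  Row
 * count (1 << (n + 10)) and refresh interval (1 << (n + 3)) come from
 * the MC config registers; the result is scaled into the arbiter's
 * 64-cycle units.
 */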
719u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
720 u32 engine_clock)
721{
722 u32 dram_rows;
723 u32 dram_refresh_rate;
724 u32 mc_arb_rfsh_rate;
725 u32 tmp;
726
727 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
728 dram_rows = 1 << (tmp + 10);
729 tmp = RREG32(MC_SEQ_MISC0) & 3;
730 dram_refresh_rate = 1 << (tmp + 3);
731 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
732
733 return mc_arb_rfsh_rate;
734}
735
736static void rv770_program_memory_timing_parameters(struct radeon_device *rdev,
737 struct radeon_ps *radeon_state)
738{
739 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
740 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
741 u32 sqm_ratio;
742 u32 arb_refresh_rate;
743 u32 high_clock;
744
745 if (state->high.sclk < (state->low.sclk * 0xFF / 0x40))
746 high_clock = state->high.sclk;
747 else
748 high_clock = (state->low.sclk * 0xFF / 0x40);
749
750 radeon_atom_set_engine_dram_timings(rdev, high_clock,
751 state->high.mclk);
752
753 sqm_ratio =
754 STATE0(64 * high_clock / pi->boot_sclk) |
755 STATE1(64 * high_clock / state->low.sclk) |
756 STATE2(64 * high_clock / state->medium.sclk) |
757 STATE3(64 * high_clock / state->high.sclk);
758 WREG32(MC_ARB_SQM_RATIO, sqm_ratio);
759
760 arb_refresh_rate =
761 POWERMODE0(rv770_calculate_memory_refresh_rate(rdev, pi->boot_sclk)) |
762 POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
763 POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
764 POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk));
765 WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);
766}
767
768void rv770_enable_backbias(struct radeon_device *rdev,
769 bool enable)
770{
771 if (enable)
772 WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN, ~BACKBIAS_PAD_EN);
773 else
774 WREG32_P(GENERAL_PWRMGT, 0, ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN));
775}
776
777static void rv770_enable_spread_spectrum(struct radeon_device *rdev,
778 bool enable)
779{
780 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
781
782 if (enable) {
783 if (pi->sclk_ss)
784 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
785
786 if (pi->mclk_ss) {
787 if (rdev->family == CHIP_RV740)
788 rv740_enable_mclk_spread_spectrum(rdev, true);
789 }
790 } else {
791 WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
792
793 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
794
795 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);
796
797 if (rdev->family == CHIP_RV740)
798 rv740_enable_mclk_spread_spectrum(rdev, false);
799 }
800}
801
802static void rv770_program_mpll_timing_parameters(struct radeon_device *rdev)
803{
804 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
805
806 if ((rdev->family == CHIP_RV770) && !pi->mem_gddr5) {
807 WREG32(MPLL_TIME,
808 (MPLL_LOCK_TIME(R600_MPLLLOCKTIME_DFLT * pi->ref_div) |
809 MPLL_RESET_TIME(R600_MPLLRESETTIME_DFLT)));
810 }
811}
812
813void rv770_setup_bsp(struct radeon_device *rdev)
814{
815 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
816 u32 xclk = radeon_get_xclk(rdev);
817
818 r600_calculate_u_and_p(pi->asi,
819 xclk,
820 16,
821 &pi->bsp,
822 &pi->bsu);
823
824 r600_calculate_u_and_p(pi->pasi,
825 xclk,
826 16,
827 &pi->pbsp,
828 &pi->pbsu);
829
830 pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
831 pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
832
833 WREG32(CG_BSP, pi->dsp);
835}
836
837void rv770_program_git(struct radeon_device *rdev)
838{
839 WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
840}
841
842void rv770_program_tp(struct radeon_device *rdev)
843{
844 int i;
845 enum r600_td td = R600_TD_DFLT;
846
847 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
848 WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
849
850 if (td == R600_TD_AUTO)
851 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
852 else
853 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
854 if (td == R600_TD_UP)
855 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
856 if (td == R600_TD_DOWN)
857 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
858}
859
860void rv770_program_tpp(struct radeon_device *rdev)
861{
862 WREG32(CG_TPC, R600_TPC_DFLT);
863}
864
865void rv770_program_sstp(struct radeon_device *rdev)
866{
867 WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
868}
869
870void rv770_program_engine_speed_parameters(struct radeon_device *rdev)
871{
872 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
873}
874
875static void rv770_enable_display_gap(struct radeon_device *rdev)
876{
877 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
878
879 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
880 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
881 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
882 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
883}
884
885void rv770_program_vc(struct radeon_device *rdev)
886{
887 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
888
889 WREG32(CG_FTV, pi->vrc);
890}
891
892void rv770_clear_vc(struct radeon_device *rdev)
893{
894 WREG32(CG_FTV, 0);
895}
896
897int rv770_upload_firmware(struct radeon_device *rdev)
898{
899 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
900 int ret;
901
902 rv770_reset_smc(rdev);
903 rv770_stop_smc_clock(rdev);
904
905 ret = rv770_load_smc_ucode(rdev, pi->sram_end);
906 if (ret)
907 return ret;
908
909 return 0;
910}
911
912static int rv770_populate_smc_acpi_state(struct radeon_device *rdev,
913 RV770_SMC_STATETABLE *table)
914{
915 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
916
917 u32 mpll_ad_func_cntl =
918 pi->clk_regs.rv770.mpll_ad_func_cntl;
919 u32 mpll_ad_func_cntl_2 =
920 pi->clk_regs.rv770.mpll_ad_func_cntl_2;
921 u32 mpll_dq_func_cntl =
922 pi->clk_regs.rv770.mpll_dq_func_cntl;
923 u32 mpll_dq_func_cntl_2 =
924 pi->clk_regs.rv770.mpll_dq_func_cntl_2;
925 u32 spll_func_cntl =
926 pi->clk_regs.rv770.cg_spll_func_cntl;
927 u32 spll_func_cntl_2 =
928 pi->clk_regs.rv770.cg_spll_func_cntl_2;
929 u32 spll_func_cntl_3 =
930 pi->clk_regs.rv770.cg_spll_func_cntl_3;
931 u32 mclk_pwrmgt_cntl;
932 u32 dll_cntl;
933
934 table->ACPIState = table->initialState;
935
936 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
937
938 if (pi->acpi_vddc) {
939 rv770_populate_vddc_value(rdev, pi->acpi_vddc,
940 &table->ACPIState.levels[0].vddc);
941 if (pi->pcie_gen2) {
942 if (pi->acpi_pcie_gen2)
943 table->ACPIState.levels[0].gen2PCIE = 1;
944 else
945 table->ACPIState.levels[0].gen2PCIE = 0;
946 } else
947 table->ACPIState.levels[0].gen2PCIE = 0;
948 if (pi->acpi_pcie_gen2)
949 table->ACPIState.levels[0].gen2XSP = 1;
950 else
951 table->ACPIState.levels[0].gen2XSP = 0;
952 } else {
953 rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
954 &table->ACPIState.levels[0].vddc);
955 table->ACPIState.levels[0].gen2PCIE = 0;
956 }
958
959 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
960
961 mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
962
963 mclk_pwrmgt_cntl = (MRDCKA0_RESET |
964 MRDCKA1_RESET |
965 MRDCKB0_RESET |
966 MRDCKB1_RESET |
967 MRDCKC0_RESET |
968 MRDCKC1_RESET |
969 MRDCKD0_RESET |
970 MRDCKD1_RESET);
971
972 dll_cntl = 0xff000000;
973
974 spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
975
976 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
977 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
978
979 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
980 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
981 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
982 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
983
984 table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
985 table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
986
987 table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
988
989 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
990 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
991 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
992
993 table->ACPIState.levels[0].sclk.sclk_value = 0;
994
995 rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
996
997 table->ACPIState.levels[1] = table->ACPIState.levels[0];
998 table->ACPIState.levels[2] = table->ACPIState.levels[0];
999
1000 return 0;
1001}
1002
1003int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
1004 RV770_SMC_VOLTAGE_VALUE *voltage)
1005{
1006 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1007
1008 if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
1009 (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) {
1010 voltage->index = MVDD_LOW_INDEX;
1011 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
1012 } else {
1013 voltage->index = MVDD_HIGH_INDEX;
1014 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1015 }
1016
1017 return 0;
1018}
1019
1020static int rv770_populate_smc_initial_state(struct radeon_device *rdev,
1021 struct radeon_ps *radeon_state,
1022 RV770_SMC_STATETABLE *table)
1023{
1024 struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
1025 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1026 u32 a_t;
1027
1028 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1029 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
1030 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1031 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
1032 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1033 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
1034 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1035 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
1036 table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1037 cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
1038 table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
1039 cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
1040
1041 table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
1042 cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
1043 table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
1044 cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
1045
1046 table->initialState.levels[0].mclk.mclk770.mclk_value =
1047 cpu_to_be32(initial_state->low.mclk);
1048
1049 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1050 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
1051 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1052 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
1053 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1054 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
1055 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
1056 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
1057 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
1058 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
1059
1060 table->initialState.levels[0].sclk.sclk_value =
1061 cpu_to_be32(initial_state->low.sclk);
1062
1063 table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
1064
1065 table->initialState.levels[0].seqValue =
1066 rv770_get_seq_value(rdev, &initial_state->low);
1067
1068 rv770_populate_vddc_value(rdev,
1069 initial_state->low.vddc,
1070 &table->initialState.levels[0].vddc);
1071 rv770_populate_initial_mvdd_value(rdev,
1072 &table->initialState.levels[0].mvdd);
1073
1074 a_t = CG_R(0xffff) | CG_L(0);
1075 table->initialState.levels[0].aT = cpu_to_be32(a_t);
1076
1077 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
1078
1079 if (pi->boot_in_gen2)
1080 table->initialState.levels[0].gen2PCIE = 1;
1081 else
1082 table->initialState.levels[0].gen2PCIE = 0;
1083 if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
1084 table->initialState.levels[0].gen2XSP = 1;
1085 else
1086 table->initialState.levels[0].gen2XSP = 0;
1087
1088 if (rdev->family == CHIP_RV740) {
1089 if (pi->mem_gddr5) {
1090 if (initial_state->low.mclk <= pi->mclk_strobe_mode_threshold)
1091 table->initialState.levels[0].strobeMode =
1092 rv740_get_mclk_frequency_ratio(initial_state->low.mclk) | 0x10;
1093 else
1094 table->initialState.levels[0].strobeMode = 0;
1095
1096 if (initial_state->low.mclk >= pi->mclk_edc_enable_threshold)
1097 table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
1098 else
1099 table->initialState.levels[0].mcFlags = 0;
1100 }
1101 }
1102
1103 table->initialState.levels[1] = table->initialState.levels[0];
1104 table->initialState.levels[2] = table->initialState.levels[0];
1105
1106 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
1107
1108 return 0;
1109}
1110
1111static int rv770_populate_smc_vddc_table(struct radeon_device *rdev,
1112 RV770_SMC_STATETABLE *table)
1113{
1114 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1115 int i;
1116
1117 for (i = 0; i < pi->valid_vddc_entries; i++) {
1118 table->highSMIO[pi->vddc_table[i].vddc_index] =
1119 pi->vddc_table[i].high_smio;
1120 table->lowSMIO[pi->vddc_table[i].vddc_index] =
1121 cpu_to_be32(pi->vddc_table[i].low_smio);
1122 }
1123
1124 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
1125 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
1126 cpu_to_be32(pi->vddc_mask_low);
1127
1128 for (i = 0;
1129 ((i < pi->valid_vddc_entries) &&
1130 (pi->max_vddc_in_table >
1131 pi->vddc_table[i].vddc));
1132 i++);
1133
1134 table->maxVDDCIndexInPPTable =
1135 pi->vddc_table[i].vddc_index;
1136
1137 return 0;
1138}
1139
1140static int rv770_populate_smc_mvdd_table(struct radeon_device *rdev,
1141 RV770_SMC_STATETABLE *table)
1142{
1143 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1144
1145 if (pi->mvdd_control) {
1146 table->lowSMIO[MVDD_HIGH_INDEX] |=
1147 cpu_to_be32(pi->mvdd_low_smio[MVDD_HIGH_INDEX]);
1148 table->lowSMIO[MVDD_LOW_INDEX] |=
1149 cpu_to_be32(pi->mvdd_low_smio[MVDD_LOW_INDEX]);
1150
1151 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0;
1152 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] =
1153 cpu_to_be32(pi->mvdd_mask_low);
1154 }
1155
1156 return 0;
1157}
1158
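/*
 * Assemble the full SMC state table: voltage tables, thermal protect
 * type, platform flags, the initial (boot) state and the ACPI state,
 * then upload the whole structure to SMC SRAM.
 */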
1159static int rv770_init_smc_table(struct radeon_device *rdev,
1160 struct radeon_ps *radeon_boot_state)
1161{
1162 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1163 struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1164 RV770_SMC_STATETABLE *table = &pi->smc_statetable;
1165 int ret;
1166
1167 memset(table, 0, sizeof(RV770_SMC_STATETABLE));
1168
1169 pi->boot_sclk = boot_state->low.sclk;
1170
1171 rv770_populate_smc_vddc_table(rdev, table);
1172 rv770_populate_smc_mvdd_table(rdev, table);
1173
1174 switch (rdev->pm.int_thermal_type) {
1175 case THERMAL_TYPE_RV770:
1176 case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
1177 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1178 break;
1179 case THERMAL_TYPE_NONE:
1180 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1181 break;
1182 case THERMAL_TYPE_EXTERNAL_GPIO:
1183 default:
1184 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1185 break;
1186 }
1187
1188 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) {
1189 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1190
1191 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT)
1192 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK;
1193
1194 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT)
1195 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE;
1196 }
1197
1198 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1199 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1200
1201 if (pi->mem_gddr5)
1202 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1203
1204 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1205 ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table);
1206 else
1207 ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table);
1208 if (ret)
1209 return ret;
1210
1211 if (rdev->family == CHIP_RV740)
1212 ret = rv740_populate_smc_acpi_state(rdev, table);
1213 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1214 ret = rv730_populate_smc_acpi_state(rdev, table);
1215 else
1216 ret = rv770_populate_smc_acpi_state(rdev, table);
1217 if (ret)
1218 return ret;
1219
1220 table->driverState = table->initialState;
1221
1222 return rv770_copy_bytes_to_smc(rdev,
1223 pi->state_table_start,
1224 (const u8 *)table,
1225 sizeof(RV770_SMC_STATETABLE),
1226 pi->sram_end);
1227}
1228
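/*
 * Build the VDDC step table from ATOM's min/max/step voltage data.
 * Consecutive steps that map to the same SMIO GPIO pattern share a
 * vddc_index; the index only advances when the pin state changes.
 */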
1229static int rv770_construct_vddc_table(struct radeon_device *rdev)
1230{
1231 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1232 u16 min, max, step;
1233 u32 steps = 0;
1234 u8 vddc_index = 0;
1235 u32 i;
1236
1237 radeon_atom_get_min_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &min);
1238 radeon_atom_get_max_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &max);
1239 radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &step);
1240
1241 steps = (max - min) / step + 1;
1242
1243 if (steps > MAX_NO_VREG_STEPS)
1244 return -EINVAL;
1245
1246 for (i = 0; i < steps; i++) {
1247 u32 gpio_pins, gpio_mask;
1248
1249 pi->vddc_table[i].vddc = (u16)(min + i * step);
1250 radeon_atom_get_voltage_gpio_settings(rdev,
1251 pi->vddc_table[i].vddc,
1252 SET_VOLTAGE_TYPE_ASIC_VDDC,
1253 &gpio_pins, &gpio_mask);
1254 pi->vddc_table[i].low_smio = gpio_pins & gpio_mask;
1255 pi->vddc_table[i].high_smio = 0;
1256 pi->vddc_mask_low = gpio_mask;
1257 if (i > 0) {
1258 if ((pi->vddc_table[i].low_smio !=
1259 pi->vddc_table[i - 1].low_smio ) ||
1260 (pi->vddc_table[i].high_smio !=
1261 pi->vddc_table[i - 1].high_smio))
1262 vddc_index++;
1263 }
1264 pi->vddc_table[i].vddc_index = vddc_index;
1265 }
1266
1267 pi->valid_vddc_entries = (u8)steps;
1268
1269 return 0;
1270}
1271
1272static u32 rv770_get_mclk_split_point(struct atom_memory_info *memory_info)
1273{
1274 if (memory_info->mem_type == MEM_TYPE_GDDR3)
1275 return 30000;
1276
1277 return 0;
1278}
1279
1280static int rv770_get_mvdd_pin_configuration(struct radeon_device *rdev)
1281{
1282 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1283 u32 gpio_pins, gpio_mask;
1284
1285 radeon_atom_get_voltage_gpio_settings(rdev,
1286 MVDD_HIGH_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
1287 &gpio_pins, &gpio_mask);
1288 pi->mvdd_mask_low = gpio_mask;
1289 pi->mvdd_low_smio[MVDD_HIGH_INDEX] =
1290 gpio_pins & gpio_mask;
1291
1292 radeon_atom_get_voltage_gpio_settings(rdev,
1293 MVDD_LOW_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
1294 &gpio_pins, &gpio_mask);
1295 pi->mvdd_low_smio[MVDD_LOW_INDEX] =
1296 gpio_pins & gpio_mask;
1297
1298 return 0;
1299}
1300
1301u8 rv770_get_memory_module_index(struct radeon_device *rdev)
1302{
1303 return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
1304}
1305
1306static int rv770_get_mvdd_configuration(struct radeon_device *rdev)
1307{
1308 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1309 u8 memory_module_index;
1310 struct atom_memory_info memory_info;
1311
1312 memory_module_index = rv770_get_memory_module_index(rdev);
1313
1314 if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) {
1315 pi->mvdd_control = false;
1316 return 0;
1317 }
1318
1319 pi->mvdd_split_frequency =
1320 rv770_get_mclk_split_point(&memory_info);
1321
1322 if (pi->mvdd_split_frequency == 0) {
1323 pi->mvdd_control = false;
1324 return 0;
1325 }
1326
1327 return rv770_get_mvdd_pin_configuration(rdev);
1328}
1329
1330void rv770_enable_voltage_control(struct radeon_device *rdev,
1331 bool enable)
1332{
1333 if (enable)
1334 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
1335 else
1336 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
1337}
1338
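/*
 * Program the display-gap policy: whichever CRTC is active keeps its
 * memory-change gap synchronized to VBLANK, the other (or both, with
 * no active CRTC) ignores it.
 */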
1339static void rv770_program_display_gap(struct radeon_device *rdev)
1340{
1341 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1342
1343 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1344 if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
1345 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1346 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1347 } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
1348 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1349 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1350 } else {
1351 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1352 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1353 }
1354 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1355}
1356
1357static void rv770_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1358 bool enable)
1359{
1360 rv770_enable_bif_dynamic_pcie_gen2(rdev, enable);
1361
1362 if (enable)
1363 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
1364 else
1365 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
1366}
1367
1368static void r7xx_program_memory_timing_parameters(struct radeon_device *rdev,
1369 struct radeon_ps *radeon_new_state)
1370{
1371 if ((rdev->family == CHIP_RV730) ||
1372 (rdev->family == CHIP_RV710) ||
1373 (rdev->family == CHIP_RV740))
1374 rv730_program_memory_timing_parameters(rdev, radeon_new_state);
1375 else
1376 rv770_program_memory_timing_parameters(rdev, radeon_new_state);
1377}
1378
1379static int rv770_upload_sw_state(struct radeon_device *rdev,
1380 struct radeon_ps *radeon_new_state)
1381{
1382 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1383 u16 address = pi->state_table_start +
1384 offsetof(RV770_SMC_STATETABLE, driverState);
1385 RV770_SMC_SWSTATE state = { 0 };
1386 int ret;
1387
1388 ret = rv770_convert_power_state_to_smc(rdev, radeon_new_state, &state);
1389 if (ret)
1390 return ret;
1391
1392 return rv770_copy_bytes_to_smc(rdev, address, (const u8 *)&state,
1393 sizeof(RV770_SMC_SWSTATE),
1394 pi->sram_end);
1395}
1396
1397int rv770_halt_smc(struct radeon_device *rdev)
1398{
1399 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
1400 return -EINVAL;
1401
1402 if (rv770_wait_for_smc_inactive(rdev) != PPSMC_Result_OK)
1403 return -EINVAL;
1404
1405 return 0;
1406}
1407
1408int rv770_resume_smc(struct radeon_device *rdev)
1409{
1410 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Resume) != PPSMC_Result_OK)
1411 return -EINVAL;
1412 return 0;
1413}
1414
1415int rv770_set_sw_state(struct radeon_device *rdev)
1416{
1417 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
1418 return -EINVAL;
1419 return 0;
1420}
1421
1422int rv770_set_boot_state(struct radeon_device *rdev)
1423{
1424 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) != PPSMC_Result_OK)
1425 return -EINVAL;
1426 return 0;
1427}
1428
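/*
 * Reprogram the UVD clocks on whichever side of the engine-clock
 * change is safe: before the switch when sclk is dropping (here),
 * after it when sclk is rising (below).
 */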
1429void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
1430 struct radeon_ps *new_ps,
1431 struct radeon_ps *old_ps)
1432{
1433 struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
1434 struct rv7xx_ps *current_state = rv770_get_ps(old_ps);
1435
1436 if ((new_ps->vclk == old_ps->vclk) &&
1437 (new_ps->dclk == old_ps->dclk))
1438 return;
1439
1440 if (new_state->high.sclk >= current_state->high.sclk)
1441 return;
1442
1443 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1444}
1445
1446void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
1447 struct radeon_ps *new_ps,
1448 struct radeon_ps *old_ps)
1449{
1450 struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
1451 struct rv7xx_ps *current_state = rv770_get_ps(old_ps);
1452
1453 if ((new_ps->vclk == old_ps->vclk) &&
1454 (new_ps->dclk == old_ps->dclk))
1455 return;
1456
1457 if (new_state->high.sclk < current_state->high.sclk)
1458 return;
1459
1460 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1461}
1462
1463int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev)
1464{
1465 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
1466 return -EINVAL;
1467
1468 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled)) != PPSMC_Result_OK)
1469 return -EINVAL;
1470
1471 return 0;
1472}
1473
1474int rv770_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
1475{
1476 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
1477 return -EINVAL;
1478
1479 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled)) != PPSMC_Result_OK)
1480 return -EINVAL;
1481
1482 return 0;
1483}
1484
1485void r7xx_start_smc(struct radeon_device *rdev)
1486{
1487 rv770_start_smc(rdev);
1488 rv770_start_smc_clock(rdev);
1489}
1490
1491
1492void r7xx_stop_smc(struct radeon_device *rdev)
1493{
1494 rv770_reset_smc(rdev);
1495 rv770_stop_smc_clock(rdev);
1496}
1497
1498static void rv770_read_clock_registers(struct radeon_device *rdev)
1499{
1500 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1501
1502 pi->clk_regs.rv770.cg_spll_func_cntl =
1503 RREG32(CG_SPLL_FUNC_CNTL);
1504 pi->clk_regs.rv770.cg_spll_func_cntl_2 =
1505 RREG32(CG_SPLL_FUNC_CNTL_2);
1506 pi->clk_regs.rv770.cg_spll_func_cntl_3 =
1507 RREG32(CG_SPLL_FUNC_CNTL_3);
1508 pi->clk_regs.rv770.cg_spll_spread_spectrum =
1509 RREG32(CG_SPLL_SPREAD_SPECTRUM);
1510 pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
1511 RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
1512 pi->clk_regs.rv770.mpll_ad_func_cntl =
1513 RREG32(MPLL_AD_FUNC_CNTL);
1514 pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
1515 RREG32(MPLL_AD_FUNC_CNTL_2);
1516 pi->clk_regs.rv770.mpll_dq_func_cntl =
1517 RREG32(MPLL_DQ_FUNC_CNTL);
1518 pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
1519 RREG32(MPLL_DQ_FUNC_CNTL_2);
1520 pi->clk_regs.rv770.mclk_pwrmgt_cntl =
1521 RREG32(MCLK_PWRMGT_CNTL);
1522 pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
1523}
1524
1525static void r7xx_read_clock_registers(struct radeon_device *rdev)
1526{
1527 if (rdev->family == CHIP_RV740)
1528 rv740_read_clock_registers(rdev);
1529 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1530 rv730_read_clock_registers(rdev);
1531 else
1532 rv770_read_clock_registers(rdev);
1533}
1534
1535void rv770_read_voltage_smio_registers(struct radeon_device *rdev)
1536{
1537 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1538
1539 pi->s0_vid_lower_smio_cntl =
1540 RREG32(S0_VID_LOWER_SMIO_CNTL);
1541}
1542
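/*
 * Copy the voltage setting from the SMIO state register currently
 * selected by the SW index into state 0, then point the index back
 * at state 0.
 */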
1543void rv770_reset_smio_status(struct radeon_device *rdev)
1544{
1545 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1546 u32 sw_smio_index, vid_smio_cntl;
1547
1548 sw_smio_index =
1549 (RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
1550 switch (sw_smio_index) {
1551 case 3:
1552 vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
1553 break;
1554 case 2:
1555 vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
1556 break;
1557 case 1:
1558 vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
1559 break;
1560 case 0:
1561 return;
1562 default:
1563 vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
1564 break;
1565 }
1566
1567 WREG32(S0_VID_LOWER_SMIO_CNTL, vid_smio_cntl);
1568 WREG32_P(GENERAL_PWRMGT, SW_SMIO_INDEX(0), ~SW_SMIO_INDEX_MASK);
1569}
1570
1571void rv770_get_memory_type(struct radeon_device *rdev)
1572{
1573 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1574 u32 tmp;
1575
1576 tmp = RREG32(MC_SEQ_MISC0);
1577
1578 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
1579 MC_SEQ_MISC0_GDDR5_VALUE)
1580 pi->mem_gddr5 = true;
1581 else
1582 pi->mem_gddr5 = false;
1583
1584}
1585
1586void rv770_get_pcie_gen2_status(struct radeon_device *rdev)
1587{
1588 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1589 u32 tmp;
1590
1591 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1592
1593 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1594 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
1595 pi->pcie_gen2 = true;
1596 else
1597 pi->pcie_gen2 = false;
1598
1599 if (pi->pcie_gen2) {
1600 if (tmp & LC_CURRENT_DATA_RATE)
1601 pi->boot_in_gen2 = true;
1602 else
1603 pi->boot_in_gen2 = false;
1604 } else
1605 pi->boot_in_gen2 = false;
1606}
1607
1608#if 0
1609static int rv770_enter_ulp_state(struct radeon_device *rdev)
1610{
1611 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1612
1613 if (pi->gfx_clock_gating) {
1614 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
1615 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
1616 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
1617 RREG32(GB_TILING_CONFIG);
1618 }
1619
1620 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
1621 ~HOST_SMC_MSG_MASK);
1622
1623 udelay(7000);
1624
1625 return 0;
1626}
1627
1628static int rv770_exit_ulp_state(struct radeon_device *rdev)
1629{
1630 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1631 int i;
1632
1633 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_ResumeFromMinimumPower),
1634 ~HOST_SMC_MSG_MASK);
1635
1636 udelay(7000);
1637
1638 for (i = 0; i < rdev->usec_timeout; i++) {
1639 if (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT) == 1)
1640 break;
1641 udelay(1000);
1642 }
1643
1644 if (pi->gfx_clock_gating)
1645 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
1646
1647 return 0;
1648}
1649#endif
1650
1651static void rv770_get_mclk_odt_threshold(struct radeon_device *rdev)
1652{
1653 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1654 u8 memory_module_index;
1655 struct atom_memory_info memory_info;
1656
1657 pi->mclk_odt_threshold = 0;
1658
1659 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) {
1660 memory_module_index = rv770_get_memory_module_index(rdev);
1661
1662 if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info))
1663 return;
1664
1665 if (memory_info.mem_type == MEM_TYPE_DDR2 ||
1666 memory_info.mem_type == MEM_TYPE_DDR3)
1667 pi->mclk_odt_threshold = 30000;
1668 }
1669}
1670
1671void rv770_get_max_vddc(struct radeon_device *rdev)
1672{
1673 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1674 u16 vddc;
1675
1676 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc))
1677 pi->max_vddc = 0;
1678 else
1679 pi->max_vddc = vddc;
1680}
1681
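/*
 * Convert the voltage and backbias response times plus the fixed
 * ACPI delay and VBI timeout into reference-clock based counts and
 * store them in the SMC soft registers.
 */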
1682void rv770_program_response_times(struct radeon_device *rdev)
1683{
1684 u32 voltage_response_time, backbias_response_time;
1685 u32 acpi_delay_time, vbi_time_out;
1686 u32 vddc_dly, bb_dly, acpi_dly, vbi_dly;
1687 u32 reference_clock;
1688
1689 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
1690 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
1691
1692 if (voltage_response_time == 0)
1693 voltage_response_time = 1000;
1694
1695 if (backbias_response_time == 0)
1696 backbias_response_time = 1000;
1697
1698 acpi_delay_time = 15000;
1699 vbi_time_out = 100000;
1700
1701 reference_clock = radeon_get_xclk(rdev);
1702
1703 vddc_dly = (voltage_response_time * reference_clock) / 1600;
1704 bb_dly = (backbias_response_time * reference_clock) / 1600;
1705 acpi_dly = (acpi_delay_time * reference_clock) / 1600;
1706 vbi_dly = (vbi_time_out * reference_clock) / 1600;
1707
1708 rv770_write_smc_soft_register(rdev,
1709 RV770_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
1710 rv770_write_smc_soft_register(rdev,
1711 RV770_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
1712 rv770_write_smc_soft_register(rdev,
1713 RV770_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
1714 rv770_write_smc_soft_register(rdev,
1715 RV770_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
1716#if 0
1717 /* XXX look up hw revision */
1718 if (WEKIVA_A21)
1719 rv770_write_smc_soft_register(rdev,
1720 RV770_SMC_SOFT_REGISTER_baby_step_timer,
1721 0x10);
1722#endif
1723}
1724
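/*
 * DC ODT (on-die termination) tracks whether mclk is at or below the
 * odt threshold.  This pre-switch hook only reprograms odt when
 * leaving DC termination (mclk rising above the threshold); entering
 * it is handled by the post-switch hook below.
 */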
1725static void rv770_program_dcodt_before_state_switch(struct radeon_device *rdev,
1726 struct radeon_ps *radeon_new_state,
1727 struct radeon_ps *radeon_current_state)
1728{
1729 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1730 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
1731 struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
1732 bool current_use_dc = false;
1733 bool new_use_dc = false;
1734
1735 if (pi->mclk_odt_threshold == 0)
1736 return;
1737
1738 if (current_state->high.mclk <= pi->mclk_odt_threshold)
1739 current_use_dc = true;
1740
1741 if (new_state->high.mclk <= pi->mclk_odt_threshold)
1742 new_use_dc = true;
1743
1744 if (current_use_dc == new_use_dc)
1745 return;
1746
1747 if (!current_use_dc && new_use_dc)
1748 return;
1749
1750 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1751 rv730_program_dcodt(rdev, new_use_dc);
1752}
1753
1754static void rv770_program_dcodt_after_state_switch(struct radeon_device *rdev,
1755 struct radeon_ps *radeon_new_state,
1756 struct radeon_ps *radeon_current_state)
1757{
1758 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1759 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
1760 struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
1761 bool current_use_dc = false;
1762 bool new_use_dc = false;
1763
1764 if (pi->mclk_odt_threshold == 0)
1765 return;
1766
1767 if (current_state->high.mclk <= pi->mclk_odt_threshold)
1768 current_use_dc = true;
1769
1770 if (new_state->high.mclk <= pi->mclk_odt_threshold)
1771 new_use_dc = true;
1772
1773 if (current_use_dc == new_use_dc)
1774 return;
1775
1776 if (current_use_dc && !new_use_dc)
1777 return;
1778
1779 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1780 rv730_program_dcodt(rdev, new_use_dc);
1781}
1782
1783static void rv770_retrieve_odt_values(struct radeon_device *rdev)
1784{
1785 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1786
1787 if (pi->mclk_odt_threshold == 0)
1788 return;
1789
1790 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1791 rv730_get_odt_values(rdev);
1792}
1793
1794static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1795{
1796 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1797 bool want_thermal_protection;
1798 enum radeon_dpm_event_src dpm_event_src;
1799
1800 switch (sources) {
1801 case 0:
1802 default:
1803 want_thermal_protection = false;
1804 break;
1805 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1806 want_thermal_protection = true;
1807 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1808 break;
1809
1810 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1811 want_thermal_protection = true;
1812 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1813 break;
1814
1815 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1816 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1817 want_thermal_protection = true;
1818 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1819 break;
1820 }
1821
1822 if (want_thermal_protection) {
1823 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
1824 if (pi->thermal_protection)
1825 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
1826 } else {
1827 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
1828 }
1829}
1830
1831void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
1832 enum radeon_dpm_auto_throttle_src source,
1833 bool enable)
1834{
1835 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1836
1837 if (enable) {
1838 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1839 pi->active_auto_throttle_sources |= 1 << source;
1840 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1841 }
1842 } else {
1843 if (pi->active_auto_throttle_sources & (1 << source)) {
1844 pi->active_auto_throttle_sources &= ~(1 << source);
1845 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1846 }
1847 }
1848}
1849
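/*
 * Clamp the requested range to the 0-255C the hardware supports.
 * The DIG_THERM fields take whole degrees C; the stored dpm limits
 * are in millidegrees.
 */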
1850int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
1851 int min_temp, int max_temp)
1852{
1853 int low_temp = 0 * 1000;
1854 int high_temp = 255 * 1000;
1855
1856 if (low_temp < min_temp)
1857 low_temp = min_temp;
1858 if (high_temp > max_temp)
1859 high_temp = max_temp;
1860 if (high_temp < low_temp) {
1861 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1862 return -EINVAL;
1863 }
1864
1865 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
1866 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
1867 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
1868
1869 rdev->pm.dpm.thermal.min_temp = low_temp;
1870 rdev->pm.dpm.thermal.max_temp = high_temp;
1871
1872 return 0;
1873}
1874
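/*
 * Full dpm bring-up: configure voltage/mvdd control, spread
 * spectrum, thermal protection and the clock/timing parameters,
 * upload the SMC firmware and state table, then start the SMC,
 * enable dpm and hook up thermal interrupts and throttling.
 */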
1875int rv770_dpm_enable(struct radeon_device *rdev)
1876{
1877 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1878 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1879 int ret;
1880
1881 if (pi->gfx_clock_gating)
1882 rv770_restore_cgcg(rdev);
1883
1884 if (rv770_dpm_enabled(rdev))
1885 return -EINVAL;
1886
1887 if (pi->voltage_control) {
1888 rv770_enable_voltage_control(rdev, true);
1889 ret = rv770_construct_vddc_table(rdev);
1890 if (ret) {
1891 DRM_ERROR("rv770_construct_vddc_table failed\n");
1892 return ret;
1893 }
1894 }
1895
1896 if (pi->dcodt)
1897 rv770_retrieve_odt_values(rdev);
1898
1899 if (pi->mvdd_control) {
1900 ret = rv770_get_mvdd_configuration(rdev);
1901 if (ret) {
1902 DRM_ERROR("rv770_get_mvdd_configuration failed\n");
1903 return ret;
1904 }
1905 }
1906
1907 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1908 rv770_enable_backbias(rdev, true);
1909
1910 rv770_enable_spread_spectrum(rdev, true);
1911
1912 if (pi->thermal_protection)
1913 rv770_enable_thermal_protection(rdev, true);
1914
1915 rv770_program_mpll_timing_parameters(rdev);
1916 rv770_setup_bsp(rdev);
1917 rv770_program_git(rdev);
1918 rv770_program_tp(rdev);
1919 rv770_program_tpp(rdev);
1920 rv770_program_sstp(rdev);
1921 rv770_program_engine_speed_parameters(rdev);
1922 rv770_enable_display_gap(rdev);
1923 rv770_program_vc(rdev);
1924
1925 if (pi->dynamic_pcie_gen2)
1926 rv770_enable_dynamic_pcie_gen2(rdev, true);
1927
1928 ret = rv770_upload_firmware(rdev);
1929 if (ret) {
1930 DRM_ERROR("rv770_upload_firmware failed\n");
1931 return ret;
1932 }
1933 ret = rv770_init_smc_table(rdev, boot_ps);
1934 if (ret) {
1935 DRM_ERROR("rv770_init_smc_table failed\n");
1936 return ret;
1937 }
1938
1939 rv770_program_response_times(rdev);
1940 r7xx_start_smc(rdev);
1941
1942 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1943 rv730_start_dpm(rdev);
1944 else
1945 rv770_start_dpm(rdev);
1946
1947 if (pi->gfx_clock_gating)
1948 rv770_gfx_clock_gating_enable(rdev, true);
1949
1950 if (pi->mg_clock_gating)
1951 rv770_mg_clock_gating_enable(rdev, true);
1952
1953 if (rdev->irq.installed &&
1954 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1955 PPSMC_Result result;
1956
1957 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1958 if (ret)
1959 return ret;
1960 rdev->irq.dpm_thermal = true;
1961 radeon_irq_set(rdev);
1962 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
1963
1964 if (result != PPSMC_Result_OK)
1965 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1966 }
1967
1968 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
1969
1970 return 0;
1971}
1972
1973void rv770_dpm_disable(struct radeon_device *rdev)
1974{
1975 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1976
1977 if (!rv770_dpm_enabled(rdev))
1978 return;
1979
1980 rv770_clear_vc(rdev);
1981
1982 if (pi->thermal_protection)
1983 rv770_enable_thermal_protection(rdev, false);
1984
1985 rv770_enable_spread_spectrum(rdev, false);
1986
1987 if (pi->dynamic_pcie_gen2)
1988 rv770_enable_dynamic_pcie_gen2(rdev, false);
1989
1990 if (rdev->irq.installed &&
1991 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1992 rdev->irq.dpm_thermal = false;
1993 radeon_irq_set(rdev);
1994 }
1995
1996 if (pi->gfx_clock_gating)
1997 rv770_gfx_clock_gating_enable(rdev, false);
1998
1999 if (pi->mg_clock_gating)
2000 rv770_mg_clock_gating_enable(rdev, false);
2001
2002 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
2003 rv730_stop_dpm(rdev);
2004 else
2005 rv770_stop_dpm(rdev);
2006
2007 r7xx_stop_smc(rdev);
2008 rv770_reset_smio_status(rdev);
2009}
2010
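/*
 * State-switch sequence: restrict performance levels, halt the SMC,
 * upload the new software state and memory timings, resume the SMC
 * and command the switch, then lift the restrictions.  UVD clocks
 * and dc odt are handled on the appropriate side of the switch.
 */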
2011int rv770_dpm_set_power_state(struct radeon_device *rdev)
2012{
2013 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2014 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
2015 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
2016 int ret;
2017
2018 ret = rv770_restrict_performance_levels_before_switch(rdev);
2019 if (ret) {
2020 DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
2021 return ret;
2022 }
2023 rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
2024 ret = rv770_halt_smc(rdev);
2025 if (ret) {
2026 DRM_ERROR("rv770_halt_smc failed\n");
2027 return ret;
2028 }
2029 ret = rv770_upload_sw_state(rdev, new_ps);
2030 if (ret) {
2031 DRM_ERROR("rv770_upload_sw_state failed\n");
2032 return ret;
2033 }
2034 r7xx_program_memory_timing_parameters(rdev, new_ps);
2035 if (pi->dcodt)
2036 rv770_program_dcodt_before_state_switch(rdev, new_ps, old_ps);
2037 ret = rv770_resume_smc(rdev);
2038 if (ret) {
2039 DRM_ERROR("rv770_resume_smc failed\n");
2040 return ret;
2041 }
2042 ret = rv770_set_sw_state(rdev);
2043 if (ret) {
2044 DRM_ERROR("rv770_set_sw_state failed\n");
2045 return ret;
2046 }
2047 if (pi->dcodt)
2048 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
2049 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2050 ret = rv770_unrestrict_performance_levels_after_switch(rdev);
2051 if (ret) {
2052 DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
2053 return ret;
2054 }
2055
2056 return 0;
2057}
2058
2059void rv770_dpm_reset_asic(struct radeon_device *rdev)
2060{
2061 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2062 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
2063
2064 rv770_restrict_performance_levels_before_switch(rdev);
2065 if (pi->dcodt)
2066 rv770_program_dcodt_before_state_switch(rdev, boot_ps, boot_ps);
2067 rv770_set_boot_state(rdev);
2068 if (pi->dcodt)
2069 rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps);
2070}
2071
2072void rv770_dpm_setup_asic(struct radeon_device *rdev)
2073{
2074 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2075
2076 r7xx_read_clock_registers(rdev);
2077 rv770_read_voltage_smio_registers(rdev);
2078 rv770_get_memory_type(rdev);
2079 if (pi->dcodt)
2080 rv770_get_mclk_odt_threshold(rdev);
2081 rv770_get_pcie_gen2_status(rdev);
2082
2083 rv770_enable_acpi_pm(rdev);
2084
2085 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
2086 rv770_enable_l0s(rdev);
2087 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
2088 rv770_enable_l1(rdev);
2089 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
2090 rv770_enable_pll_sleep_in_l1(rdev);
2091}
2092
2093void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
2094{
2095 rv770_program_display_gap(rdev);
2096}
2097
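/*
 * The ATOM PowerPlay tables exist in several revisions; these unions
 * overlay the possible layouts so the parser can select the right
 * one at runtime.
 */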
2098union power_info {
2099 struct _ATOM_POWERPLAY_INFO info;
2100 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2101 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2102 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2103 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2104 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2105};
2106
2107union pplib_clock_info {
2108 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2109 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2110 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2111 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2112};
2113
2114union pplib_power_state {
2115 struct _ATOM_PPLIB_STATE v1;
2116 struct _ATOM_PPLIB_STATE_V2 v2;
2117};
2118
2119static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
2120 struct radeon_ps *rps,
2121 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2122 u8 table_rev)
2123{
2124 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2125 rps->class = le16_to_cpu(non_clock_info->usClassification);
2126 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2127
2128 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2129 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2130 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2131 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
2132 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2133 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2134 } else {
2135 rps->vclk = 0;
2136 rps->dclk = 0;
2137 }
2138
2139 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
2140 rdev->pm.dpm.boot_ps = rps;
2141 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2142 rdev->pm.dpm.uvd_ps = rps;
2143}
2144
2145static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
2146 struct radeon_ps *rps, int index,
2147 union pplib_clock_info *clock_info)
2148{
2149 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2150 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2151 struct rv7xx_ps *ps = rv770_get_ps(rps);
2152 u32 sclk, mclk;
2153 u16 vddc;
2154 struct rv7xx_pl *pl;
2155
2156 switch (index) {
2157 case 0:
2158 pl = &ps->low;
2159 break;
2160 case 1:
2161 pl = &ps->medium;
2162 break;
2163 case 2:
2164 default:
2165 pl = &ps->high;
2166 break;
2167 }
2168
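	/* clocks are stored as a 16-bit low word plus an 8-bit high byte,
	 * in 10 kHz units
	 */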
2169 if (rdev->family >= CHIP_CEDAR) {
2170 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
2171 sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2172 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
2173 mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
2174
2175 pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
2176 pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
2177 pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
2178 } else {
2179 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
2180 sclk |= clock_info->r600.ucEngineClockHigh << 16;
2181 mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
2182 mclk |= clock_info->r600.ucMemoryClockHigh << 16;
2183
2184 pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
2185 pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
2186 }
2187
2188 pl->mclk = mclk;
2189 pl->sclk = sclk;
2190
2191 /* patch up vddc if necessary */
2192 if (pl->vddc == 0xff01) {
2193 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
2194 pl->vddc = vddc;
2195 }
2196
2197 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
2198 pi->acpi_vddc = pl->vddc;
2199 if (rdev->family >= CHIP_CEDAR)
2200 eg_pi->acpi_vddci = pl->vddci;
2201 if (ps->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
2202 pi->acpi_pcie_gen2 = true;
2203 else
2204 pi->acpi_pcie_gen2 = false;
2205 }
2206
2207 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
2208 if (rdev->family >= CHIP_BARTS) {
2209 eg_pi->ulv.supported = true;
2210 eg_pi->ulv.pl = pl;
2211 }
2212 }
2213
2214 if (pi->min_vddc_in_table > pl->vddc)
2215 pi->min_vddc_in_table = pl->vddc;
2216
2217 if (pi->max_vddc_in_table < pl->vddc)
2218 pi->max_vddc_in_table = pl->vddc;
2219
2220 /* patch up boot state */
2221 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2222 u16 vddc, vddci, mvdd;
2223 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
2224 pl->mclk = rdev->clock.default_mclk;
2225 pl->sclk = rdev->clock.default_sclk;
2226 pl->vddc = vddc;
2227 pl->vddci = vddci;
2228 }
2229
2230 if (rdev->family >= CHIP_BARTS) {
2231 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2232 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
2233 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
2234 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
2235 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
2236 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
2237 }
2238 }
2239}
2240
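/*
 * Each pplib state entry is one non-clock-info index followed by
 * (ucStateEntrySize - 1) clock-info indices; walk both arrays and
 * build a radeon_ps for every state.
 */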
2241int rv7xx_parse_power_table(struct radeon_device *rdev)
2242{
2243 struct radeon_mode_info *mode_info = &rdev->mode_info;
2244 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2245 union pplib_power_state *power_state;
2246 int i, j;
2247 union pplib_clock_info *clock_info;
2248 union power_info *power_info;
2249 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2250 u16 data_offset;
2251 u8 frev, crev;
2252 struct rv7xx_ps *ps;
2253
2254 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2255 &frev, &crev, &data_offset))
2256 return -EINVAL;
2257 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2258
2259 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2260 power_info->pplib.ucNumStates, GFP_KERNEL);
2261 if (!rdev->pm.dpm.ps)
2262 return -ENOMEM;
2263 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2264 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2265 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2266
2267 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2268 power_state = (union pplib_power_state *)
2269 (mode_info->atom_context->bios + data_offset +
2270 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
2271 i * power_info->pplib.ucStateEntrySize);
2272 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2273 (mode_info->atom_context->bios + data_offset +
2274 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2275 (power_state->v1.ucNonClockStateIndex *
2276 power_info->pplib.ucNonClockSize));
2277 if (power_info->pplib.ucStateEntrySize - 1) {
2278 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
2279 if (ps == NULL) {
2280 kfree(rdev->pm.dpm.ps);
2281 return -ENOMEM;
2282 }
2283 rdev->pm.dpm.ps[i].ps_priv = ps;
2284 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2285 non_clock_info,
2286 power_info->pplib.ucNonClockSize);
2287 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2288 clock_info = (union pplib_clock_info *)
2289 (mode_info->atom_context->bios + data_offset +
2290 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2291 (power_state->v1.ucClockStateIndices[j] *
2292 power_info->pplib.ucClockInfoSize));
2293 rv7xx_parse_pplib_clock_info(rdev,
2294 &rdev->pm.dpm.ps[i], j,
2295 clock_info);
2296 }
2297 }
2298 }
2299 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
2300 return 0;
2301}
2302
2303int rv770_dpm_init(struct radeon_device *rdev)
2304{
2305 struct rv7xx_power_info *pi;
2306 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
2307 uint16_t data_offset, size;
2308 uint8_t frev, crev;
2309 struct atom_clock_dividers dividers;
2310 int ret;
2311
2312 pi = kzalloc(sizeof(struct rv7xx_power_info), GFP_KERNEL);
2313 if (pi == NULL)
2314 return -ENOMEM;
2315 rdev->pm.dpm.priv = pi;
2316
2317 rv770_get_max_vddc(rdev);
2318
2319 pi->acpi_vddc = 0;
2320 pi->min_vddc_in_table = 0;
2321 pi->max_vddc_in_table = 0;
2322
2323 ret = rv7xx_parse_power_table(rdev);
2324 if (ret)
2325 return ret;
2326
2327 if (rdev->pm.dpm.voltage_response_time == 0)
2328 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2329 if (rdev->pm.dpm.backbias_response_time == 0)
2330 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2331
2332 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2333 0, false, &dividers);
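	/* prefer the divider setup reported by the BIOS; fall back to
	 * the default reference divider if the query fails, since
	 * dividers is not valid in that case
	 */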
2334	if (ret)
2335		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2336	else
2337		pi->ref_div = dividers.ref_div + 1;
2338
2339 pi->mclk_strobe_mode_threshold = 30000;
2340 pi->mclk_edc_enable_threshold = 30000;
2341
2342 pi->rlp = RV770_RLP_DFLT;
2343 pi->rmp = RV770_RMP_DFLT;
2344 pi->lhp = RV770_LHP_DFLT;
2345 pi->lmp = RV770_LMP_DFLT;
2346
2347 pi->voltage_control =
2348 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2349
2350 pi->mvdd_control =
2351 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2352
2353 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
2354 &frev, &crev, &data_offset)) {
2355 pi->sclk_ss = true;
2356 pi->mclk_ss = true;
2357 pi->dynamic_ss = true;
2358 } else {
2359 pi->sclk_ss = false;
2360 pi->mclk_ss = false;
2361 pi->dynamic_ss = false;
2362 }
2363
2364 pi->asi = RV770_ASI_DFLT;
2365 pi->pasi = RV770_HASI_DFLT;
2366 pi->vrc = RV770_VRC_DFLT;
2367
2368 pi->power_gating = false;
2369
2370 pi->gfx_clock_gating = true;
2371
2372 pi->mg_clock_gating = true;
2373 pi->mgcgtssm = true;
2374
2375 pi->dynamic_pcie_gen2 = true;
2376
2377 if (pi->gfx_clock_gating &&
2378 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
2379 pi->thermal_protection = true;
2380 else
2381 pi->thermal_protection = false;
2382
2383 pi->display_gap = true;
2384
2385 if (rdev->flags & RADEON_IS_MOBILITY)
2386 pi->dcodt = true;
2387 else
2388 pi->dcodt = false;
2389
2390 pi->ulps = true;
2391
2392 pi->mclk_stutter_mode_threshold = 0;
2393
2394 pi->sram_end = SMC_RAM_END;
2395 pi->state_table_start = RV770_SMC_TABLE_ADDRESS;
2396 pi->soft_regs_start = RV770_SMC_SOFT_REGISTERS_START;
2397
2398 return 0;
2399}
2400
2401void rv770_dpm_print_power_state(struct radeon_device *rdev,
2402 struct radeon_ps *rps)
2403{
2404 struct rv7xx_ps *ps = rv770_get_ps(rps);
2405 struct rv7xx_pl *pl;
2406
2407 r600_dpm_print_class_info(rps->class, rps->class2);
2408 r600_dpm_print_cap_info(rps->caps);
2409	printk("\tuvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
2410 if (rdev->family >= CHIP_CEDAR) {
2411 pl = &ps->low;
2412 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2413 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2414 pl = &ps->medium;
2415 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2416 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2417 pl = &ps->high;
2418 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2419 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2420 } else {
2421 pl = &ps->low;
2422 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n",
2423 pl->sclk, pl->mclk, pl->vddc);
2424 pl = &ps->medium;
2425 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n",
2426 pl->sclk, pl->mclk, pl->vddc);
2427 pl = &ps->high;
2428 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n",
2429 pl->sclk, pl->mclk, pl->vddc);
2430 }
2431 r600_dpm_print_ps_status(rdev, rps);
2432}
2433
2434void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2435 struct seq_file *m)
2436{
2437 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2438 struct rv7xx_ps *ps = rv770_get_ps(rps);
2439 struct rv7xx_pl *pl;
2440 u32 current_index =
2441 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2442 CURRENT_PROFILE_INDEX_SHIFT;
2443
2444 if (current_index > 2) {
2445		seq_printf(m, "invalid dpm profile %u\n", current_index);
2446 } else {
2447 if (current_index == 0)
2448 pl = &ps->low;
2449 else if (current_index == 1)
2450 pl = &ps->medium;
2451 else /* current_index == 2 */
2452 pl = &ps->high;
2453		seq_printf(m, "uvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
2454 if (rdev->family >= CHIP_CEDAR) {
2455 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
2456 current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2457 } else {
2458 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
2459 current_index, pl->sclk, pl->mclk, pl->vddc);
2460 }
2461 }
2462}
2463
2464void rv770_dpm_fini(struct radeon_device *rdev)
2465{
2466 int i;
2467
2468 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2469 kfree(rdev->pm.dpm.ps[i].ps_priv);
2470 }
2471 kfree(rdev->pm.dpm.ps);
2472 kfree(rdev->pm.dpm.priv);
2473}
2474
2475u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low)
2476{
2477 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
2478
2479 if (low)
2480 return requested_state->low.sclk;
2481 else
2482 return requested_state->high.sclk;
2483}
2484
2485u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
2486{
2487 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
2488
2489 if (low)
2490 return requested_state->low.mclk;
2491 else
2492 return requested_state->high.mclk;
2493}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
new file mode 100644
index 000000000000..f1e1fcf7f622
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -0,0 +1,288 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RV770_DPM_H__
24#define __RV770_DPM_H__
25
26#include "rv770_smc.h"
27
28struct rv770_clock_registers {
29 u32 cg_spll_func_cntl;
30 u32 cg_spll_func_cntl_2;
31 u32 cg_spll_func_cntl_3;
32 u32 cg_spll_spread_spectrum;
33 u32 cg_spll_spread_spectrum_2;
34 u32 mpll_ad_func_cntl;
35 u32 mpll_ad_func_cntl_2;
36 u32 mpll_dq_func_cntl;
37 u32 mpll_dq_func_cntl_2;
38 u32 mclk_pwrmgt_cntl;
39 u32 dll_cntl;
40 u32 mpll_ss1;
41 u32 mpll_ss2;
42};
43
44struct rv730_clock_registers {
45 u32 cg_spll_func_cntl;
46 u32 cg_spll_func_cntl_2;
47 u32 cg_spll_func_cntl_3;
48 u32 cg_spll_spread_spectrum;
49 u32 cg_spll_spread_spectrum_2;
50 u32 mclk_pwrmgt_cntl;
51 u32 dll_cntl;
52 u32 mpll_func_cntl;
53 u32 mpll_func_cntl2;
54 u32 mpll_func_cntl3;
55 u32 mpll_ss;
56 u32 mpll_ss2;
57};
58
59union r7xx_clock_registers {
60 struct rv770_clock_registers rv770;
61 struct rv730_clock_registers rv730;
62};
63
64struct vddc_table_entry {
65 u16 vddc;
66 u8 vddc_index;
67 u8 high_smio;
68 u32 low_smio;
69};
70
71#define MAX_NO_OF_MVDD_VALUES 2
72#define MAX_NO_VREG_STEPS 32
73
74struct rv7xx_power_info {
75 /* flags */
76 bool mem_gddr5;
77 bool pcie_gen2;
78 bool dynamic_pcie_gen2;
79 bool acpi_pcie_gen2;
80 bool boot_in_gen2;
81 bool voltage_control; /* vddc */
82 bool mvdd_control;
83 bool sclk_ss;
84 bool mclk_ss;
85 bool dynamic_ss;
86 bool gfx_clock_gating;
87 bool mg_clock_gating;
88 bool mgcgtssm;
89 bool power_gating;
90 bool thermal_protection;
91 bool display_gap;
92 bool dcodt;
93 bool ulps;
94 /* registers */
95 union r7xx_clock_registers clk_regs;
96 u32 s0_vid_lower_smio_cntl;
97 /* voltage */
98 u32 vddc_mask_low;
99 u32 mvdd_mask_low;
100 u32 mvdd_split_frequency;
101 u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
102 u16 max_vddc;
103 u16 max_vddc_in_table;
104 u16 min_vddc_in_table;
105 struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
106 u8 valid_vddc_entries;
107 /* dc odt */
108 u32 mclk_odt_threshold;
109 u8 odt_value_0[2];
110 u8 odt_value_1[2];
111 /* stored values */
112 u32 boot_sclk;
113 u16 acpi_vddc;
114 u32 ref_div;
115 u32 active_auto_throttle_sources;
116 u32 mclk_stutter_mode_threshold;
117 u32 mclk_strobe_mode_threshold;
118 u32 mclk_edc_enable_threshold;
119 u32 bsp;
120 u32 bsu;
121 u32 pbsp;
122 u32 pbsu;
123 u32 dsp;
124 u32 psp;
125 u32 asi;
126 u32 pasi;
127 u32 vrc;
128 u32 restricted_levels;
129 u32 rlp;
130 u32 rmp;
131 u32 lhp;
132 u32 lmp;
133 /* smc offsets */
134 u16 state_table_start;
135 u16 soft_regs_start;
136 u16 sram_end;
137 /* scratch structs */
138 RV770_SMC_STATETABLE smc_statetable;
139};
140
141struct rv7xx_pl {
142 u32 sclk;
143 u32 mclk;
144 u16 vddc;
145 u16 vddci; /* eg+ only */
146 u32 flags;
147 enum radeon_pcie_gen pcie_gen; /* si+ only */
148};
149
150struct rv7xx_ps {
151 struct rv7xx_pl high;
152 struct rv7xx_pl medium;
153 struct rv7xx_pl low;
154 bool dc_compatible;
155};
156
157#define RV770_RLP_DFLT 10
158#define RV770_RMP_DFLT 25
159#define RV770_LHP_DFLT 25
160#define RV770_LMP_DFLT 10
161#define RV770_VRC_DFLT 0x003f
162#define RV770_ASI_DFLT 1000
163#define RV770_HASI_DFLT 200000
164#define RV770_MGCGTTLOCAL0_DFLT 0x00100000
165#define RV7XX_MGCGTTLOCAL0_DFLT 0
166#define RV770_MGCGTTLOCAL1_DFLT 0xFFFF0000
167#define RV770_MGCGCGTSSMCTRL_DFLT 0x55940000
168
169#define MVDD_LOW_INDEX 0
170#define MVDD_HIGH_INDEX 1
171
172#define MVDD_LOW_VALUE 0
173#define MVDD_HIGH_VALUE 0xffff
174
175#define RV770_DEFAULT_VCLK_FREQ  53300 /* 10 kHz */
176#define RV770_DEFAULT_DCLK_FREQ  40000 /* 10 kHz */
177
178/* rv730/rv710 */
179int rv730_populate_sclk_value(struct radeon_device *rdev,
180 u32 engine_clock,
181 RV770_SMC_SCLK_VALUE *sclk);
182int rv730_populate_mclk_value(struct radeon_device *rdev,
183 u32 engine_clock, u32 memory_clock,
184 LPRV7XX_SMC_MCLK_VALUE mclk);
185void rv730_read_clock_registers(struct radeon_device *rdev);
186int rv730_populate_smc_acpi_state(struct radeon_device *rdev,
187 RV770_SMC_STATETABLE *table);
188int rv730_populate_smc_initial_state(struct radeon_device *rdev,
189 struct radeon_ps *radeon_initial_state,
190 RV770_SMC_STATETABLE *table);
191void rv730_program_memory_timing_parameters(struct radeon_device *rdev,
192 struct radeon_ps *radeon_state);
193void rv730_power_gating_enable(struct radeon_device *rdev,
194 bool enable);
195void rv730_start_dpm(struct radeon_device *rdev);
196void rv730_stop_dpm(struct radeon_device *rdev);
197void rv730_program_dcodt(struct radeon_device *rdev, bool use_dcodt);
198void rv730_get_odt_values(struct radeon_device *rdev);
199
200/* rv740 */
201int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
202 RV770_SMC_SCLK_VALUE *sclk);
203int rv740_populate_mclk_value(struct radeon_device *rdev,
204 u32 engine_clock, u32 memory_clock,
205 RV7XX_SMC_MCLK_VALUE *mclk);
206void rv740_read_clock_registers(struct radeon_device *rdev);
207int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
208 RV770_SMC_STATETABLE *table);
209void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
210 bool enable);
211u8 rv740_get_mclk_frequency_ratio(u32 memory_clock);
212u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock);
213u32 rv740_get_decoded_reference_divider(u32 encoded_ref);
214
215/* rv770 */
216u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf);
217int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
218 RV770_SMC_VOLTAGE_VALUE *voltage);
219int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
220 RV770_SMC_VOLTAGE_VALUE *voltage);
221u8 rv770_get_seq_value(struct radeon_device *rdev,
222 struct rv7xx_pl *pl);
223int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
224 RV770_SMC_VOLTAGE_VALUE *voltage);
225u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
226 u32 engine_clock);
227void rv770_program_response_times(struct radeon_device *rdev);
228int rv770_populate_smc_sp(struct radeon_device *rdev,
229 struct radeon_ps *radeon_state,
230 RV770_SMC_SWSTATE *smc_state);
231int rv770_populate_smc_t(struct radeon_device *rdev,
232 struct radeon_ps *radeon_state,
233 RV770_SMC_SWSTATE *smc_state);
234void rv770_read_voltage_smio_registers(struct radeon_device *rdev);
235void rv770_get_memory_type(struct radeon_device *rdev);
236void r7xx_start_smc(struct radeon_device *rdev);
237u8 rv770_get_memory_module_index(struct radeon_device *rdev);
238void rv770_get_max_vddc(struct radeon_device *rdev);
239void rv770_get_pcie_gen2_status(struct radeon_device *rdev);
240void rv770_enable_acpi_pm(struct radeon_device *rdev);
241void rv770_restore_cgcg(struct radeon_device *rdev);
242bool rv770_dpm_enabled(struct radeon_device *rdev);
243void rv770_enable_voltage_control(struct radeon_device *rdev,
244 bool enable);
245void rv770_enable_backbias(struct radeon_device *rdev,
246 bool enable);
247void rv770_enable_thermal_protection(struct radeon_device *rdev,
248 bool enable);
249void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
250 enum radeon_dpm_auto_throttle_src source,
251 bool enable);
252void rv770_setup_bsp(struct radeon_device *rdev);
253void rv770_program_git(struct radeon_device *rdev);
254void rv770_program_tp(struct radeon_device *rdev);
255void rv770_program_tpp(struct radeon_device *rdev);
256void rv770_program_sstp(struct radeon_device *rdev);
257void rv770_program_engine_speed_parameters(struct radeon_device *rdev);
258void rv770_program_vc(struct radeon_device *rdev);
259void rv770_clear_vc(struct radeon_device *rdev);
260int rv770_upload_firmware(struct radeon_device *rdev);
261void rv770_stop_dpm(struct radeon_device *rdev);
262void r7xx_stop_smc(struct radeon_device *rdev);
263void rv770_reset_smio_status(struct radeon_device *rdev);
264int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev);
265int rv770_unrestrict_performance_levels_after_switch(struct radeon_device *rdev);
266int rv770_halt_smc(struct radeon_device *rdev);
267int rv770_resume_smc(struct radeon_device *rdev);
268int rv770_set_sw_state(struct radeon_device *rdev);
269int rv770_set_boot_state(struct radeon_device *rdev);
270int rv7xx_parse_power_table(struct radeon_device *rdev);
271void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
272 struct radeon_ps *new_ps,
273 struct radeon_ps *old_ps);
274void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
275 struct radeon_ps *new_ps,
276 struct radeon_ps *old_ps);
277
278/* smc */
279int rv770_read_smc_soft_register(struct radeon_device *rdev,
280 u16 reg_offset, u32 *value);
281int rv770_write_smc_soft_register(struct radeon_device *rdev,
282 u16 reg_offset, u32 value);
283
284/* thermal */
285int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
286 int min_temp, int max_temp);
287
288#endif
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
new file mode 100644
index 000000000000..ab95da570215
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -0,0 +1,621 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "rv770d.h"
29#include "rv770_dpm.h"
30#include "rv770_smc.h"
31#include "atom.h"
32#include "radeon_ucode.h"
33
34#define FIRST_SMC_INT_VECT_REG 0xFFD8
35#define FIRST_INT_VECT_S19 0xFFC0
36
37static const u8 rv770_smc_int_vectors[] =
38{
39 0x08, 0x10, 0x08, 0x10,
40 0x08, 0x10, 0x08, 0x10,
41 0x08, 0x10, 0x08, 0x10,
42 0x08, 0x10, 0x08, 0x10,
43 0x08, 0x10, 0x08, 0x10,
44 0x08, 0x10, 0x08, 0x10,
45 0x08, 0x10, 0x08, 0x10,
46 0x08, 0x10, 0x08, 0x10,
47 0x08, 0x10, 0x08, 0x10,
48 0x08, 0x10, 0x08, 0x10,
49 0x08, 0x10, 0x08, 0x10,
50 0x08, 0x10, 0x08, 0x10,
51 0x08, 0x10, 0x0C, 0xD7,
52 0x08, 0x2B, 0x08, 0x10,
53 0x03, 0x51, 0x03, 0x51,
54 0x03, 0x51, 0x03, 0x51
55};
56
57static const u8 rv730_smc_int_vectors[] =
58{
59 0x08, 0x15, 0x08, 0x15,
60 0x08, 0x15, 0x08, 0x15,
61 0x08, 0x15, 0x08, 0x15,
62 0x08, 0x15, 0x08, 0x15,
63 0x08, 0x15, 0x08, 0x15,
64 0x08, 0x15, 0x08, 0x15,
65 0x08, 0x15, 0x08, 0x15,
66 0x08, 0x15, 0x08, 0x15,
67 0x08, 0x15, 0x08, 0x15,
68 0x08, 0x15, 0x08, 0x15,
69 0x08, 0x15, 0x08, 0x15,
70 0x08, 0x15, 0x08, 0x15,
71 0x08, 0x15, 0x0C, 0xBB,
72 0x08, 0x30, 0x08, 0x15,
73 0x03, 0x56, 0x03, 0x56,
74 0x03, 0x56, 0x03, 0x56
75};
76
77static const u8 rv710_smc_int_vectors[] =
78{
79 0x08, 0x04, 0x08, 0x04,
80 0x08, 0x04, 0x08, 0x04,
81 0x08, 0x04, 0x08, 0x04,
82 0x08, 0x04, 0x08, 0x04,
83 0x08, 0x04, 0x08, 0x04,
84 0x08, 0x04, 0x08, 0x04,
85 0x08, 0x04, 0x08, 0x04,
86 0x08, 0x04, 0x08, 0x04,
87 0x08, 0x04, 0x08, 0x04,
88 0x08, 0x04, 0x08, 0x04,
89 0x08, 0x04, 0x08, 0x04,
90 0x08, 0x04, 0x08, 0x04,
91 0x08, 0x04, 0x0C, 0xCB,
92 0x08, 0x1F, 0x08, 0x04,
93 0x03, 0x51, 0x03, 0x51,
94 0x03, 0x51, 0x03, 0x51
95};
96
97static const u8 rv740_smc_int_vectors[] =
98{
99 0x08, 0x10, 0x08, 0x10,
100 0x08, 0x10, 0x08, 0x10,
101 0x08, 0x10, 0x08, 0x10,
102 0x08, 0x10, 0x08, 0x10,
103 0x08, 0x10, 0x08, 0x10,
104 0x08, 0x10, 0x08, 0x10,
105 0x08, 0x10, 0x08, 0x10,
106 0x08, 0x10, 0x08, 0x10,
107 0x08, 0x10, 0x08, 0x10,
108 0x08, 0x10, 0x08, 0x10,
109 0x08, 0x10, 0x08, 0x10,
110 0x08, 0x10, 0x08, 0x10,
111 0x08, 0x10, 0x0C, 0xD7,
112 0x08, 0x2B, 0x08, 0x10,
113 0x03, 0x51, 0x03, 0x51,
114 0x03, 0x51, 0x03, 0x51
115};
116
117static const u8 cedar_smc_int_vectors[] =
118{
119 0x0B, 0x05, 0x0B, 0x05,
120 0x0B, 0x05, 0x0B, 0x05,
121 0x0B, 0x05, 0x0B, 0x05,
122 0x0B, 0x05, 0x0B, 0x05,
123 0x0B, 0x05, 0x0B, 0x05,
124 0x0B, 0x05, 0x0B, 0x05,
125 0x0B, 0x05, 0x0B, 0x05,
126 0x0B, 0x05, 0x0B, 0x05,
127 0x0B, 0x05, 0x0B, 0x05,
128 0x0B, 0x05, 0x0B, 0x05,
129 0x0B, 0x05, 0x0B, 0x05,
130 0x0B, 0x05, 0x0B, 0x05,
131 0x0B, 0x05, 0x11, 0x8B,
132 0x0B, 0x20, 0x0B, 0x05,
133 0x04, 0xF6, 0x04, 0xF6,
134 0x04, 0xF6, 0x04, 0xF6
135};
136
137static const u8 redwood_smc_int_vectors[] =
138{
139 0x0B, 0x05, 0x0B, 0x05,
140 0x0B, 0x05, 0x0B, 0x05,
141 0x0B, 0x05, 0x0B, 0x05,
142 0x0B, 0x05, 0x0B, 0x05,
143 0x0B, 0x05, 0x0B, 0x05,
144 0x0B, 0x05, 0x0B, 0x05,
145 0x0B, 0x05, 0x0B, 0x05,
146 0x0B, 0x05, 0x0B, 0x05,
147 0x0B, 0x05, 0x0B, 0x05,
148 0x0B, 0x05, 0x0B, 0x05,
149 0x0B, 0x05, 0x0B, 0x05,
150 0x0B, 0x05, 0x0B, 0x05,
151 0x0B, 0x05, 0x11, 0x8B,
152 0x0B, 0x20, 0x0B, 0x05,
153 0x04, 0xF6, 0x04, 0xF6,
154 0x04, 0xF6, 0x04, 0xF6
155};
156
157static const u8 juniper_smc_int_vectors[] =
158{
159 0x0B, 0x05, 0x0B, 0x05,
160 0x0B, 0x05, 0x0B, 0x05,
161 0x0B, 0x05, 0x0B, 0x05,
162 0x0B, 0x05, 0x0B, 0x05,
163 0x0B, 0x05, 0x0B, 0x05,
164 0x0B, 0x05, 0x0B, 0x05,
165 0x0B, 0x05, 0x0B, 0x05,
166 0x0B, 0x05, 0x0B, 0x05,
167 0x0B, 0x05, 0x0B, 0x05,
168 0x0B, 0x05, 0x0B, 0x05,
169 0x0B, 0x05, 0x0B, 0x05,
170 0x0B, 0x05, 0x0B, 0x05,
171 0x0B, 0x05, 0x11, 0x8B,
172 0x0B, 0x20, 0x0B, 0x05,
173 0x04, 0xF6, 0x04, 0xF6,
174 0x04, 0xF6, 0x04, 0xF6
175};
176
177static const u8 cypress_smc_int_vectors[] =
178{
179 0x0B, 0x05, 0x0B, 0x05,
180 0x0B, 0x05, 0x0B, 0x05,
181 0x0B, 0x05, 0x0B, 0x05,
182 0x0B, 0x05, 0x0B, 0x05,
183 0x0B, 0x05, 0x0B, 0x05,
184 0x0B, 0x05, 0x0B, 0x05,
185 0x0B, 0x05, 0x0B, 0x05,
186 0x0B, 0x05, 0x0B, 0x05,
187 0x0B, 0x05, 0x0B, 0x05,
188 0x0B, 0x05, 0x0B, 0x05,
189 0x0B, 0x05, 0x0B, 0x05,
190 0x0B, 0x05, 0x0B, 0x05,
191 0x0B, 0x05, 0x11, 0x8B,
192 0x0B, 0x20, 0x0B, 0x05,
193 0x04, 0xF6, 0x04, 0xF6,
194 0x04, 0xF6, 0x04, 0xF6
195};
196
197static const u8 barts_smc_int_vectors[] =
198{
199 0x0C, 0x14, 0x0C, 0x14,
200 0x0C, 0x14, 0x0C, 0x14,
201 0x0C, 0x14, 0x0C, 0x14,
202 0x0C, 0x14, 0x0C, 0x14,
203 0x0C, 0x14, 0x0C, 0x14,
204 0x0C, 0x14, 0x0C, 0x14,
205 0x0C, 0x14, 0x0C, 0x14,
206 0x0C, 0x14, 0x0C, 0x14,
207 0x0C, 0x14, 0x0C, 0x14,
208 0x0C, 0x14, 0x0C, 0x14,
209 0x0C, 0x14, 0x0C, 0x14,
210 0x0C, 0x14, 0x0C, 0x14,
211 0x0C, 0x14, 0x12, 0xAA,
212 0x0C, 0x2F, 0x15, 0xF6,
213 0x15, 0xF6, 0x05, 0x0A,
214 0x05, 0x0A, 0x05, 0x0A
215};
216
217static const u8 turks_smc_int_vectors[] =
218{
219 0x0C, 0x14, 0x0C, 0x14,
220 0x0C, 0x14, 0x0C, 0x14,
221 0x0C, 0x14, 0x0C, 0x14,
222 0x0C, 0x14, 0x0C, 0x14,
223 0x0C, 0x14, 0x0C, 0x14,
224 0x0C, 0x14, 0x0C, 0x14,
225 0x0C, 0x14, 0x0C, 0x14,
226 0x0C, 0x14, 0x0C, 0x14,
227 0x0C, 0x14, 0x0C, 0x14,
228 0x0C, 0x14, 0x0C, 0x14,
229 0x0C, 0x14, 0x0C, 0x14,
230 0x0C, 0x14, 0x0C, 0x14,
231 0x0C, 0x14, 0x12, 0xAA,
232 0x0C, 0x2F, 0x15, 0xF6,
233 0x15, 0xF6, 0x05, 0x0A,
234 0x05, 0x0A, 0x05, 0x0A
235};
236
237static const u8 caicos_smc_int_vectors[] =
238{
239 0x0C, 0x14, 0x0C, 0x14,
240 0x0C, 0x14, 0x0C, 0x14,
241 0x0C, 0x14, 0x0C, 0x14,
242 0x0C, 0x14, 0x0C, 0x14,
243 0x0C, 0x14, 0x0C, 0x14,
244 0x0C, 0x14, 0x0C, 0x14,
245 0x0C, 0x14, 0x0C, 0x14,
246 0x0C, 0x14, 0x0C, 0x14,
247 0x0C, 0x14, 0x0C, 0x14,
248 0x0C, 0x14, 0x0C, 0x14,
249 0x0C, 0x14, 0x0C, 0x14,
250 0x0C, 0x14, 0x0C, 0x14,
251 0x0C, 0x14, 0x12, 0xAA,
252 0x0C, 0x2F, 0x15, 0xF6,
253 0x15, 0xF6, 0x05, 0x0A,
254 0x05, 0x0A, 0x05, 0x0A
255};
256
257static const u8 cayman_smc_int_vectors[] =
258{
259 0x12, 0x05, 0x12, 0x05,
260 0x12, 0x05, 0x12, 0x05,
261 0x12, 0x05, 0x12, 0x05,
262 0x12, 0x05, 0x12, 0x05,
263 0x12, 0x05, 0x12, 0x05,
264 0x12, 0x05, 0x12, 0x05,
265 0x12, 0x05, 0x12, 0x05,
266 0x12, 0x05, 0x12, 0x05,
267 0x12, 0x05, 0x12, 0x05,
268 0x12, 0x05, 0x12, 0x05,
269 0x12, 0x05, 0x12, 0x05,
270 0x12, 0x05, 0x12, 0x05,
271 0x12, 0x05, 0x18, 0xEA,
272 0x12, 0x20, 0x1C, 0x34,
273 0x1C, 0x34, 0x08, 0x72,
274 0x08, 0x72, 0x08, 0x72
275};
276
277int rv770_set_smc_sram_address(struct radeon_device *rdev,
278 u16 smc_address, u16 limit)
279{
280 u32 addr;
281
282 if (smc_address & 3)
283 return -EINVAL;
284 if ((smc_address + 3) > limit)
285 return -EINVAL;
286
287 addr = smc_address;
288 addr |= SMC_SRAM_AUTO_INC_DIS;
289
290 WREG32(SMC_SRAM_ADDR, addr);
291
292 return 0;
293}
294
295int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
296 u16 smc_start_address, const u8 *src,
297 u16 byte_count, u16 limit)
298{
299 u32 data, original_data, extra_shift;
300 u16 addr;
301 int ret;
302
303 if (smc_start_address & 3)
304 return -EINVAL;
305 if ((smc_start_address + byte_count) > limit)
306 return -EINVAL;
307
308 addr = smc_start_address;
309
310 while (byte_count >= 4) {
311 /* SMC address space is BE */
312 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
313
314 ret = rv770_set_smc_sram_address(rdev, addr, limit);
315 if (ret)
316 return ret;
317
318 WREG32(SMC_SRAM_DATA, data);
319
320 src += 4;
321 byte_count -= 4;
322 addr += 4;
323 }
324
325 /* RMW for final bytes */
326 if (byte_count > 0) {
327 data = 0;
328
329 ret = rv770_set_smc_sram_address(rdev, addr, limit);
330 if (ret)
331 return ret;
332
333 original_data = RREG32(SMC_SRAM_DATA);
334
335 extra_shift = 8 * (4 - byte_count);
336
337 while (byte_count > 0) {
338 /* SMC address space is BE */
339 data = (data << 8) + *src++;
340 byte_count--;
341 }
342
343 data <<= extra_shift;
344
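		/* keep the low bytes that are already in SRAM */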
345 data |= (original_data & ~((~0UL) << extra_shift));
346
347 ret = rv770_set_smc_sram_address(rdev, addr, limit);
348 if (ret)
349 return ret;
350
351 WREG32(SMC_SRAM_DATA, data);
352 }
353
354 return 0;
355}
356
357static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
358 u32 smc_first_vector, const u8 *src,
359 u32 byte_count)
360{
361 u32 tmp, i;
362
363 if (byte_count % 4)
364 return -EINVAL;
365
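	/* vectors below the first writable vector register cannot be
	 * programmed; skip that part of the image
	 */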
366 if (smc_first_vector < FIRST_SMC_INT_VECT_REG) {
367 tmp = FIRST_SMC_INT_VECT_REG - smc_first_vector;
368
369 if (tmp > byte_count)
370 return 0;
371
372 byte_count -= tmp;
373 src += tmp;
374 smc_first_vector = FIRST_SMC_INT_VECT_REG;
375 }
376
377 for (i = 0; i < byte_count; i += 4) {
378 /* SMC address space is BE */
379 tmp = (src[i] << 24) | (src[i + 1] << 16) | (src[i + 2] << 8) | src[i + 3];
380
381 WREG32(SMC_ISR_FFD8_FFDB + i, tmp);
382 }
383
384 return 0;
385}
386
387void rv770_start_smc(struct radeon_device *rdev)
388{
389 WREG32_P(SMC_IO, SMC_RST_N, ~SMC_RST_N);
390}
391
392void rv770_reset_smc(struct radeon_device *rdev)
393{
394 WREG32_P(SMC_IO, 0, ~SMC_RST_N);
395}
396
397void rv770_stop_smc_clock(struct radeon_device *rdev)
398{
399 WREG32_P(SMC_IO, 0, ~SMC_CLK_EN);
400}
401
402void rv770_start_smc_clock(struct radeon_device *rdev)
403{
404 WREG32_P(SMC_IO, SMC_CLK_EN, ~SMC_CLK_EN);
405}
406
407bool rv770_is_smc_running(struct radeon_device *rdev)
408{
409 u32 tmp;
410
411 tmp = RREG32(SMC_IO);
412
413 if ((tmp & SMC_RST_N) && (tmp & SMC_CLK_EN))
414 return true;
415 else
416 return false;
417}
418
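/*
 * Post a message in SMC_MSG and poll the response field until the
 * SMC acknowledges it or the timeout expires.
 */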
419PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
420{
421 u32 tmp;
422 int i;
423 PPSMC_Result result;
424
425 if (!rv770_is_smc_running(rdev))
426 return PPSMC_Result_Failed;
427
428 WREG32_P(SMC_MSG, HOST_SMC_MSG(msg), ~HOST_SMC_MSG_MASK);
429
430 for (i = 0; i < rdev->usec_timeout; i++) {
431 tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
432 tmp >>= HOST_SMC_RESP_SHIFT;
433 if (tmp != 0)
434 break;
435 udelay(1);
436 }
437
438 tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
439 tmp >>= HOST_SMC_RESP_SHIFT;
440
441 result = (PPSMC_Result)tmp;
442 return result;
443}
444
445PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
446{
447 int i;
448 PPSMC_Result result = PPSMC_Result_OK;
449
450 if (!rv770_is_smc_running(rdev))
451 return result;
452
453 for (i = 0; i < rdev->usec_timeout; i++) {
454 if (RREG32(SMC_IO) & SMC_STOP_MODE)
455 break;
456 udelay(1);
457 }
458
459 return result;
460}
461
462static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
463{
464 u16 i;
465
466 for (i = 0; i < limit; i += 4) {
467 rv770_set_smc_sram_address(rdev, i, limit);
468 WREG32(SMC_SRAM_DATA, 0);
469 }
470}
471
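/*
 * Clear SMC SRAM, then copy the per-ASIC firmware image and
 * interrupt vector table to their ASIC-specific locations.
 */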
472int rv770_load_smc_ucode(struct radeon_device *rdev,
473 u16 limit)
474{
475 int ret;
476 const u8 *int_vect;
477 u16 int_vect_start_address;
478 u16 int_vect_size;
479 const u8 *ucode_data;
480 u16 ucode_start_address;
481 u16 ucode_size;
482
483 if (!rdev->smc_fw)
484 return -EINVAL;
485
486 rv770_clear_smc_sram(rdev, limit);
487
488 switch (rdev->family) {
489 case CHIP_RV770:
490 ucode_start_address = RV770_SMC_UCODE_START;
491 ucode_size = RV770_SMC_UCODE_SIZE;
492 int_vect = (const u8 *)&rv770_smc_int_vectors;
493 int_vect_start_address = RV770_SMC_INT_VECTOR_START;
494 int_vect_size = RV770_SMC_INT_VECTOR_SIZE;
495 break;
496 case CHIP_RV730:
497 ucode_start_address = RV730_SMC_UCODE_START;
498 ucode_size = RV730_SMC_UCODE_SIZE;
499 int_vect = (const u8 *)&rv730_smc_int_vectors;
500 int_vect_start_address = RV730_SMC_INT_VECTOR_START;
501 int_vect_size = RV730_SMC_INT_VECTOR_SIZE;
502 break;
503 case CHIP_RV710:
504 ucode_start_address = RV710_SMC_UCODE_START;
505 ucode_size = RV710_SMC_UCODE_SIZE;
506 int_vect = (const u8 *)&rv710_smc_int_vectors;
507 int_vect_start_address = RV710_SMC_INT_VECTOR_START;
508 int_vect_size = RV710_SMC_INT_VECTOR_SIZE;
509 break;
510 case CHIP_RV740:
511 ucode_start_address = RV740_SMC_UCODE_START;
512 ucode_size = RV740_SMC_UCODE_SIZE;
513 int_vect = (const u8 *)&rv740_smc_int_vectors;
514 int_vect_start_address = RV740_SMC_INT_VECTOR_START;
515 int_vect_size = RV740_SMC_INT_VECTOR_SIZE;
516 break;
517 case CHIP_CEDAR:
518 ucode_start_address = CEDAR_SMC_UCODE_START;
519 ucode_size = CEDAR_SMC_UCODE_SIZE;
520 int_vect = (const u8 *)&cedar_smc_int_vectors;
521 int_vect_start_address = CEDAR_SMC_INT_VECTOR_START;
522 int_vect_size = CEDAR_SMC_INT_VECTOR_SIZE;
523 break;
524 case CHIP_REDWOOD:
525 ucode_start_address = REDWOOD_SMC_UCODE_START;
526 ucode_size = REDWOOD_SMC_UCODE_SIZE;
527 int_vect = (const u8 *)&redwood_smc_int_vectors;
528 int_vect_start_address = REDWOOD_SMC_INT_VECTOR_START;
529 int_vect_size = REDWOOD_SMC_INT_VECTOR_SIZE;
530 break;
531 case CHIP_JUNIPER:
532 ucode_start_address = JUNIPER_SMC_UCODE_START;
533 ucode_size = JUNIPER_SMC_UCODE_SIZE;
534 int_vect = (const u8 *)&juniper_smc_int_vectors;
535 int_vect_start_address = JUNIPER_SMC_INT_VECTOR_START;
536 int_vect_size = JUNIPER_SMC_INT_VECTOR_SIZE;
537 break;
538 case CHIP_CYPRESS:
539 case CHIP_HEMLOCK:
540 ucode_start_address = CYPRESS_SMC_UCODE_START;
541 ucode_size = CYPRESS_SMC_UCODE_SIZE;
542 int_vect = (const u8 *)&cypress_smc_int_vectors;
543 int_vect_start_address = CYPRESS_SMC_INT_VECTOR_START;
544 int_vect_size = CYPRESS_SMC_INT_VECTOR_SIZE;
545 break;
546 case CHIP_BARTS:
547 ucode_start_address = BARTS_SMC_UCODE_START;
548 ucode_size = BARTS_SMC_UCODE_SIZE;
549 int_vect = (const u8 *)&barts_smc_int_vectors;
550 int_vect_start_address = BARTS_SMC_INT_VECTOR_START;
551 int_vect_size = BARTS_SMC_INT_VECTOR_SIZE;
552 break;
553 case CHIP_TURKS:
554 ucode_start_address = TURKS_SMC_UCODE_START;
555 ucode_size = TURKS_SMC_UCODE_SIZE;
556 int_vect = (const u8 *)&turks_smc_int_vectors;
557 int_vect_start_address = TURKS_SMC_INT_VECTOR_START;
558 int_vect_size = TURKS_SMC_INT_VECTOR_SIZE;
559 break;
560 case CHIP_CAICOS:
561 ucode_start_address = CAICOS_SMC_UCODE_START;
562 ucode_size = CAICOS_SMC_UCODE_SIZE;
563 int_vect = (const u8 *)&caicos_smc_int_vectors;
564 int_vect_start_address = CAICOS_SMC_INT_VECTOR_START;
565 int_vect_size = CAICOS_SMC_INT_VECTOR_SIZE;
566 break;
567 case CHIP_CAYMAN:
568 ucode_start_address = CAYMAN_SMC_UCODE_START;
569 ucode_size = CAYMAN_SMC_UCODE_SIZE;
570 int_vect = (const u8 *)&cayman_smc_int_vectors;
571 int_vect_start_address = CAYMAN_SMC_INT_VECTOR_START;
572 int_vect_size = CAYMAN_SMC_INT_VECTOR_SIZE;
573 break;
574 default:
575 DRM_ERROR("unknown asic in smc ucode loader\n");
576 BUG();
577 }
578
579 /* load the ucode */
580 ucode_data = (const u8 *)rdev->smc_fw->data;
581 ret = rv770_copy_bytes_to_smc(rdev, ucode_start_address,
582 ucode_data, ucode_size, limit);
583 if (ret)
584 return ret;
585
586 /* set up the int vectors */
587 ret = rv770_program_interrupt_vectors(rdev, int_vect_start_address,
588 int_vect, int_vect_size);
589 if (ret)
590 return ret;
591
592 return 0;
593}
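
Together with the reset/clock helpers earlier in the file, the implied bring-up order is: hold the SMC in reset with its clock gated, load the firmware and interrupt vectors, then ungate the clock and release reset. A hedged sketch of that sequence; the ordering is inferred from the helpers, this patch does not spell it out:

/* hedged sketch of SMC bring-up using the helpers in this file */
static int example_smc_start(struct radeon_device *rdev, u16 limit)
{
	int ret;

	rv770_reset_smc(rdev);          /* hold the SMC in reset */
	rv770_stop_smc_clock(rdev);     /* gate its clock */

	ret = rv770_load_smc_ucode(rdev, limit);
	if (ret)
		return ret;

	rv770_start_smc_clock(rdev);    /* ungate the clock */
	rv770_start_smc(rdev);          /* release reset */

	return rv770_is_smc_running(rdev) ? 0 : -EIO;
}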
594
595int rv770_read_smc_sram_dword(struct radeon_device *rdev,
596 u16 smc_address, u32 *value, u16 limit)
597{
598 int ret;
599
600 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
601 if (ret)
602 return ret;
603
604 *value = RREG32(SMC_SRAM_DATA);
605
606 return 0;
607}
608
609int rv770_write_smc_sram_dword(struct radeon_device *rdev,
610 u16 smc_address, u32 value, u16 limit)
611{
612 int ret;
613
614 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
615 if (ret)
616 return ret;
617
618 WREG32(SMC_SRAM_DATA, value);
619
620 return 0;
621}
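
The two dword accessors give read-modify-write access to SMC SRAM through the indirect SMC_SRAM_ADDR/SMC_SRAM_DATA pair. A hedged sketch of updating one dword in place; the address and bit are illustrative:

/* hedged sketch: set a flag bit in one SMC SRAM dword */
static int example_set_sram_bit(struct radeon_device *rdev,
				u16 addr, u32 bit, u16 limit)
{
	u32 val;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, addr, &val, limit);
	if (ret)
		return ret;

	return rv770_write_smc_sram_dword(rdev, addr, val | bit, limit);
}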
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
new file mode 100644
index 000000000000..f78d92a4b325
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -0,0 +1,209 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __RV770_SMC_H__
24#define __RV770_SMC_H__
25
26#include "ppsmc.h"
27
28#pragma pack(push, 1)
29
30#define RV770_SMC_TABLE_ADDRESS 0xB000
31
32#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
33
34struct RV770_SMC_SCLK_VALUE
35{
36 uint32_t vCG_SPLL_FUNC_CNTL;
37 uint32_t vCG_SPLL_FUNC_CNTL_2;
38 uint32_t vCG_SPLL_FUNC_CNTL_3;
39 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
40 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
41 uint32_t sclk_value;
42};
43
44typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
45
46struct RV770_SMC_MCLK_VALUE
47{
48 uint32_t vMPLL_AD_FUNC_CNTL;
49 uint32_t vMPLL_AD_FUNC_CNTL_2;
50 uint32_t vMPLL_DQ_FUNC_CNTL;
51 uint32_t vMPLL_DQ_FUNC_CNTL_2;
52 uint32_t vMCLK_PWRMGT_CNTL;
53 uint32_t vDLL_CNTL;
54 uint32_t vMPLL_SS;
55 uint32_t vMPLL_SS2;
56 uint32_t mclk_value;
57};
58
59typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
60
61
62struct RV730_SMC_MCLK_VALUE
63{
64 uint32_t vMCLK_PWRMGT_CNTL;
65 uint32_t vDLL_CNTL;
66 uint32_t vMPLL_FUNC_CNTL;
67 uint32_t vMPLL_FUNC_CNTL2;
68 uint32_t vMPLL_FUNC_CNTL3;
69 uint32_t vMPLL_SS;
70 uint32_t vMPLL_SS2;
71 uint32_t mclk_value;
72};
73
74typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
75
76struct RV770_SMC_VOLTAGE_VALUE
77{
78 uint16_t value;
79 uint8_t index;
80 uint8_t padding;
81};
82
83typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
84
85union RV7XX_SMC_MCLK_VALUE
86{
87 RV770_SMC_MCLK_VALUE mclk770;
88 RV730_SMC_MCLK_VALUE mclk730;
89};
90
91typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
92
93struct RV770_SMC_HW_PERFORMANCE_LEVEL
94{
95 uint8_t arbValue;
96 union{
97 uint8_t seqValue;
98 uint8_t ACIndex;
99 };
100 uint8_t displayWatermark;
101 uint8_t gen2PCIE;
102 uint8_t gen2XSP;
103 uint8_t backbias;
104 uint8_t strobeMode;
105 uint8_t mcFlags;
106 uint32_t aT;
107 uint32_t bSP;
108 RV770_SMC_SCLK_VALUE sclk;
109 RV7XX_SMC_MCLK_VALUE mclk;
110 RV770_SMC_VOLTAGE_VALUE vddc;
111 RV770_SMC_VOLTAGE_VALUE mvdd;
112 RV770_SMC_VOLTAGE_VALUE vddci;
113 uint8_t reserved1;
114 uint8_t reserved2;
115 uint8_t stateFlags;
116 uint8_t padding;
117};
118
119#define SMC_STROBE_RATIO 0x0F
120#define SMC_STROBE_ENABLE 0x10
121
122#define SMC_MC_EDC_RD_FLAG 0x01
123#define SMC_MC_EDC_WR_FLAG 0x02
124#define SMC_MC_RTT_ENABLE 0x04
125#define SMC_MC_STUTTER_EN 0x08
126
127typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
128
129struct RV770_SMC_SWSTATE
130{
131 uint8_t flags;
132 uint8_t padding1;
133 uint8_t padding2;
134 uint8_t padding3;
135 RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
136};
137
138typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
139
140#define RV770_SMC_VOLTAGEMASK_VDDC 0
141#define RV770_SMC_VOLTAGEMASK_MVDD 1
142#define RV770_SMC_VOLTAGEMASK_VDDCI 2
143#define RV770_SMC_VOLTAGEMASK_MAX 4
144
145struct RV770_SMC_VOLTAGEMASKTABLE
146{
147 uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
148 uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
149};
150
151typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
152
153#define MAX_NO_VREG_STEPS 32
154
155struct RV770_SMC_STATETABLE
156{
157 uint8_t thermalProtectType;
158 uint8_t systemFlags;
159 uint8_t maxVDDCIndexInPPTable;
160 uint8_t extraFlags;
161 uint8_t highSMIO[MAX_NO_VREG_STEPS];
162 uint32_t lowSMIO[MAX_NO_VREG_STEPS];
163 RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
164 RV770_SMC_SWSTATE initialState;
165 RV770_SMC_SWSTATE ACPIState;
166 RV770_SMC_SWSTATE driverState;
167 RV770_SMC_SWSTATE ULVState;
168};
169
170typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
171
172#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
173
174#pragma pack(pop)
175
176#define RV770_SMC_SOFT_REGISTERS_START 0x104
177
178#define RV770_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
179#define RV770_SMC_SOFT_REGISTER_baby_step_timer 0x8
180#define RV770_SMC_SOFT_REGISTER_delay_bbias 0xC
181#define RV770_SMC_SOFT_REGISTER_delay_vreg 0x10
182#define RV770_SMC_SOFT_REGISTER_delay_acpi 0x2C
183#define RV770_SMC_SOFT_REGISTER_seq_index 0x64
184#define RV770_SMC_SOFT_REGISTER_mvdd_chg_time 0x68
185#define RV770_SMC_SOFT_REGISTER_mclk_switch_lim 0x78
186#define RV770_SMC_SOFT_REGISTER_mc_block_delay 0x90
187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
189
190int rv770_set_smc_sram_address(struct radeon_device *rdev,
191 u16 smc_address, u16 limit);
192int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
193 u16 smc_start_address, const u8 *src,
194 u16 byte_count, u16 limit);
195void rv770_start_smc(struct radeon_device *rdev);
196void rv770_reset_smc(struct radeon_device *rdev);
197void rv770_stop_smc_clock(struct radeon_device *rdev);
198void rv770_start_smc_clock(struct radeon_device *rdev);
199bool rv770_is_smc_running(struct radeon_device *rdev);
200PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
201PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev);
202int rv770_read_smc_sram_dword(struct radeon_device *rdev,
203 u16 smc_address, u32 *value, u16 limit);
204int rv770_write_smc_sram_dword(struct radeon_device *rdev,
205 u16 smc_address, u32 value, u16 limit);
206int rv770_load_smc_ucode(struct radeon_device *rdev,
207 u16 limit);
208
209#endif
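
Everything between pack(push, 1) and pack(pop) above is shared byte-for-byte with the SMC firmware, so any compiler-inserted padding would shift fields the firmware reads. A hedged compile-time sanity check of the layout (the expected sizes follow from the field lists; the check itself is illustrative, not part of the patch):

#include <assert.h>     /* C11 static_assert */

/* hedged sketch: with pack(1) in effect these sizes are exact;
 * a stray padding byte here would corrupt the shared tables */
static_assert(sizeof(RV770_SMC_VOLTAGE_VALUE) == 4,
	      "voltage entry must be 4 bytes");
static_assert(sizeof(RV770_SMC_SCLK_VALUE) == 24,
	      "sclk entry must be 24 bytes");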
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 85b16266f748..6bef2b7d601b 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -62,6 +62,246 @@
62# define UPLL_FB_DIV(x) ((x) << 0) 62# define UPLL_FB_DIV(x) ((x) << 0)
63# define UPLL_FB_DIV_MASK 0x01FFFFFF 63# define UPLL_FB_DIV_MASK 0x01FFFFFF
64 64
65/* pm registers */
66#define SMC_SRAM_ADDR 0x200
67#define SMC_SRAM_AUTO_INC_DIS (1 << 16)
68#define SMC_SRAM_DATA 0x204
69#define SMC_IO 0x208
70#define SMC_RST_N (1 << 0)
71#define SMC_STOP_MODE (1 << 2)
72#define SMC_CLK_EN (1 << 11)
73#define SMC_MSG 0x20c
74#define HOST_SMC_MSG(x) ((x) << 0)
75#define HOST_SMC_MSG_MASK (0xff << 0)
76#define HOST_SMC_MSG_SHIFT 0
77#define HOST_SMC_RESP(x) ((x) << 8)
78#define HOST_SMC_RESP_MASK (0xff << 8)
79#define HOST_SMC_RESP_SHIFT 8
80#define SMC_HOST_MSG(x) ((x) << 16)
81#define SMC_HOST_MSG_MASK (0xff << 16)
82#define SMC_HOST_MSG_SHIFT 16
83#define SMC_HOST_RESP(x) ((x) << 24)
84#define SMC_HOST_RESP_MASK (0xff << 24)
85#define SMC_HOST_RESP_SHIFT 24
86
87#define SMC_ISR_FFD8_FFDB 0x218
88
89#define CG_SPLL_FUNC_CNTL 0x600
90#define SPLL_RESET (1 << 0)
91#define SPLL_SLEEP (1 << 1)
92#define SPLL_DIVEN (1 << 2)
93#define SPLL_BYPASS_EN (1 << 3)
94#define SPLL_REF_DIV(x) ((x) << 4)
95#define SPLL_REF_DIV_MASK (0x3f << 4)
96#define SPLL_HILEN(x) ((x) << 12)
97#define SPLL_HILEN_MASK (0xf << 12)
98#define SPLL_LOLEN(x) ((x) << 16)
99#define SPLL_LOLEN_MASK (0xf << 16)
100#define CG_SPLL_FUNC_CNTL_2 0x604
101#define SCLK_MUX_SEL(x) ((x) << 0)
102#define SCLK_MUX_SEL_MASK (0x1ff << 0)
103#define CG_SPLL_FUNC_CNTL_3 0x608
104#define SPLL_FB_DIV(x) ((x) << 0)
105#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
106#define SPLL_DITHEN (1 << 28)
107
108#define SPLL_CNTL_MODE 0x610
109#define SPLL_DIV_SYNC (1 << 5)
110
111#define MPLL_AD_FUNC_CNTL 0x624
112#define CLKF(x) ((x) << 0)
113#define CLKF_MASK (0x7f << 0)
114#define CLKR(x) ((x) << 7)
115#define CLKR_MASK (0x1f << 7)
116#define CLKFRAC(x) ((x) << 12)
117#define CLKFRAC_MASK (0x1f << 12)
118#define YCLK_POST_DIV(x) ((x) << 17)
119#define YCLK_POST_DIV_MASK (3 << 17)
120#define IBIAS(x) ((x) << 20)
121#define IBIAS_MASK (0x3ff << 20)
122#define RESET (1 << 30)
123#define PDNB (1 << 31)
124#define MPLL_AD_FUNC_CNTL_2 0x628
125#define BYPASS (1 << 19)
126#define BIAS_GEN_PDNB (1 << 24)
127#define RESET_EN (1 << 25)
128#define VCO_MODE (1 << 29)
129#define MPLL_DQ_FUNC_CNTL 0x62c
130#define MPLL_DQ_FUNC_CNTL_2 0x630
131
132#define GENERAL_PWRMGT 0x63c
133# define GLOBAL_PWRMGT_EN (1 << 0)
134# define STATIC_PM_EN (1 << 1)
135# define THERMAL_PROTECTION_DIS (1 << 2)
136# define THERMAL_PROTECTION_TYPE (1 << 3)
137# define ENABLE_GEN2PCIE (1 << 4)
138# define ENABLE_GEN2XSP (1 << 5)
139# define SW_SMIO_INDEX(x) ((x) << 6)
140# define SW_SMIO_INDEX_MASK (3 << 6)
141# define SW_SMIO_INDEX_SHIFT 6
142# define LOW_VOLT_D2_ACPI (1 << 8)
143# define LOW_VOLT_D3_ACPI (1 << 9)
144# define VOLT_PWRMGT_EN (1 << 10)
145# define BACKBIAS_PAD_EN (1 << 18)
146# define BACKBIAS_VALUE (1 << 19)
147# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
148# define AC_DC_SW (1 << 24)
149
150#define CG_TPC 0x640
151#define SCLK_PWRMGT_CNTL 0x644
152# define SCLK_PWRMGT_OFF (1 << 0)
153# define SCLK_LOW_D1 (1 << 1)
154# define FIR_RESET (1 << 4)
155# define FIR_FORCE_TREND_SEL (1 << 5)
156# define FIR_TREND_MODE (1 << 6)
157# define DYN_GFX_CLK_OFF_EN (1 << 7)
158# define GFX_CLK_FORCE_ON (1 << 8)
159# define GFX_CLK_REQUEST_OFF (1 << 9)
160# define GFX_CLK_FORCE_OFF (1 << 10)
161# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
162# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
163# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
164#define MCLK_PWRMGT_CNTL 0x648
165# define DLL_SPEED(x) ((x) << 0)
166# define DLL_SPEED_MASK (0x1f << 0)
167# define MPLL_PWRMGT_OFF (1 << 5)
168# define DLL_READY (1 << 6)
169# define MC_INT_CNTL (1 << 7)
170# define MRDCKA0_SLEEP (1 << 8)
171# define MRDCKA1_SLEEP (1 << 9)
172# define MRDCKB0_SLEEP (1 << 10)
173# define MRDCKB1_SLEEP (1 << 11)
174# define MRDCKC0_SLEEP (1 << 12)
175# define MRDCKC1_SLEEP (1 << 13)
176# define MRDCKD0_SLEEP (1 << 14)
177# define MRDCKD1_SLEEP (1 << 15)
178# define MRDCKA0_RESET (1 << 16)
179# define MRDCKA1_RESET (1 << 17)
180# define MRDCKB0_RESET (1 << 18)
181# define MRDCKB1_RESET (1 << 19)
182# define MRDCKC0_RESET (1 << 20)
183# define MRDCKC1_RESET (1 << 21)
184# define MRDCKD0_RESET (1 << 22)
185# define MRDCKD1_RESET (1 << 23)
186# define DLL_READY_READ (1 << 24)
187# define USE_DISPLAY_GAP (1 << 25)
188# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
189# define MPLL_TURNOFF_D2 (1 << 28)
190#define DLL_CNTL 0x64c
191# define MRDCKA0_BYPASS (1 << 24)
192# define MRDCKA1_BYPASS (1 << 25)
193# define MRDCKB0_BYPASS (1 << 26)
194# define MRDCKB1_BYPASS (1 << 27)
195# define MRDCKC0_BYPASS (1 << 28)
196# define MRDCKC1_BYPASS (1 << 29)
197# define MRDCKD0_BYPASS (1 << 30)
198# define MRDCKD1_BYPASS (1 << 31)
199
200#define MPLL_TIME 0x654
201# define MPLL_LOCK_TIME(x) ((x) << 0)
202# define MPLL_LOCK_TIME_MASK (0xffff << 0)
203# define MPLL_RESET_TIME(x) ((x) << 16)
204# define MPLL_RESET_TIME_MASK (0xffff << 16)
205
206#define CG_CLKPIN_CNTL 0x660
207# define MUX_TCLK_TO_XCLK (1 << 8)
208# define XTALIN_DIVIDE (1 << 9)
209
210#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
211# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
212# define CURRENT_PROFILE_INDEX_SHIFT 4
213
214#define S0_VID_LOWER_SMIO_CNTL 0x678
215#define S1_VID_LOWER_SMIO_CNTL 0x67c
216#define S2_VID_LOWER_SMIO_CNTL 0x680
217#define S3_VID_LOWER_SMIO_CNTL 0x684
218
219#define CG_FTV 0x690
220#define CG_FFCT_0 0x694
221# define UTC_0(x) ((x) << 0)
222# define UTC_0_MASK (0x3ff << 0)
223# define DTC_0(x) ((x) << 10)
224# define DTC_0_MASK (0x3ff << 10)
225
226#define CG_BSP 0x6d0
227# define BSP(x) ((x) << 0)
228# define BSP_MASK (0xffff << 0)
229# define BSU(x) ((x) << 16)
230# define BSU_MASK (0xf << 16)
231#define CG_AT 0x6d4
232# define CG_R(x) ((x) << 0)
233# define CG_R_MASK (0xffff << 0)
234# define CG_L(x) ((x) << 16)
235# define CG_L_MASK (0xffff << 16)
236#define CG_GIT 0x6d8
237# define CG_GICST(x) ((x) << 0)
238# define CG_GICST_MASK (0xffff << 0)
239# define CG_GIPOT(x) ((x) << 16)
240# define CG_GIPOT_MASK (0xffff << 16)
241
242#define CG_SSP 0x6e8
243# define SST(x) ((x) << 0)
244# define SST_MASK (0xffff << 0)
245# define SSTU(x) ((x) << 16)
246# define SSTU_MASK (0xf << 16)
247
248#define CG_DISPLAY_GAP_CNTL 0x714
249# define DISP1_GAP(x) ((x) << 0)
250# define DISP1_GAP_MASK (3 << 0)
251# define DISP2_GAP(x) ((x) << 2)
252# define DISP2_GAP_MASK (3 << 2)
253# define VBI_TIMER_COUNT(x) ((x) << 4)
254# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
255# define VBI_TIMER_UNIT(x) ((x) << 20)
256# define VBI_TIMER_UNIT_MASK (7 << 20)
257# define DISP1_GAP_MCHG(x) ((x) << 24)
258# define DISP1_GAP_MCHG_MASK (3 << 24)
259# define DISP2_GAP_MCHG(x) ((x) << 26)
260# define DISP2_GAP_MCHG_MASK (3 << 26)
261
262#define CG_SPLL_SPREAD_SPECTRUM 0x790
263#define SSEN (1 << 0)
264#define CLKS(x) ((x) << 4)
265#define CLKS_MASK (0xfff << 4)
266#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
267#define CLKV(x) ((x) << 0)
268#define CLKV_MASK (0x3ffffff << 0)
269#define CG_MPLL_SPREAD_SPECTRUM 0x798
270#define CG_UPLL_SPREAD_SPECTRUM 0x79c
271# define SSEN_MASK 0x00000001
272
273#define CG_CGTT_LOCAL_0 0x7d0
274#define CG_CGTT_LOCAL_1 0x7d4
275
276#define BIOS_SCRATCH_4 0x1734
277
278#define MC_SEQ_MISC0 0x2a00
279#define MC_SEQ_MISC0_GDDR5_SHIFT 28
280#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
281#define MC_SEQ_MISC0_GDDR5_VALUE 5
282
283#define MC_ARB_SQM_RATIO 0x2770
284#define STATE0(x) ((x) << 0)
285#define STATE0_MASK (0xff << 0)
286#define STATE1(x) ((x) << 8)
287#define STATE1_MASK (0xff << 8)
288#define STATE2(x) ((x) << 16)
289#define STATE2_MASK (0xff << 16)
290#define STATE3(x) ((x) << 24)
291#define STATE3_MASK (0xff << 24)
292
293#define MC_ARB_RFSH_RATE 0x27b0
294#define POWERMODE0(x) ((x) << 0)
295#define POWERMODE0_MASK (0xff << 0)
296#define POWERMODE1(x) ((x) << 8)
297#define POWERMODE1_MASK (0xff << 8)
298#define POWERMODE2(x) ((x) << 16)
299#define POWERMODE2_MASK (0xff << 16)
300#define POWERMODE3(x) ((x) << 24)
301#define POWERMODE3_MASK (0xff << 24)
302
303#define CGTS_SM_CTRL_REG 0x9150
304
65/* Registers */ 305/* Registers */
66#define CB_COLOR0_BASE 0x28040 306#define CB_COLOR0_BASE 0x28040
67#define CB_COLOR1_BASE 0x28044 307#define CB_COLOR1_BASE 0x28044
@@ -86,8 +326,8 @@
86#define CONFIG_MEMSIZE 0x5428 326#define CONFIG_MEMSIZE 0x5428
87 327
88#define CP_ME_CNTL 0x86D8 328#define CP_ME_CNTL 0x86D8
89#define CP_ME_HALT (1<<28) 329#define CP_ME_HALT (1 << 28)
90#define CP_PFP_HALT (1<<26) 330#define CP_PFP_HALT (1 << 26)
91#define CP_ME_RAM_DATA 0xC160 331#define CP_ME_RAM_DATA 0xC160
92#define CP_ME_RAM_RADDR 0xC158 332#define CP_ME_RAM_RADDR 0xC158
93#define CP_ME_RAM_WADDR 0xC15C 333#define CP_ME_RAM_WADDR 0xC15C
@@ -157,9 +397,22 @@
157#define GUI_ACTIVE (1<<31) 397#define GUI_ACTIVE (1<<31)
158#define GRBM_STATUS2 0x8014 398#define GRBM_STATUS2 0x8014
159 399
160#define CG_CLKPIN_CNTL 0x660 400#define CG_THERMAL_CTRL 0x72C
161# define MUX_TCLK_TO_XCLK (1 << 8) 401#define DPM_EVENT_SRC(x) ((x) << 0)
162# define XTALIN_DIVIDE (1 << 9) 402#define DPM_EVENT_SRC_MASK (7 << 0)
403#define DIG_THERM_DPM(x) ((x) << 14)
404#define DIG_THERM_DPM_MASK 0x003FC000
405#define DIG_THERM_DPM_SHIFT 14
406
407#define CG_THERMAL_INT 0x734
408#define DIG_THERM_INTH(x) ((x) << 8)
409#define DIG_THERM_INTH_MASK 0x0000FF00
410#define DIG_THERM_INTH_SHIFT 8
411#define DIG_THERM_INTL(x) ((x) << 16)
412#define DIG_THERM_INTL_MASK 0x00FF0000
413#define DIG_THERM_INTL_SHIFT 16
414#define THERM_INT_MASK_HIGH (1 << 24)
415#define THERM_INT_MASK_LOW (1 << 25)
163 416
164#define CG_MULT_THERMAL_STATUS 0x740 417#define CG_MULT_THERMAL_STATUS 0x740
165#define ASIC_T(x) ((x) << 16) 418#define ASIC_T(x) ((x) << 16)
@@ -662,7 +915,22 @@
662#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c 915#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
663#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c 916#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
664 917
665/* PCIE link stuff */ 918/* PCIE indirect regs */
919#define PCIE_P_CNTL 0x40
920# define P_PLL_PWRDN_IN_L1L23 (1 << 3)
921# define P_PLL_BUF_PDNB (1 << 4)
922# define P_PLL_PDNB (1 << 9)
923# define P_ALLOW_PRX_FRONTEND_SHUTOFF (1 << 12)
924/* PCIE PORT regs */
925#define PCIE_LC_CNTL 0xa0
926# define LC_L0S_INACTIVITY(x) ((x) << 8)
927# define LC_L0S_INACTIVITY_MASK (0xf << 8)
928# define LC_L0S_INACTIVITY_SHIFT 8
929# define LC_L1_INACTIVITY(x) ((x) << 12)
930# define LC_L1_INACTIVITY_MASK (0xf << 12)
931# define LC_L1_INACTIVITY_SHIFT 12
932# define LC_PMI_TO_L1_DIS (1 << 16)
933# define LC_ASPM_TO_L1_DIS (1 << 24)
666#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ 934#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
667#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ 935#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
668# define LC_LINK_WIDTH_SHIFT 0 936# define LC_LINK_WIDTH_SHIFT 0
@@ -690,6 +958,9 @@
690# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) 958# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
691# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 959# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
692# define LC_CURRENT_DATA_RATE (1 << 11) 960# define LC_CURRENT_DATA_RATE (1 << 11)
961# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
962# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
963# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
693# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) 964# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
694# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) 965# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
695# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) 966# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
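
The new definitions follow the driver's usual pairing of a shift macro FIELD(x) with a FIELD_MASK, so register updates are mask-then-or read-modify-writes. A hedged sketch using the MPLL_AD_FUNC_CNTL fields added above; the helper name is illustrative and assumes driver context:

/* hedged sketch: program the MPLL feedback/reference dividers with
 * the value/mask macros above */
static void example_set_mpll_dividers(struct radeon_device *rdev,
				      u32 clkf, u32 clkr)
{
	u32 tmp = RREG32(MPLL_AD_FUNC_CNTL);

	tmp &= ~(CLKF_MASK | CLKR_MASK); /* clear both fields */
	tmp |= CLKF(clkf) | CLKR(clkr);  /* shift new values in */
	WREG32(MPLL_AD_FUNC_CNTL, tmp);
}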
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a1b0da6b5808..234906709067 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -32,40 +32,43 @@
32#include "sid.h" 32#include "sid.h"
33#include "atom.h" 33#include "atom.h"
34#include "si_blit_shaders.h" 34#include "si_blit_shaders.h"
35#include "clearstate_si.h"
36#include "radeon_ucode.h"
35 37
36#define SI_PFP_UCODE_SIZE 2144
37#define SI_PM4_UCODE_SIZE 2144
38#define SI_CE_UCODE_SIZE 2144
39#define SI_RLC_UCODE_SIZE 2048
40#define SI_MC_UCODE_SIZE 7769
41#define OLAND_MC_UCODE_SIZE 7863
42 38
43MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); 39MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
44MODULE_FIRMWARE("radeon/TAHITI_me.bin"); 40MODULE_FIRMWARE("radeon/TAHITI_me.bin");
45MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); 41MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
46MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); 42MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
47MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); 43MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
44MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
48MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); 45MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
49MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); 46MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
50MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); 47MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
51MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); 48MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
52MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); 49MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
50MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
53MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); 51MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
54MODULE_FIRMWARE("radeon/VERDE_me.bin"); 52MODULE_FIRMWARE("radeon/VERDE_me.bin");
55MODULE_FIRMWARE("radeon/VERDE_ce.bin"); 53MODULE_FIRMWARE("radeon/VERDE_ce.bin");
56MODULE_FIRMWARE("radeon/VERDE_mc.bin"); 54MODULE_FIRMWARE("radeon/VERDE_mc.bin");
57MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); 55MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
56MODULE_FIRMWARE("radeon/VERDE_smc.bin");
58MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 57MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
59MODULE_FIRMWARE("radeon/OLAND_me.bin"); 58MODULE_FIRMWARE("radeon/OLAND_me.bin");
60MODULE_FIRMWARE("radeon/OLAND_ce.bin"); 59MODULE_FIRMWARE("radeon/OLAND_ce.bin");
61MODULE_FIRMWARE("radeon/OLAND_mc.bin"); 60MODULE_FIRMWARE("radeon/OLAND_mc.bin");
62MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); 61MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
62MODULE_FIRMWARE("radeon/OLAND_smc.bin");
63MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); 63MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
64MODULE_FIRMWARE("radeon/HAINAN_me.bin"); 64MODULE_FIRMWARE("radeon/HAINAN_me.bin");
65MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); 65MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
66MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); 66MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
67MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); 67MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
68MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
68 69
70static void si_pcie_gen3_enable(struct radeon_device *rdev);
71static void si_program_aspm(struct radeon_device *rdev);
69extern int r600_ih_ring_alloc(struct radeon_device *rdev); 72extern int r600_ih_ring_alloc(struct radeon_device *rdev);
70extern void r600_ih_ring_fini(struct radeon_device *rdev); 73extern void r600_ih_ring_fini(struct radeon_device *rdev);
71extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); 74extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -75,6 +78,228 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
75extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 78extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
76extern bool evergreen_is_display_hung(struct radeon_device *rdev); 79extern bool evergreen_is_display_hung(struct radeon_device *rdev);
77 80
81static const u32 verde_rlc_save_restore_register_list[] =
82{
83 (0x8000 << 16) | (0x98f4 >> 2),
84 0x00000000,
85 (0x8040 << 16) | (0x98f4 >> 2),
86 0x00000000,
87 (0x8000 << 16) | (0xe80 >> 2),
88 0x00000000,
89 (0x8040 << 16) | (0xe80 >> 2),
90 0x00000000,
91 (0x8000 << 16) | (0x89bc >> 2),
92 0x00000000,
93 (0x8040 << 16) | (0x89bc >> 2),
94 0x00000000,
95 (0x8000 << 16) | (0x8c1c >> 2),
96 0x00000000,
97 (0x8040 << 16) | (0x8c1c >> 2),
98 0x00000000,
99 (0x9c00 << 16) | (0x98f0 >> 2),
100 0x00000000,
101 (0x9c00 << 16) | (0xe7c >> 2),
102 0x00000000,
103 (0x8000 << 16) | (0x9148 >> 2),
104 0x00000000,
105 (0x8040 << 16) | (0x9148 >> 2),
106 0x00000000,
107 (0x9c00 << 16) | (0x9150 >> 2),
108 0x00000000,
109 (0x9c00 << 16) | (0x897c >> 2),
110 0x00000000,
111 (0x9c00 << 16) | (0x8d8c >> 2),
112 0x00000000,
113 (0x9c00 << 16) | (0xac54 >> 2),
114	0x00000000,
115 0x3,
116 (0x9c00 << 16) | (0x98f8 >> 2),
117 0x00000000,
118 (0x9c00 << 16) | (0x9910 >> 2),
119 0x00000000,
120 (0x9c00 << 16) | (0x9914 >> 2),
121 0x00000000,
122 (0x9c00 << 16) | (0x9918 >> 2),
123 0x00000000,
124 (0x9c00 << 16) | (0x991c >> 2),
125 0x00000000,
126 (0x9c00 << 16) | (0x9920 >> 2),
127 0x00000000,
128 (0x9c00 << 16) | (0x9924 >> 2),
129 0x00000000,
130 (0x9c00 << 16) | (0x9928 >> 2),
131 0x00000000,
132 (0x9c00 << 16) | (0x992c >> 2),
133 0x00000000,
134 (0x9c00 << 16) | (0x9930 >> 2),
135 0x00000000,
136 (0x9c00 << 16) | (0x9934 >> 2),
137 0x00000000,
138 (0x9c00 << 16) | (0x9938 >> 2),
139 0x00000000,
140 (0x9c00 << 16) | (0x993c >> 2),
141 0x00000000,
142 (0x9c00 << 16) | (0x9940 >> 2),
143 0x00000000,
144 (0x9c00 << 16) | (0x9944 >> 2),
145 0x00000000,
146 (0x9c00 << 16) | (0x9948 >> 2),
147 0x00000000,
148 (0x9c00 << 16) | (0x994c >> 2),
149 0x00000000,
150 (0x9c00 << 16) | (0x9950 >> 2),
151 0x00000000,
152 (0x9c00 << 16) | (0x9954 >> 2),
153 0x00000000,
154 (0x9c00 << 16) | (0x9958 >> 2),
155 0x00000000,
156 (0x9c00 << 16) | (0x995c >> 2),
157 0x00000000,
158 (0x9c00 << 16) | (0x9960 >> 2),
159 0x00000000,
160 (0x9c00 << 16) | (0x9964 >> 2),
161 0x00000000,
162 (0x9c00 << 16) | (0x9968 >> 2),
163 0x00000000,
164 (0x9c00 << 16) | (0x996c >> 2),
165 0x00000000,
166 (0x9c00 << 16) | (0x9970 >> 2),
167 0x00000000,
168 (0x9c00 << 16) | (0x9974 >> 2),
169 0x00000000,
170 (0x9c00 << 16) | (0x9978 >> 2),
171 0x00000000,
172 (0x9c00 << 16) | (0x997c >> 2),
173 0x00000000,
174 (0x9c00 << 16) | (0x9980 >> 2),
175 0x00000000,
176 (0x9c00 << 16) | (0x9984 >> 2),
177 0x00000000,
178 (0x9c00 << 16) | (0x9988 >> 2),
179 0x00000000,
180 (0x9c00 << 16) | (0x998c >> 2),
181 0x00000000,
182 (0x9c00 << 16) | (0x8c00 >> 2),
183 0x00000000,
184 (0x9c00 << 16) | (0x8c14 >> 2),
185 0x00000000,
186 (0x9c00 << 16) | (0x8c04 >> 2),
187 0x00000000,
188 (0x9c00 << 16) | (0x8c08 >> 2),
189 0x00000000,
190 (0x8000 << 16) | (0x9b7c >> 2),
191 0x00000000,
192 (0x8040 << 16) | (0x9b7c >> 2),
193 0x00000000,
194 (0x8000 << 16) | (0xe84 >> 2),
195 0x00000000,
196 (0x8040 << 16) | (0xe84 >> 2),
197 0x00000000,
198 (0x8000 << 16) | (0x89c0 >> 2),
199 0x00000000,
200 (0x8040 << 16) | (0x89c0 >> 2),
201 0x00000000,
202 (0x8000 << 16) | (0x914c >> 2),
203 0x00000000,
204 (0x8040 << 16) | (0x914c >> 2),
205 0x00000000,
206 (0x8000 << 16) | (0x8c20 >> 2),
207 0x00000000,
208 (0x8040 << 16) | (0x8c20 >> 2),
209 0x00000000,
210 (0x8000 << 16) | (0x9354 >> 2),
211 0x00000000,
212 (0x8040 << 16) | (0x9354 >> 2),
213 0x00000000,
214 (0x9c00 << 16) | (0x9060 >> 2),
215 0x00000000,
216 (0x9c00 << 16) | (0x9364 >> 2),
217 0x00000000,
218 (0x9c00 << 16) | (0x9100 >> 2),
219 0x00000000,
220 (0x9c00 << 16) | (0x913c >> 2),
221 0x00000000,
222 (0x8000 << 16) | (0x90e0 >> 2),
223 0x00000000,
224 (0x8000 << 16) | (0x90e4 >> 2),
225 0x00000000,
226 (0x8000 << 16) | (0x90e8 >> 2),
227 0x00000000,
228 (0x8040 << 16) | (0x90e0 >> 2),
229 0x00000000,
230 (0x8040 << 16) | (0x90e4 >> 2),
231 0x00000000,
232 (0x8040 << 16) | (0x90e8 >> 2),
233 0x00000000,
234 (0x9c00 << 16) | (0x8bcc >> 2),
235 0x00000000,
236 (0x9c00 << 16) | (0x8b24 >> 2),
237 0x00000000,
238 (0x9c00 << 16) | (0x88c4 >> 2),
239 0x00000000,
240 (0x9c00 << 16) | (0x8e50 >> 2),
241 0x00000000,
242 (0x9c00 << 16) | (0x8c0c >> 2),
243 0x00000000,
244 (0x9c00 << 16) | (0x8e58 >> 2),
245 0x00000000,
246 (0x9c00 << 16) | (0x8e5c >> 2),
247 0x00000000,
248 (0x9c00 << 16) | (0x9508 >> 2),
249 0x00000000,
250 (0x9c00 << 16) | (0x950c >> 2),
251 0x00000000,
252 (0x9c00 << 16) | (0x9494 >> 2),
253 0x00000000,
254 (0x9c00 << 16) | (0xac0c >> 2),
255 0x00000000,
256 (0x9c00 << 16) | (0xac10 >> 2),
257 0x00000000,
258 (0x9c00 << 16) | (0xac14 >> 2),
259 0x00000000,
260 (0x9c00 << 16) | (0xae00 >> 2),
261 0x00000000,
262 (0x9c00 << 16) | (0xac08 >> 2),
263 0x00000000,
264 (0x9c00 << 16) | (0x88d4 >> 2),
265 0x00000000,
266 (0x9c00 << 16) | (0x88c8 >> 2),
267 0x00000000,
268 (0x9c00 << 16) | (0x88cc >> 2),
269 0x00000000,
270 (0x9c00 << 16) | (0x89b0 >> 2),
271 0x00000000,
272 (0x9c00 << 16) | (0x8b10 >> 2),
273 0x00000000,
274 (0x9c00 << 16) | (0x8a14 >> 2),
275 0x00000000,
276 (0x9c00 << 16) | (0x9830 >> 2),
277 0x00000000,
278 (0x9c00 << 16) | (0x9834 >> 2),
279 0x00000000,
280 (0x9c00 << 16) | (0x9838 >> 2),
281 0x00000000,
282 (0x9c00 << 16) | (0x9a10 >> 2),
283 0x00000000,
284 (0x8000 << 16) | (0x9870 >> 2),
285 0x00000000,
286 (0x8000 << 16) | (0x9874 >> 2),
287 0x00000000,
288 (0x8001 << 16) | (0x9870 >> 2),
289 0x00000000,
290 (0x8001 << 16) | (0x9874 >> 2),
291 0x00000000,
292 (0x8040 << 16) | (0x9870 >> 2),
293 0x00000000,
294 (0x8040 << 16) | (0x9874 >> 2),
295 0x00000000,
296 (0x8041 << 16) | (0x9870 >> 2),
297 0x00000000,
298 (0x8041 << 16) | (0x9874 >> 2),
299 0x00000000,
300 0x00000000
301};
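
Each entry in this table appears to be a pair: a selector dword of the form (instance << 16) | (register byte offset >> 2), followed by the value to restore; 0x8000/0x8040 look like per-shader-engine selects and 0x9c00 a broadcast, but none of that is documented in this patch, and the bare 0x3 entry breaks the pattern and is left uninterpreted here. A hedged decoding sketch:

#include <stdint.h>
#include <stdio.h>

/* hedged sketch: decode one (selector, value) pair from the list;
 * selector = (instance << 16) | (register byte offset >> 2) */
static void decode_entry(uint32_t sel, uint32_t val)
{
	uint32_t instance = sel >> 16;
	uint32_t offset = (sel & 0xffff) << 2; /* back to bytes */

	printf("instance 0x%04x reg 0x%04x <- 0x%08x\n",
	       (unsigned)instance, (unsigned)offset, (unsigned)val);
}

int main(void)
{
	decode_entry((0x8000u << 16) | (0x98f4 >> 2), 0x00000000);
	return 0;
}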
302
78static const u32 tahiti_golden_rlc_registers[] = 303static const u32 tahiti_golden_rlc_registers[] =
79{ 304{
80 0xc424, 0xffffffff, 0x00601005, 305 0xc424, 0xffffffff, 0x00601005,
@@ -1320,6 +1545,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1320 const char *chip_name; 1545 const char *chip_name;
1321 const char *rlc_chip_name; 1546 const char *rlc_chip_name;
1322 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; 1547 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1548 size_t smc_req_size;
1323 char fw_name[30]; 1549 char fw_name[30];
1324 int err; 1550 int err;
1325 1551
@@ -1341,6 +1567,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1341 ce_req_size = SI_CE_UCODE_SIZE * 4; 1567 ce_req_size = SI_CE_UCODE_SIZE * 4;
1342 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1568 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1343 mc_req_size = SI_MC_UCODE_SIZE * 4; 1569 mc_req_size = SI_MC_UCODE_SIZE * 4;
1570 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
1344 break; 1571 break;
1345 case CHIP_PITCAIRN: 1572 case CHIP_PITCAIRN:
1346 chip_name = "PITCAIRN"; 1573 chip_name = "PITCAIRN";
@@ -1350,6 +1577,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1350 ce_req_size = SI_CE_UCODE_SIZE * 4; 1577 ce_req_size = SI_CE_UCODE_SIZE * 4;
1351 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1578 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1352 mc_req_size = SI_MC_UCODE_SIZE * 4; 1579 mc_req_size = SI_MC_UCODE_SIZE * 4;
1580 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
1353 break; 1581 break;
1354 case CHIP_VERDE: 1582 case CHIP_VERDE:
1355 chip_name = "VERDE"; 1583 chip_name = "VERDE";
@@ -1359,6 +1587,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1359 ce_req_size = SI_CE_UCODE_SIZE * 4; 1587 ce_req_size = SI_CE_UCODE_SIZE * 4;
1360 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1588 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1361 mc_req_size = SI_MC_UCODE_SIZE * 4; 1589 mc_req_size = SI_MC_UCODE_SIZE * 4;
1590 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
1362 break; 1591 break;
1363 case CHIP_OLAND: 1592 case CHIP_OLAND:
1364 chip_name = "OLAND"; 1593 chip_name = "OLAND";
@@ -1368,6 +1597,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1368 ce_req_size = SI_CE_UCODE_SIZE * 4; 1597 ce_req_size = SI_CE_UCODE_SIZE * 4;
1369 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1598 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1370 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1599 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
1600 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
1371 break; 1601 break;
1372 case CHIP_HAINAN: 1602 case CHIP_HAINAN:
1373 chip_name = "HAINAN"; 1603 chip_name = "HAINAN";
@@ -1377,6 +1607,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1377 ce_req_size = SI_CE_UCODE_SIZE * 4; 1607 ce_req_size = SI_CE_UCODE_SIZE * 4;
1378 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1608 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1379 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1609 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
1610 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
1380 break; 1611 break;
1381 default: BUG(); 1612 default: BUG();
1382 } 1613 }
@@ -1439,6 +1670,17 @@ static int si_init_microcode(struct radeon_device *rdev)
1439 err = -EINVAL; 1670 err = -EINVAL;
1440 } 1671 }
1441 1672
1673 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1674 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev);
1675 if (err)
1676 goto out;
1677 if (rdev->smc_fw->size != smc_req_size) {
1678 printk(KERN_ERR
1679 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1680 rdev->smc_fw->size, fw_name);
1681 err = -EINVAL;
1682 }
1683
1442out: 1684out:
1443 platform_device_unregister(pdev); 1685 platform_device_unregister(pdev);
1444 1686
@@ -1457,6 +1699,8 @@ out:
1457 rdev->rlc_fw = NULL; 1699 rdev->rlc_fw = NULL;
1458 release_firmware(rdev->mc_fw); 1700 release_firmware(rdev->mc_fw);
1459 rdev->mc_fw = NULL; 1701 rdev->mc_fw = NULL;
1702 release_firmware(rdev->smc_fw);
1703 rdev->smc_fw = NULL;
1460 } 1704 }
1461 return err; 1705 return err;
1462} 1706}
@@ -1792,7 +2036,8 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
1792 u32 lb_size, u32 num_heads) 2036 u32 lb_size, u32 num_heads)
1793{ 2037{
1794 struct drm_display_mode *mode = &radeon_crtc->base.mode; 2038 struct drm_display_mode *mode = &radeon_crtc->base.mode;
1795 struct dce6_wm_params wm; 2039 struct dce6_wm_params wm_low, wm_high;
2040 u32 dram_channels;
1796 u32 pixel_period; 2041 u32 pixel_period;
1797 u32 line_time = 0; 2042 u32 line_time = 0;
1798 u32 latency_watermark_a = 0, latency_watermark_b = 0; 2043 u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -1808,38 +2053,83 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
1808 priority_a_cnt = 0; 2053 priority_a_cnt = 0;
1809 priority_b_cnt = 0; 2054 priority_b_cnt = 0;
1810 2055
1811 wm.yclk = rdev->pm.current_mclk * 10;
1812 wm.sclk = rdev->pm.current_sclk * 10;
1813 wm.disp_clk = mode->clock;
1814 wm.src_width = mode->crtc_hdisplay;
1815 wm.active_time = mode->crtc_hdisplay * pixel_period;
1816 wm.blank_time = line_time - wm.active_time;
1817 wm.interlaced = false;
1818 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1819 wm.interlaced = true;
1820 wm.vsc = radeon_crtc->vsc;
1821 wm.vtaps = 1;
1822 if (radeon_crtc->rmx_type != RMX_OFF)
1823 wm.vtaps = 2;
1824 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
1825 wm.lb_size = lb_size;
1826 if (rdev->family == CHIP_ARUBA) 2056 if (rdev->family == CHIP_ARUBA)
1827 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); 2057 dram_channels = evergreen_get_number_of_dram_channels(rdev);
1828 else 2058 else
1829 wm.dram_channels = si_get_number_of_dram_channels(rdev); 2059 dram_channels = si_get_number_of_dram_channels(rdev);
1830 wm.num_heads = num_heads; 2060
2061 /* watermark for high clocks */
2062 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2063 wm_high.yclk =
2064 radeon_dpm_get_mclk(rdev, false) * 10;
2065 wm_high.sclk =
2066 radeon_dpm_get_sclk(rdev, false) * 10;
2067 } else {
2068 wm_high.yclk = rdev->pm.current_mclk * 10;
2069 wm_high.sclk = rdev->pm.current_sclk * 10;
2070 }
2071
2072 wm_high.disp_clk = mode->clock;
2073 wm_high.src_width = mode->crtc_hdisplay;
2074 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
2075 wm_high.blank_time = line_time - wm_high.active_time;
2076 wm_high.interlaced = false;
2077 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2078 wm_high.interlaced = true;
2079 wm_high.vsc = radeon_crtc->vsc;
2080 wm_high.vtaps = 1;
2081 if (radeon_crtc->rmx_type != RMX_OFF)
2082 wm_high.vtaps = 2;
2083 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2084 wm_high.lb_size = lb_size;
2085 wm_high.dram_channels = dram_channels;
2086 wm_high.num_heads = num_heads;
2087
2088 /* watermark for low clocks */
2089 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2090 wm_low.yclk =
2091 radeon_dpm_get_mclk(rdev, true) * 10;
2092 wm_low.sclk =
2093 radeon_dpm_get_sclk(rdev, true) * 10;
2094 } else {
2095 wm_low.yclk = rdev->pm.current_mclk * 10;
2096 wm_low.sclk = rdev->pm.current_sclk * 10;
2097 }
2098
2099 wm_low.disp_clk = mode->clock;
2100 wm_low.src_width = mode->crtc_hdisplay;
2101 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
2102 wm_low.blank_time = line_time - wm_low.active_time;
2103 wm_low.interlaced = false;
2104 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2105 wm_low.interlaced = true;
2106 wm_low.vsc = radeon_crtc->vsc;
2107 wm_low.vtaps = 1;
2108 if (radeon_crtc->rmx_type != RMX_OFF)
2109 wm_low.vtaps = 2;
2110 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2111 wm_low.lb_size = lb_size;
2112 wm_low.dram_channels = dram_channels;
2113 wm_low.num_heads = num_heads;
1831 2114
1832 /* set for high clocks */ 2115 /* set for high clocks */
1833 latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535); 2116 latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
1834 /* set for low clocks */ 2117 /* set for low clocks */
1835 /* wm.yclk = low clk; wm.sclk = low clk */ 2118 latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
1836 latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
1837 2119
1838 /* possibly force display priority to high */ 2120 /* possibly force display priority to high */
1839 /* should really do this at mode validation time... */ 2121 /* should really do this at mode validation time... */
1840 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || 2122 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1841 !dce6_average_bandwidth_vs_available_bandwidth(&wm) || 2123 !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1842 !dce6_check_latency_hiding(&wm) || 2124 !dce6_check_latency_hiding(&wm_high) ||
2125 (rdev->disp_priority == 2)) {
2126 DRM_DEBUG_KMS("force priority to high\n");
2127 priority_a_cnt |= PRIORITY_ALWAYS_ON;
2128 priority_b_cnt |= PRIORITY_ALWAYS_ON;
2129 }
2130 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2131 !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2132 !dce6_check_latency_hiding(&wm_low) ||
1843 (rdev->disp_priority == 2)) { 2133 (rdev->disp_priority == 2)) {
1844 DRM_DEBUG_KMS("force priority to high\n"); 2134 DRM_DEBUG_KMS("force priority to high\n");
1845 priority_a_cnt |= PRIORITY_ALWAYS_ON; 2135 priority_a_cnt |= PRIORITY_ALWAYS_ON;
@@ -1895,6 +2185,10 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
1895 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); 2185 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
1896 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); 2186 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
1897 2187
2188 /* save values for DPM */
2189 radeon_crtc->line_time = line_time;
2190 radeon_crtc->wm_high = latency_watermark_a;
2191 radeon_crtc->wm_low = latency_watermark_b;
1898} 2192}
1899 2193
1900void dce6_bandwidth_update(struct radeon_device *rdev) 2194void dce6_bandwidth_update(struct radeon_device *rdev)
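
The rework above computes two watermark sets instead of one: wm_high from the highest DPM clocks and wm_low from the lowest, falling back to the current clocks when DPM is off, so the display stays within its bandwidth budget across power-state changes. A hedged sketch of the shared per-line timing both sets reuse; units follow pixel_period and the helper name is illustrative:

/* hedged sketch: the per-line budget both watermark sets reuse */
static u32 example_blank_time(u32 line_time, u32 hdisplay,
			      u32 pixel_period)
{
	u32 active_time = hdisplay * pixel_period; /* scanout portion */

	return line_time - active_time; /* slack left for refills */
}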
@@ -3535,8 +3829,8 @@ static void si_mc_program(struct radeon_device *rdev)
3535 } 3829 }
3536} 3830}
3537 3831
3538static void si_vram_gtt_location(struct radeon_device *rdev, 3832void si_vram_gtt_location(struct radeon_device *rdev,
3539 struct radeon_mc *mc) 3833 struct radeon_mc *mc)
3540{ 3834{
3541 if (mc->mc_vram_size > 0xFFC0000000ULL) { 3835 if (mc->mc_vram_size > 0xFFC0000000ULL) {
3542 /* leave room for at least 1024M GTT */ 3836 /* leave room for at least 1024M GTT */
@@ -4282,6 +4576,450 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4282} 4576}
4283 4577
4284/* 4578/*
4579 * Power and clock gating
4580 */
4581static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
4582{
4583 int i;
4584
4585 for (i = 0; i < rdev->usec_timeout; i++) {
4586 if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
4587 break;
4588 udelay(1);
4589 }
4590
4591 for (i = 0; i < rdev->usec_timeout; i++) {
4592 if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
4593 break;
4594 udelay(1);
4595 }
4596}
4597
4598static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
4599 bool enable)
4600{
4601 u32 tmp = RREG32(CP_INT_CNTL_RING0);
4602 u32 mask;
4603 int i;
4604
4605 if (enable)
4606 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4607 else
4608 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4609 WREG32(CP_INT_CNTL_RING0, tmp);
4610
4611 if (!enable) {
4612 /* read a gfx register */
4613 tmp = RREG32(DB_DEPTH_INFO);
4614
4615 mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
4616 for (i = 0; i < rdev->usec_timeout; i++) {
4617 if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
4618 break;
4619 udelay(1);
4620 }
4621 }
4622}
4623
4624static void si_set_uvd_dcm(struct radeon_device *rdev,
4625 bool sw_mode)
4626{
4627 u32 tmp, tmp2;
4628
4629 tmp = RREG32(UVD_CGC_CTRL);
4630 tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
4631 tmp |= DCM | CG_DT(1) | CLK_OD(4);
4632
4633 if (sw_mode) {
4634 tmp &= ~0x7ffff800;
4635 tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
4636 } else {
4637 tmp |= 0x7ffff800;
4638 tmp2 = 0;
4639 }
4640
4641 WREG32(UVD_CGC_CTRL, tmp);
4642 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
4643}
4644
4645static void si_init_uvd_internal_cg(struct radeon_device *rdev)
4646{
4647 bool hw_mode = true;
4648
4649 if (hw_mode) {
4650 si_set_uvd_dcm(rdev, false);
4651 } else {
4652 u32 tmp = RREG32(UVD_CGC_CTRL);
4653 tmp &= ~DCM;
4654 WREG32(UVD_CGC_CTRL, tmp);
4655 }
4656}
4657
4658static u32 si_halt_rlc(struct radeon_device *rdev)
4659{
4660 u32 data, orig;
4661
4662 orig = data = RREG32(RLC_CNTL);
4663
4664 if (data & RLC_ENABLE) {
4665 data &= ~RLC_ENABLE;
4666 WREG32(RLC_CNTL, data);
4667
4668 si_wait_for_rlc_serdes(rdev);
4669 }
4670
4671 return orig;
4672}
4673
4674static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
4675{
4676 u32 tmp;
4677
4678 tmp = RREG32(RLC_CNTL);
4679 if (tmp != rlc)
4680 WREG32(RLC_CNTL, rlc);
4681}
4682
4683static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
4684{
4685 u32 data, orig;
4686
4687 orig = data = RREG32(DMA_PG);
4688 if (enable)
4689 data |= PG_CNTL_ENABLE;
4690 else
4691 data &= ~PG_CNTL_ENABLE;
4692 if (orig != data)
4693 WREG32(DMA_PG, data);
4694}
4695
4696static void si_init_dma_pg(struct radeon_device *rdev)
4697{
4698 u32 tmp;
4699
4700 WREG32(DMA_PGFSM_WRITE, 0x00002000);
4701 WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
4702
4703 for (tmp = 0; tmp < 5; tmp++)
4704 WREG32(DMA_PGFSM_WRITE, 0);
4705}
4706
4707static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4708 bool enable)
4709{
4710 u32 tmp;
4711
4712 if (enable) {
4713 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4714 WREG32(RLC_TTOP_D, tmp);
4715
4716 tmp = RREG32(RLC_PG_CNTL);
4717 tmp |= GFX_PG_ENABLE;
4718 WREG32(RLC_PG_CNTL, tmp);
4719
4720 tmp = RREG32(RLC_AUTO_PG_CTRL);
4721 tmp |= AUTO_PG_EN;
4722 WREG32(RLC_AUTO_PG_CTRL, tmp);
4723 } else {
4724 tmp = RREG32(RLC_AUTO_PG_CTRL);
4725 tmp &= ~AUTO_PG_EN;
4726 WREG32(RLC_AUTO_PG_CTRL, tmp);
4727
4728 tmp = RREG32(DB_RENDER_CONTROL);
4729 }
4730}
4731
4732static void si_init_gfx_cgpg(struct radeon_device *rdev)
4733{
4734 u32 tmp;
4735
4736 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4737
4738 tmp = RREG32(RLC_PG_CNTL);
4739 tmp |= GFX_PG_SRC;
4740 WREG32(RLC_PG_CNTL, tmp);
4741
4742 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4743
4744 tmp = RREG32(RLC_AUTO_PG_CTRL);
4745
4746 tmp &= ~GRBM_REG_SGIT_MASK;
4747 tmp |= GRBM_REG_SGIT(0x700);
4748 tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
4749 WREG32(RLC_AUTO_PG_CTRL, tmp);
4750}
4751
4752static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
4753{
4754 u32 mask = 0, tmp, tmp1;
4755 int i;
4756
4757 si_select_se_sh(rdev, se, sh);
4758 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
4759 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
4760 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
4761
4762 tmp &= 0xffff0000;
4763
4764 tmp |= tmp1;
4765 tmp >>= 16;
4766
4767	for (i = 0; i < rdev->config.si.max_cu_per_sh; i++) {
4768 mask <<= 1;
4769 mask |= 1;
4770 }
4771
4772 return (~tmp) & mask;
4773}
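
si_get_cu_active_bitmap() merges the fuse-disabled and user-disabled CU bits for one SE/SH, then inverts under a width mask so a set bit means the CU is active. A self-contained sketch of the same arithmetic (values illustrative):

#include <stdint.h>
#include <stdio.h>

/* hedged sketch of the mask arithmetic in si_get_cu_active_bitmap() */
static uint32_t active_cu_bitmap(uint32_t cc_cfg, uint32_t user_cfg,
				 unsigned max_cu_per_sh)
{
	uint32_t disabled = ((cc_cfg & 0xffff0000) | user_cfg) >> 16;
	uint32_t width_mask = (1u << max_cu_per_sh) - 1;

	return ~disabled & width_mask;
}

int main(void)
{
	/* CUs 0 and 3 fused off out of 5 -> active bitmap 0b10110 */
	printf("0x%x\n", (unsigned)active_cu_bitmap(0x00090000, 0, 5));
	return 0;
}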
4774
4775static void si_init_ao_cu_mask(struct radeon_device *rdev)
4776{
4777 u32 i, j, k, active_cu_number = 0;
4778 u32 mask, counter, cu_bitmap;
4779 u32 tmp = 0;
4780
4781 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
4782 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
4783 mask = 1;
4784 cu_bitmap = 0;
4785 counter = 0;
4786 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
4787 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
4788 if (counter < 2)
4789 cu_bitmap |= mask;
4790 counter++;
4791 }
4792 mask <<= 1;
4793 }
4794
4795 active_cu_number += counter;
4796 tmp |= (cu_bitmap << (i * 16 + j * 8));
4797 }
4798 }
4799
4800 WREG32(RLC_PG_AO_CU_MASK, tmp);
4801
4802 tmp = RREG32(RLC_MAX_PG_CU);
4803 tmp &= ~MAX_PU_CU_MASK;
4804 tmp |= MAX_PU_CU(active_cu_number);
4805 WREG32(RLC_MAX_PG_CU, tmp);
4806}
4807
4808static void si_enable_cgcg(struct radeon_device *rdev,
4809 bool enable)
4810{
4811 u32 data, orig, tmp;
4812
4813 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
4814
4815 si_enable_gui_idle_interrupt(rdev, enable);
4816
4817 if (enable) {
4818 WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
4819
4820 tmp = si_halt_rlc(rdev);
4821
4822 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
4823 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
4824 WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
4825
4826 si_wait_for_rlc_serdes(rdev);
4827
4828 si_update_rlc(rdev, tmp);
4829
4830 WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
4831
4832 data |= CGCG_EN | CGLS_EN;
4833 } else {
4834 RREG32(CB_CGTT_SCLK_CTRL);
4835 RREG32(CB_CGTT_SCLK_CTRL);
4836 RREG32(CB_CGTT_SCLK_CTRL);
4837 RREG32(CB_CGTT_SCLK_CTRL);
4838
4839 data &= ~(CGCG_EN | CGLS_EN);
4840 }
4841
4842 if (orig != data)
4843 WREG32(RLC_CGCG_CGLS_CTRL, data);
4844}
4845
4846static void si_enable_mgcg(struct radeon_device *rdev,
4847 bool enable)
4848{
4849 u32 data, orig, tmp = 0;
4850
4851 if (enable) {
4852 orig = data = RREG32(CGTS_SM_CTRL_REG);
4853 data = 0x96940200;
4854 if (orig != data)
4855 WREG32(CGTS_SM_CTRL_REG, data);
4856
4857 orig = data = RREG32(CP_MEM_SLP_CNTL);
4858 data |= CP_MEM_LS_EN;
4859 if (orig != data)
4860 WREG32(CP_MEM_SLP_CNTL, data);
4861
4862 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
4863 data &= 0xffffffc0;
4864 if (orig != data)
4865 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
4866
4867 tmp = si_halt_rlc(rdev);
4868
4869 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
4870 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
4871 WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
4872
4873 si_update_rlc(rdev, tmp);
4874 } else {
4875 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
4876 data |= 0x00000003;
4877 if (orig != data)
4878 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
4879
4880 data = RREG32(CP_MEM_SLP_CNTL);
4881 if (data & CP_MEM_LS_EN) {
4882 data &= ~CP_MEM_LS_EN;
4883 WREG32(CP_MEM_SLP_CNTL, data);
4884 }
4885 orig = data = RREG32(CGTS_SM_CTRL_REG);
4886 data |= LS_OVERRIDE | OVERRIDE;
4887 if (orig != data)
4888 WREG32(CGTS_SM_CTRL_REG, data);
4889
4890 tmp = si_halt_rlc(rdev);
4891
4892 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
4893 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
4894 WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
4895
4896 si_update_rlc(rdev, tmp);
4897 }
4898}
4899
4900static void si_enable_uvd_mgcg(struct radeon_device *rdev,
4901 bool enable)
4902{
4903 u32 orig, data, tmp;
4904
4905 if (enable) {
4906 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
4907 tmp |= 0x3fff;
4908 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
4909
4910 orig = data = RREG32(UVD_CGC_CTRL);
4911 data |= DCM;
4912 if (orig != data)
4913 WREG32(UVD_CGC_CTRL, data);
4914
4915 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
4916 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
4917 } else {
4918 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
4919 tmp &= ~0x3fff;
4920 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
4921
4922 orig = data = RREG32(UVD_CGC_CTRL);
4923 data &= ~DCM;
4924 if (orig != data)
4925 WREG32(UVD_CGC_CTRL, data);
4926
4927 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
4928 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
4929 }
4930}
4931
4932static const u32 mc_cg_registers[] =
4933{
4934 MC_HUB_MISC_HUB_CG,
4935 MC_HUB_MISC_SIP_CG,
4936 MC_HUB_MISC_VM_CG,
4937 MC_XPB_CLK_GAT,
4938 ATC_MISC_CG,
4939 MC_CITF_MISC_WR_CG,
4940 MC_CITF_MISC_RD_CG,
4941 MC_CITF_MISC_VM_CG,
4942 VM_L2_CG,
4943};
4944
4945static void si_enable_mc_ls(struct radeon_device *rdev,
4946 bool enable)
4947{
4948 int i;
4949 u32 orig, data;
4950
4951 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
4952 orig = data = RREG32(mc_cg_registers[i]);
4953 if (enable)
4954 data |= MC_LS_ENABLE;
4955 else
4956 data &= ~MC_LS_ENABLE;
4957 if (data != orig)
4958 WREG32(mc_cg_registers[i], data);
4959 }
4960}
4961
4962
4963static void si_init_cg(struct radeon_device *rdev)
4964{
4965 bool has_uvd = true;
4966
4967 si_enable_mgcg(rdev, true);
4968 si_enable_cgcg(rdev, true);
4969 /* disable MC LS on Tahiti */
4970 if (rdev->family == CHIP_TAHITI)
4971 si_enable_mc_ls(rdev, false);
4972 if (has_uvd) {
4973 si_enable_uvd_mgcg(rdev, true);
4974 si_init_uvd_internal_cg(rdev);
4975 }
4976}
4977
4978static void si_fini_cg(struct radeon_device *rdev)
4979{
4980 bool has_uvd = true;
4981
4982 if (has_uvd)
4983 si_enable_uvd_mgcg(rdev, false);
4984 si_enable_cgcg(rdev, false);
4985 si_enable_mgcg(rdev, false);
4986}
4987
4988static void si_init_pg(struct radeon_device *rdev)
4989{
4990 bool has_pg = false;
4991
4992	/* only Cape Verde supports PG */
4993 if (rdev->family == CHIP_VERDE)
4994 has_pg = true;
4995
4996 if (has_pg) {
4997 si_init_ao_cu_mask(rdev);
4998 si_init_dma_pg(rdev);
4999 si_enable_dma_pg(rdev, true);
5000 si_init_gfx_cgpg(rdev);
5001 si_enable_gfx_cgpg(rdev, true);
5002 } else {
5003 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5004 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5005 }
5006}
5007
5008static void si_fini_pg(struct radeon_device *rdev)
5009{
5010 bool has_pg = false;
5011
5012	/* only Cape Verde supports PG */
5013 if (rdev->family == CHIP_VERDE)
5014 has_pg = true;
5015
5016 if (has_pg) {
5017 si_enable_dma_pg(rdev, false);
5018 si_enable_gfx_cgpg(rdev, false);
5019 }
5020}
5021
5022/*
4285 * RLC 5023 * RLC
4286 */ 5024 */
4287void si_rlc_fini(struct radeon_device *rdev) 5025void si_rlc_fini(struct radeon_device *rdev)
@@ -4313,8 +5051,15 @@ void si_rlc_fini(struct radeon_device *rdev)
4313 } 5051 }
4314} 5052}
4315 5053
5054#define RLC_CLEAR_STATE_END_MARKER 0x00000001
5055
4316int si_rlc_init(struct radeon_device *rdev) 5056int si_rlc_init(struct radeon_device *rdev)
4317{ 5057{
5058 volatile u32 *dst_ptr;
5059 u32 dws, data, i, j, k, reg_num;
5060 u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
5061 u64 reg_list_mc_addr;
5062 const struct cs_section_def *cs_data = si_cs_data;
4318 int r; 5063 int r;
4319 5064
4320 /* save restore block */ 5065 /* save restore block */
@@ -4335,18 +5080,44 @@ int si_rlc_init(struct radeon_device *rdev)
4335 } 5080 }
4336 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 5081 r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
4337 &rdev->rlc.save_restore_gpu_addr); 5082 &rdev->rlc.save_restore_gpu_addr);
4338 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4339 if (r) { 5083 if (r) {
5084 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4340 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 5085 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
4341 si_rlc_fini(rdev); 5086 si_rlc_fini(rdev);
4342 return r; 5087 return r;
4343 } 5088 }
4344 5089
5090 if (rdev->family == CHIP_VERDE) {
5091 r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
5092 if (r) {
5093 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
5094 si_rlc_fini(rdev);
5095 return r;
5096 }
5097 /* write the sr buffer */
5098 dst_ptr = rdev->rlc.sr_ptr;
5099 for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
5100 dst_ptr[i] = verde_rlc_save_restore_register_list[i];
5101 }
5102 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
5103 }
5104 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
5105
4345 /* clear state block */ 5106 /* clear state block */
5107 reg_list_num = 0;
5108 dws = 0;
5109 for (i = 0; cs_data[i].section != NULL; i++) {
5110 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
5111 reg_list_num++;
5112 dws += cs_data[i].section[j].reg_count;
5113 }
5114 }
5115 reg_list_blk_index = (3 * reg_list_num + 2);
5116 dws += reg_list_blk_index;
5117
4346 if (rdev->rlc.clear_state_obj == NULL) { 5118 if (rdev->rlc.clear_state_obj == NULL) {
4347 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 5119 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4348 RADEON_GEM_DOMAIN_VRAM, NULL, 5120 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
4349 &rdev->rlc.clear_state_obj);
4350 if (r) { 5121 if (r) {
4351 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 5122 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
4352 si_rlc_fini(rdev); 5123 si_rlc_fini(rdev);
@@ -4360,24 +5131,113 @@ int si_rlc_init(struct radeon_device *rdev)
4360 } 5131 }
4361 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 5132 r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
4362 &rdev->rlc.clear_state_gpu_addr); 5133 &rdev->rlc.clear_state_gpu_addr);
4363 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4364 if (r) { 5134 if (r) {
5135
5136 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4365 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 5137 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
4366 si_rlc_fini(rdev); 5138 si_rlc_fini(rdev);
4367 return r; 5139 return r;
4368 } 5140 }
5141 r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
5142 if (r) {
5143 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
5144 si_rlc_fini(rdev);
5145 return r;
5146 }
5147 /* set up the cs buffer */
5148 dst_ptr = rdev->rlc.cs_ptr;
5149 reg_list_hdr_blk_index = 0;
5150 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
5151 data = upper_32_bits(reg_list_mc_addr);
5152 dst_ptr[reg_list_hdr_blk_index] = data;
5153 reg_list_hdr_blk_index++;
5154 for (i = 0; cs_data[i].section != NULL; i++) {
5155 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
5156 reg_num = cs_data[i].section[j].reg_count;
5157 data = reg_list_mc_addr & 0xffffffff;
5158 dst_ptr[reg_list_hdr_blk_index] = data;
5159 reg_list_hdr_blk_index++;
5160
5161 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
5162 dst_ptr[reg_list_hdr_blk_index] = data;
5163 reg_list_hdr_blk_index++;
5164
5165 data = 0x08000000 | (reg_num * 4);
5166 dst_ptr[reg_list_hdr_blk_index] = data;
5167 reg_list_hdr_blk_index++;
5168
5169 for (k = 0; k < reg_num; k++) {
5170 data = cs_data[i].section[j].extent[k];
5171 dst_ptr[reg_list_blk_index + k] = data;
5172 }
5173 reg_list_mc_addr += reg_num * 4;
5174 reg_list_blk_index += reg_num;
5175 }
5176 }
5177 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
5178
5179 radeon_bo_kunmap(rdev->rlc.clear_state_obj);
5180 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4369 5181
4370 5182 return 0;
4371 5183 }
4372 5184
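
The clear-state buffer that si_rlc_init() fills above has a fixed layout: one dword holding the upper 32 bits of the data area's MC address, then three dwords per register extent (lower address, register byte offset, 0x08000000 | byte count), then RLC_CLEAR_STATE_END_MARKER, giving a (3 * reg_list_num + 2)-dword header with the raw register values packed behind it. A minimal stand-alone C sketch of that arithmetic follows; the extent table, demo values and fake MC address are invented for illustration and nothing here is driver API.

#include <stdint.h>
#include <stdio.h>

struct extent { uint32_t reg_index, reg_count; const uint32_t *vals; };

static const uint32_t demo_vals[] = { 0x11, 0x22, 0x33 };
static const struct extent extents[] = {
	{ 0xA000, 3, demo_vals },			/* made-up register block */
};

int main(void)
{
	uint32_t n = sizeof(extents) / sizeof(extents[0]);
	uint32_t hdr = 3 * n + 2;			/* reg_list_blk_index */
	uint32_t dws = hdr, i, k, out = 0, data_idx = hdr;
	uint32_t buf[64] = { 0 };
	uint64_t mc_addr = 0x100000000ULL + hdr * 4;	/* fake data-area address */

	for (i = 0; i < n; i++)
		dws += extents[i].reg_count;
	buf[out++] = (uint32_t)(mc_addr >> 32);		/* upper address dword */
	for (i = 0; i < n; i++) {
		buf[out++] = (uint32_t)mc_addr;		/* lower address dword */
		buf[out++] = extents[i].reg_index * 4;	/* register byte offset */
		buf[out++] = 0x08000000 | (extents[i].reg_count * 4);
		for (k = 0; k < extents[i].reg_count; k++)
			buf[data_idx + k] = extents[i].vals[k];
		mc_addr += extents[i].reg_count * 4;
		data_idx += extents[i].reg_count;
	}
	buf[out] = 0x00000001;				/* RLC_CLEAR_STATE_END_MARKER */
	printf("%u dwords total, %u header\n", (unsigned)dws, (unsigned)hdr);
	return 0;
}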
5185static void si_rlc_reset(struct radeon_device *rdev)
5186{
5187 u32 tmp = RREG32(GRBM_SOFT_RESET);
5188
5189 tmp |= SOFT_RESET_RLC;
5190 WREG32(GRBM_SOFT_RESET, tmp);
5191 udelay(50);
5192 tmp &= ~SOFT_RESET_RLC;
5193 WREG32(GRBM_SOFT_RESET, tmp);
5194 udelay(50);
5195}
5196
4373 5197 static void si_rlc_stop(struct radeon_device *rdev)
4374 5198 {
4375 5199 WREG32(RLC_CNTL, 0);
5200
5201 si_enable_gui_idle_interrupt(rdev, false);
5202
5203 si_wait_for_rlc_serdes(rdev);
4376 5204 }
4377 5205
4378 5206 static void si_rlc_start(struct radeon_device *rdev)
4379 5207 {
4380 5208 WREG32(RLC_CNTL, RLC_ENABLE);
5209
5210 si_enable_gui_idle_interrupt(rdev, true);
5211
5212 udelay(50);
5213}
5214
5215static bool si_lbpw_supported(struct radeon_device *rdev)
5216{
5217 u32 tmp;
5218
5219 /* Enable LBPW only for DDR3 */
5220 tmp = RREG32(MC_SEQ_MISC0);
5221 if ((tmp & 0xF0000000) == 0xB0000000)
5222 return true;
5223 return false;
5224}
5225
5226static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5227{
5228 u32 tmp;
5229
5230 tmp = RREG32(RLC_LB_CNTL);
5231 if (enable)
5232 tmp |= LOAD_BALANCE_ENABLE;
5233 else
5234 tmp &= ~LOAD_BALANCE_ENABLE;
5235 WREG32(RLC_LB_CNTL, tmp);
5236
5237 if (!enable) {
5238 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5239 WREG32(SPI_LB_CU_MASK, 0x00ff);
5240 }
4381 5241 }
4382 5242
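
si_lbpw_supported() and si_enable_lbpw() above are both simple read-modify-write tests on single registers: a nibble compare on MC_SEQ_MISC0 (per the driver comment, 0xB in bits 31:28 identifies DDR3) and a one-bit toggle in RLC_LB_CNTL. A stand-alone sketch of the same logic, with the register and bit position mocked up:

#include <stdint.h>
#include <stdio.h>

#define LOAD_BALANCE_ENABLE (1u << 0)	/* placeholder bit; the real one lives in sid.h */

static uint32_t rlc_lb_cntl;		/* stand-in for RLC_LB_CNTL */

static int lbpw_supported(uint32_t mc_seq_misc0)
{
	/* per the driver comment, memory-type nibble 0xB means DDR3 */
	return (mc_seq_misc0 & 0xF0000000) == 0xB0000000;
}

static void enable_lbpw(int enable)
{
	uint32_t tmp = rlc_lb_cntl;	/* read */
	if (enable)
		tmp |= LOAD_BALANCE_ENABLE;
	else
		tmp &= ~LOAD_BALANCE_ENABLE;
	rlc_lb_cntl = tmp;		/* write back */
}

int main(void)
{
	enable_lbpw(lbpw_supported(0xB0001234));
	printf("RLC_LB_CNTL = 0x%08x\n", rlc_lb_cntl);	/* bit set: DDR3 */
	return 0;
}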
4383static int si_rlc_resume(struct radeon_device *rdev) 5243static int si_rlc_resume(struct radeon_device *rdev)
@@ -4390,14 +5250,18 @@ static int si_rlc_resume(struct radeon_device *rdev)
4390 5250
4391 5251 si_rlc_stop(rdev);
4392 5252
5253 si_rlc_reset(rdev);
5254
5255 si_init_pg(rdev);
5256
5257 si_init_cg(rdev);
5258
4393 5259 WREG32(RLC_RL_BASE, 0);
4394 5260 WREG32(RLC_RL_SIZE, 0);
4395 5261 WREG32(RLC_LB_CNTL, 0);
4396 5262 WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
4397 5263 WREG32(RLC_LB_CNTR_INIT, 0);
4398
5264 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
4399 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4400 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4401 5265
4402 5266 WREG32(RLC_MC_CNTL, 0);
4403 5267 WREG32(RLC_UCODE_CNTL, 0);
@@ -4409,6 +5273,8 @@ static int si_rlc_resume(struct radeon_device *rdev)
4409 5273 }
4410 5274 WREG32(RLC_UCODE_ADDR, 0);
4411 5275
5276 si_enable_lbpw(rdev, si_lbpw_supported(rdev));
5277
4412 5278 si_rlc_start(rdev);
4413 5279
4414 5280 return 0;
@@ -4578,6 +5444,7 @@ int si_irq_set(struct radeon_device *rdev)
4578 5444 u32 grbm_int_cntl = 0;
4579 5445 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
4580 5446 u32 dma_cntl, dma_cntl1;
5447 u32 thermal_int = 0;
4581 5448
4582 5449 if (!rdev->irq.installed) {
4583 5450 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -4603,6 +5470,9 @@ int si_irq_set(struct radeon_device *rdev)
4603 5470 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
4604 5471 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
4605 5472
5473 thermal_int = RREG32(CG_THERMAL_INT) &
5474 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
5475
4606 5476 /* enable CP interrupts on all rings */
4607 5477 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
4608 5478 DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -4689,6 +5559,11 @@ int si_irq_set(struct radeon_device *rdev)
4689 5559
4690 5560 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
4691 5561
5562 if (rdev->irq.dpm_thermal) {
5563 DRM_DEBUG("dpm thermal\n");
5564 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
5565 }
5566
4692 5567 if (rdev->num_crtc >= 2) {
4693 5568 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
4694 5569 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
@@ -4724,6 +5599,8 @@ int si_irq_set(struct radeon_device *rdev)
4724 5599 WREG32(DC_HPD6_INT_CONTROL, hpd6);
4725 5600 }
4726 5601
5602 WREG32(CG_THERMAL_INT, thermal_int);
5603
4727 5604 return 0;
4728 5605 }
4729 5606
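
The thermal handling added to si_irq_set() above follows the function's usual pattern: read CG_THERMAL_INT once with the mask bits cleared, set them again only if dpm_thermal interrupts were requested, and write the register back once at the end. A hypothetical stand-alone sketch (the register and bit positions are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define THERM_INT_MASK_HIGH (1u << 24)	/* placeholder bit positions */
#define THERM_INT_MASK_LOW  (1u << 25)

static uint32_t cg_thermal_int = THERM_INT_MASK_HIGH;	/* fake register */

static void irq_set(int dpm_thermal)
{
	uint32_t thermal_int = cg_thermal_int &
		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);	/* default: off */

	if (dpm_thermal)
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	cg_thermal_int = thermal_int;				/* single write back */
}

int main(void)
{
	irq_set(0);
	printf("thermal ints off: 0x%08x\n", cg_thermal_int);
	irq_set(1);
	printf("thermal ints on:  0x%08x\n", cg_thermal_int);
	return 0;
}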
@@ -4888,6 +5765,7 @@ int si_irq_process(struct radeon_device *rdev)
4888 5765 u32 src_id, src_data, ring_id;
4889 5766 u32 ring_index;
4890 5767 bool queue_hotplug = false;
5768 bool queue_thermal = false;
4891 5769
4892 5770 if (!rdev->ih.enabled || rdev->shutdown)
4893 5771 return IRQ_NONE;
@@ -5158,6 +6036,16 @@ restart_ih:
5158 6036 DRM_DEBUG("IH: DMA trap\n");
5159 6037 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
5160 6038 break;
6039 case 230: /* thermal low to high */
6040 DRM_DEBUG("IH: thermal low to high\n");
6041 rdev->pm.dpm.thermal.high_to_low = false;
6042 queue_thermal = true;
6043 break;
6044 case 231: /* thermal high to low */
6045 DRM_DEBUG("IH: thermal high to low\n");
6046 rdev->pm.dpm.thermal.high_to_low = true;
6047 queue_thermal = true;
6048 break;
5161 6049 case 233: /* GUI IDLE */
5162 6050 DRM_DEBUG("IH: GUI idle\n");
5163 6051 break;
@@ -5176,6 +6064,8 @@ restart_ih:
5176 6064 }
5177 6065 if (queue_hotplug)
5178 6066 schedule_work(&rdev->hotplug_work);
6067 if (queue_thermal && rdev->pm.dpm_enabled)
6068 schedule_work(&rdev->pm.dpm.thermal.work);
5179 6069 rdev->ih.rptr = rptr;
5180 6070 WREG32(IH_RB_RPTR, rdev->ih.rptr);
5181 6071 atomic_set(&rdev->ih.lock, 0);
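
The thermal cases added to the IH loop above do no real work in interrupt context: src_id 230/231 only latch the trip direction in rdev->pm.dpm.thermal.high_to_low and set a flag, and the flag becomes a schedule_work() call once the ring has been drained. A stand-alone mock of that split, with the work item replaced by a direct call:

#include <stdbool.h>
#include <stdio.h>

static bool high_to_low;	/* latched trip direction */
static bool queue_thermal;

static void thermal_work(void)	/* stands in for the dpm thermal worker */
{
	printf("thermal work: %s\n", high_to_low ? "high to low" : "low to high");
}

static void ih_process(int src_id)
{
	switch (src_id) {
	case 230:		/* thermal low to high */
		high_to_low = false;
		queue_thermal = true;
		break;
	case 231:		/* thermal high to low */
		high_to_low = true;
		queue_thermal = true;
		break;
	}
	if (queue_thermal) {	/* the driver calls schedule_work() here */
		queue_thermal = false;
		thermal_work();
	}
}

int main(void)
{
	ih_process(230);
	ih_process(231);
	return 0;
}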
@@ -5270,6 +6160,11 @@ static int si_startup(struct radeon_device *rdev)
5270 6160 struct radeon_ring *ring;
5271 6161 int r;
5272 6162
6163 /* enable pcie gen2/3 link */
6164 si_pcie_gen3_enable(rdev);
6165 /* enable aspm */
6166 si_program_aspm(rdev);
6167
5273 6168 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
5274 6169 !rdev->rlc_fw || !rdev->mc_fw) {
5275 6170 r = si_init_microcode(rdev);
@@ -5609,6 +6504,8 @@ void si_fini(struct radeon_device *rdev)
5609 6504 cayman_dma_fini(rdev);
5610 6505 si_irq_fini(rdev);
5611 6506 si_rlc_fini(rdev);
6507 si_fini_cg(rdev);
6508 si_fini_pg(rdev);
5612 6509 radeon_wb_fini(rdev);
5613 6510 radeon_vm_manager_fini(rdev);
5614 6511 radeon_ib_pool_fini(rdev);
@@ -5735,3 +6632,361 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5735 6632
5736 6633 return 0;
5737 6634 }
6635
6636static void si_pcie_gen3_enable(struct radeon_device *rdev)
6637{
6638 struct pci_dev *root = rdev->pdev->bus->self;
6639 int bridge_pos, gpu_pos;
6640 u32 speed_cntl, mask, current_data_rate;
6641 int ret, i;
6642 u16 tmp16;
6643
6644 if (radeon_pcie_gen2 == 0)
6645 return;
6646
6647 if (rdev->flags & RADEON_IS_IGP)
6648 return;
6649
6650 if (!(rdev->flags & RADEON_IS_PCIE))
6651 return;
6652
6653 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
6654 if (ret != 0)
6655 return;
6656
6657 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
6658 return;
6659
6660 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
6661 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
6662 LC_CURRENT_DATA_RATE_SHIFT;
6663 if (mask & DRM_PCIE_SPEED_80) {
6664 if (current_data_rate == 2) {
6665 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
6666 return;
6667 }
6668 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
6669 } else if (mask & DRM_PCIE_SPEED_50) {
6670 if (current_data_rate == 1) {
6671 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
6672 return;
6673 }
6674 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
6675 }
6676
6677 bridge_pos = pci_pcie_cap(root);
6678 if (!bridge_pos)
6679 return;
6680
6681 gpu_pos = pci_pcie_cap(rdev->pdev);
6682 if (!gpu_pos)
6683 return;
6684
6685 if (mask & DRM_PCIE_SPEED_80) {
6686 /* re-try equalization if gen3 is not already enabled */
6687 if (current_data_rate != 2) {
6688 u16 bridge_cfg, gpu_cfg;
6689 u16 bridge_cfg2, gpu_cfg2;
6690 u32 max_lw, current_lw, tmp;
6691
6692 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
6693 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
6694
6695 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
6696 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
6697
6698 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
6699 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
6700
6701 tmp = RREG32_PCIE(PCIE_LC_STATUS1);
6702 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
6703 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
6704
6705 if (current_lw < max_lw) {
6706 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
6707 if (tmp & LC_RENEGOTIATION_SUPPORT) {
6708 tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
6709 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
6710 tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
6711 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
6712 }
6713 }
6714
6715 for (i = 0; i < 10; i++) {
6716 /* check status */
6717 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
6718 if (tmp16 & PCI_EXP_DEVSTA_TRPND)
6719 break;
6720
6721 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
6722 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
6723
6724 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
6725 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
6726
6727 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
6728 tmp |= LC_SET_QUIESCE;
6729 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
6730
6731 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
6732 tmp |= LC_REDO_EQ;
6733 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
6734
6735 mdelay(100);
6736
6737 /* linkctl */
6738 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
6739 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
6740 tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
6741 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
6742
6743 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
6744 tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
6745 tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
6746 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
6747
6748 /* linkctl2 */
6749 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
6750 tmp16 &= ~((1 << 4) | (7 << 9));
6751 tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
6752 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
6753
6754 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
6755 tmp16 &= ~((1 << 4) | (7 << 9));
6756 tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
6757 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
6758
6759 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
6760 tmp &= ~LC_SET_QUIESCE;
6761 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
6762 }
6763 }
6764 }
6765
6766 /* set the link speed */
6767 speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
6768 speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
6769 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
6770
6771 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
6772 tmp16 &= ~0xf;
6773 if (mask & DRM_PCIE_SPEED_80)
6774 tmp16 |= 3; /* gen3 */
6775 else if (mask & DRM_PCIE_SPEED_50)
6776 tmp16 |= 2; /* gen2 */
6777 else
6778 tmp16 |= 1; /* gen1 */
6779 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
6780
6781 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
6782 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
6783 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
6784
6785 for (i = 0; i < rdev->usec_timeout; i++) {
6786 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
6787 if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
6788 break;
6789 udelay(1);
6790 }
6791}
6792
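
After the optional gen3 equalization retraining, si_pcie_gen3_enable() above programs the target link speed: the low four bits of PCI_EXP_LNKCTL2 select the rate (1 = 2.5GT/s, 2 = 5GT/s, 3 = 8GT/s per the PCIe spec), so the field is masked out and the highest advertised speed written in. A sketch with the config-space word mocked as a plain variable and placeholder capability flags:

#include <stdint.h>
#include <stdio.h>

#define SPEED_50 (1u << 0)	/* placeholder caps, not the DRM_PCIE_SPEED_* values */
#define SPEED_80 (1u << 1)

static uint16_t lnkctl2 = 0x0001;	/* fake PCI_EXP_LNKCTL2, currently gen1 */

static void set_target_speed(uint32_t mask)
{
	uint16_t tmp16 = lnkctl2;

	tmp16 &= ~0xf;			/* clear target link speed field */
	if (mask & SPEED_80)
		tmp16 |= 3;		/* gen3, 8GT/s */
	else if (mask & SPEED_50)
		tmp16 |= 2;		/* gen2, 5GT/s */
	else
		tmp16 |= 1;		/* gen1, 2.5GT/s */
	lnkctl2 = tmp16;
}

int main(void)
{
	set_target_speed(SPEED_50 | SPEED_80);
	printf("LNKCTL2 = 0x%04x\n", lnkctl2);	/* target speed = 3 */
	return 0;
}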
6793static void si_program_aspm(struct radeon_device *rdev)
6794{
6795 u32 data, orig;
6796 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
6797 bool disable_clkreq = false;
6798
6799 if (!(rdev->flags & RADEON_IS_PCIE))
6800 return;
6801
6802 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
6803 data &= ~LC_XMIT_N_FTS_MASK;
6804 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
6805 if (orig != data)
6806 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
6807
6808 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
6809 data |= LC_GO_TO_RECOVERY;
6810 if (orig != data)
6811 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
6812
6813 orig = data = RREG32_PCIE(PCIE_P_CNTL);
6814 data |= P_IGNORE_EDB_ERR;
6815 if (orig != data)
6816 WREG32_PCIE(PCIE_P_CNTL, data);
6817
6818 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
6819 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
6820 data |= LC_PMI_TO_L1_DIS;
6821 if (!disable_l0s)
6822 data |= LC_L0S_INACTIVITY(7);
6823
6824 if (!disable_l1) {
6825 data |= LC_L1_INACTIVITY(7);
6826 data &= ~LC_PMI_TO_L1_DIS;
6827 if (orig != data)
6828 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
6829
6830 if (!disable_plloff_in_l1) {
6831 bool clk_req_support;
6832
6833 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
6834 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
6835 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
6836 if (orig != data)
6837 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
6838
6839 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
6840 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
6841 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
6842 if (orig != data)
6843 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
6844
6845 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
6846 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
6847 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
6848 if (orig != data)
6849 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
6850
6851 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
6852 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
6853 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
6854 if (orig != data)
6855 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
6856
6857 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
6858 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
6859 data &= ~PLL_RAMP_UP_TIME_0_MASK;
6860 if (orig != data)
6861 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
6862
6863 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
6864 data &= ~PLL_RAMP_UP_TIME_1_MASK;
6865 if (orig != data)
6866 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
6867
6868 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
6869 data &= ~PLL_RAMP_UP_TIME_2_MASK;
6870 if (orig != data)
6871 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
6872
6873 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
6874 data &= ~PLL_RAMP_UP_TIME_3_MASK;
6875 if (orig != data)
6876 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
6877
6878 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
6879 data &= ~PLL_RAMP_UP_TIME_0_MASK;
6880 if (orig != data)
6881 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
6882
6883 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
6884 data &= ~PLL_RAMP_UP_TIME_1_MASK;
6885 if (orig != data)
6886 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
6887
6888 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
6889 data &= ~PLL_RAMP_UP_TIME_2_MASK;
6890 if (orig != data)
6891 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
6892
6893 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
6894 data &= ~PLL_RAMP_UP_TIME_3_MASK;
6895 if (orig != data)
6896 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
6897 }
6898 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
6899 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
6900 data |= LC_DYN_LANES_PWR_STATE(3);
6901 if (orig != data)
6902 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
6903
6904 orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
6905 data &= ~LS2_EXIT_TIME_MASK;
6906 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
6907 data |= LS2_EXIT_TIME(5);
6908 if (orig != data)
6909 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
6910
6911 orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
6912 data &= ~LS2_EXIT_TIME_MASK;
6913 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
6914 data |= LS2_EXIT_TIME(5);
6915 if (orig != data)
6916 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
6917
6918 if (!disable_clkreq) {
6919 struct pci_dev *root = rdev->pdev->bus->self;
6920 u32 lnkcap;
6921
6922 clk_req_support = false;
6923 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
6924 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
6925 clk_req_support = true;
6926 } else {
6927 clk_req_support = false;
6928 }
6929
6930 if (clk_req_support) {
6931 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
6932 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
6933 if (orig != data)
6934 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
6935
6936 orig = data = RREG32(THM_CLK_CNTL);
6937 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
6938 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
6939 if (orig != data)
6940 WREG32(THM_CLK_CNTL, data);
6941
6942 orig = data = RREG32(MISC_CLK_CNTL);
6943 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
6944 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
6945 if (orig != data)
6946 WREG32(MISC_CLK_CNTL, data);
6947
6948 orig = data = RREG32(CG_CLKPIN_CNTL);
6949 data &= ~BCLK_AS_XCLK;
6950 if (orig != data)
6951 WREG32(CG_CLKPIN_CNTL, data);
6952
6953 orig = data = RREG32(CG_CLKPIN_CNTL_2);
6954 data &= ~FORCE_BIF_REFCLK_EN;
6955 if (orig != data)
6956 WREG32(CG_CLKPIN_CNTL_2, data);
6957
6958 orig = data = RREG32(MPLL_BYPASSCLK_SEL);
6959 data &= ~MPLL_CLKOUT_SEL_MASK;
6960 data |= MPLL_CLKOUT_SEL(4);
6961 if (orig != data)
6962 WREG32(MPLL_BYPASSCLK_SEL, data);
6963
6964 orig = data = RREG32(SPLL_CNTL_MODE);
6965 data &= ~SPLL_REFCLK_SEL_MASK;
6966 if (orig != data)
6967 WREG32(SPLL_CNTL_MODE, data);
6968 }
6969 }
6970 } else {
6971 if (orig != data)
6972 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
6973 }
6974
6975 orig = data = RREG32_PCIE(PCIE_CNTL2);
6976 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
6977 if (orig != data)
6978 WREG32_PCIE(PCIE_CNTL2, data);
6979
6980 if (!disable_l0s) {
6981 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
6982 if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
6983 data = RREG32_PCIE(PCIE_LC_STATUS1);
6984 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
6985 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
6986 data &= ~LC_L0S_INACTIVITY_MASK;
6987 if (orig != data)
6988 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
6989 }
6990 }
6991 }
6992}
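
si_program_aspm() above repeats one idiom throughout: orig = data = RREG32(reg), modify data, and write back only when the value actually changed, so unchanged registers are never rewritten. A stand-alone sketch of the shape; update_bits() and the register array are inventions for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* fake register file */
static unsigned int writes;	/* count real write-backs */

static void update_bits(unsigned int reg, uint32_t clear, uint32_t set)
{
	uint32_t orig, data;

	orig = data = regs[reg];	/* one read */
	data &= ~clear;
	data |= set;
	if (orig != data) {		/* skip the write when nothing changed */
		regs[reg] = data;
		writes++;
	}
}

int main(void)
{
	update_bits(0, 0x0f, 0x05);	/* changes the register: one write */
	update_bits(0, 0x0f, 0x05);	/* idempotent: no second write */
	printf("reg0 = 0x%08x after %u write(s)\n", regs[0], writes);
	return 0;
}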
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
new file mode 100644
index 000000000000..a7e97cd05e96
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -0,0 +1,6407 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "sid.h"
27#include "r600_dpm.h"
28#include "si_dpm.h"
29#include "atom.h"
30#include <linux/math64.h>
31#include <linux/seq_file.h>
32
33#define MC_CG_ARB_FREQ_F0 0x0a
34#define MC_CG_ARB_FREQ_F1 0x0b
35#define MC_CG_ARB_FREQ_F2 0x0c
36#define MC_CG_ARB_FREQ_F3 0x0d
37
38#define SMC_RAM_END 0x20000
39
40#define DDR3_DRAM_ROWS 0x2000
41
42#define SCLK_MIN_DEEPSLEEP_FREQ 1350
43
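
Each row of the si_cac_config_reg tables that follow appears to encode { offset, mask, shift, value, type }: programming an entry is a masked insert of value at shift into the register at offset. A hypothetical stand-alone sketch applying the first two rows of cac_weights_tahiti (the register file and helper are invented; the real path goes through the SMC):

#include <stdint.h>
#include <stdio.h>

struct cac_entry { uint32_t offset, mask, shift, value; };

static uint32_t fake_regs[0x200];	/* stand-in for the CAC config registers */

static void apply_cac_entry(const struct cac_entry *e)
{
	uint32_t v = fake_regs[e->offset];

	v = (v & ~e->mask) | ((e->value << e->shift) & e->mask);
	fake_regs[e->offset] = v;
}

int main(void)
{
	/* first two rows of cac_weights_tahiti: low and high halves of reg 0 */
	const struct cac_entry rows[] = {
		{ 0x0, 0x0000ffff, 0,  0xc },
		{ 0x0, 0xffff0000, 16, 0x0 },
	};

	apply_cac_entry(&rows[0]);
	apply_cac_entry(&rows[1]);
	printf("reg 0x0 = 0x%08x\n", fake_regs[0]);	/* 0x0000000c */
	return 0;
}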
44static const struct si_cac_config_reg cac_weights_tahiti[] =
45{
46 { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
47 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
48 { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
49 { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
50 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
51 { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
52 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
53 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
54 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
55 { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
56 { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
57 { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
58 { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
59 { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
60 { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
61 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
62 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
63 { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
64 { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
65 { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
66 { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
67 { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
68 { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
69 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
70 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
71 { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
72 { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
73 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
74 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
75 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
76 { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
77 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
78 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
79 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
80 { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
81 { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
82 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
83 { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
84 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
85 { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
86 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
87 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
88 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
89 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
90 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
91 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
92 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
93 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
94 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
95 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
96 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
97 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
98 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
99 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
100 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
101 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
102 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
103 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
104 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
105 { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
106 { 0xFFFFFFFF }
107};
108
109static const struct si_cac_config_reg lcac_tahiti[] =
110{
111 { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
112 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
113 { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
114 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
115 { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
116 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
117 { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
118 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
119 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
120 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
121 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
122 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
123 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
124 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
125 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
126 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
127 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
128 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
129 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
130 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
131 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
132 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
133 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
134 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
135 { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
136 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
137 { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
138 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
139 { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
140 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
141 { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
142 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
143 { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
144 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
145 { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
146 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
147 { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
148 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
149 { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
150 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
151 { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
152 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
153 { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
154 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
155 { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
156 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
157 { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
158 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
159 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
160 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
161 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
162 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
163 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
164 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
165 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
166 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
167 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
168 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
169 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
170 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
171 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
172 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
173 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
174 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
175 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
176 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
177 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
178 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
179 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
180 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
181 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
182 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
183 { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
184 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
185 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
186 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
187 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
188 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
189 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
190 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
191 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
192 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
193 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
194 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
195 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
196 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
197 { 0xFFFFFFFF }
198
199};
200
201static const struct si_cac_config_reg cac_override_tahiti[] =
202{
203 { 0xFFFFFFFF }
204};
205
206static const struct si_powertune_data powertune_data_tahiti =
207{
208 ((1 << 16) | 27027),
209 6,
210 0,
211 4,
212 95,
213 {
214 0UL,
215 0UL,
216 4521550UL,
217 309631529UL,
218 -1270850L,
219 4513710L,
220 40
221 },
222 595000000UL,
223 12,
224 {
225 0,
226 0,
227 0,
228 0,
229 0,
230 0,
231 0,
232 0
233 },
234 true
235};
236
237static const struct si_dte_data dte_data_tahiti =
238{
239 { 1159409, 0, 0, 0, 0 },
240 { 777, 0, 0, 0, 0 },
241 2,
242 54000,
243 127000,
244 25,
245 2,
246 10,
247 13,
248 { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
249 { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
250 { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
251 85,
252 false
253};
254
255static const struct si_dte_data dte_data_tahiti_le =
256{
257 { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
258 { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
259 0x5,
260 0xAFC8,
261 0x64,
262 0x32,
263 1,
264 0,
265 0x10,
266 { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
267 { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
268 { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
269 85,
270 true
271};
272
273static const struct si_dte_data dte_data_tahiti_pro =
274{
275 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
276 { 0x0, 0x0, 0x0, 0x0, 0x0 },
277 5,
278 45000,
279 100,
280 0xA,
281 1,
282 0,
283 0x10,
284 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
285 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
286 { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
287 90,
288 true
289};
290
291static const struct si_dte_data dte_data_new_zealand =
292{
293 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
294 { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
295 0x5,
296 0xAFC8,
297 0x69,
298 0x32,
299 1,
300 0,
301 0x10,
302 { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
303 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
304 { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
305 85,
306 true
307};
308
309static const struct si_dte_data dte_data_aruba_pro =
310{
311 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
312 { 0x0, 0x0, 0x0, 0x0, 0x0 },
313 5,
314 45000,
315 100,
316 0xA,
317 1,
318 0,
319 0x10,
320 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
321 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
322 { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
323 90,
324 true
325};
326
327static const struct si_dte_data dte_data_malta =
328{
329 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
330 { 0x0, 0x0, 0x0, 0x0, 0x0 },
331 5,
332 45000,
333 100,
334 0xA,
335 1,
336 0,
337 0x10,
338 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
339 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
340 { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
341 90,
342 true
343};
344
345static const struct si_cac_config_reg cac_weights_pitcairn[] =
346{
347 { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
348 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
349 { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
350 { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
351 { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
352 { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
353 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
354 { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
355 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
356 { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
357 { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
358 { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
359 { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
360 { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
361 { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
362 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
363 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
364 { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
365 { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
366 { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
367 { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
368 { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
369 { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
370 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
371 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
372 { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
373 { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
374 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
375 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
376 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
377 { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
378 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
379 { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
380 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
381 { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
382 { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
383 { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
384 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
385 { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
386 { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
387 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
388 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
389 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
390 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
391 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
392 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
393 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
394 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
395 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
396 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
397 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
398 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
399 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
400 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
401 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
402 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
403 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
404 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
405 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
406 { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
407 { 0xFFFFFFFF }
408};
409
410static const struct si_cac_config_reg lcac_pitcairn[] =
411{
412 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
413 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
414 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
415 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
416 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
417 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
418 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
419 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
420 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
421 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
422 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
423 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
424 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
425 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
426 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
427 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
428 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
429 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
430 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
431 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
432 { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
433 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
434 { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
435 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
436 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
437 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
438 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
439 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
440 { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
441 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
442 { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
443 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
444 { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
445 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
446 { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
447 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
448 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
449 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
450 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
451 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
452 { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
453 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
454 { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
455 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
456 { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
457 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
458 { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
459 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
460 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
461 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
462 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
463 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
464 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
465 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
466 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
467 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
468 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
469 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
470 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
471 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
472 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
473 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
474 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
475 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
476 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
477 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
478 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
479 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
480 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
481 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
482 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
483 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
484 { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
485 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
486 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
487 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
488 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
489 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
490 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
491 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
492 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
493 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
494 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
495 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
496 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
497 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
498 { 0xFFFFFFFF }
499};
500
501static const struct si_cac_config_reg cac_override_pitcairn[] =
502{
503 { 0xFFFFFFFF }
504};
505
506static const struct si_powertune_data powertune_data_pitcairn =
507{
508 ((1 << 16) | 27027),
509 5,
510 0,
511 6,
512 100,
513 {
514 51600000UL,
515 1800000UL,
516 7194395UL,
517 309631529UL,
518 -1270850L,
519 4513710L,
520 100
521 },
522 117830498UL,
523 12,
524 {
525 0,
526 0,
527 0,
528 0,
529 0,
530 0,
531 0,
532 0
533 },
534 true
535};
536
537static const struct si_dte_data dte_data_pitcairn =
538{
539 { 0, 0, 0, 0, 0 },
540 { 0, 0, 0, 0, 0 },
541 0,
542 0,
543 0,
544 0,
545 0,
546 0,
547 0,
548 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
549 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
550 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
551 0,
552 false
553};
554
555static const struct si_dte_data dte_data_curacao_xt =
556{
557 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
558 { 0x0, 0x0, 0x0, 0x0, 0x0 },
559 5,
560 45000,
561 100,
562 0xA,
563 1,
564 0,
565 0x10,
566 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
567 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
568 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
569 90,
570 true
571};
572
573static const struct si_dte_data dte_data_curacao_pro =
574{
575 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
576 { 0x0, 0x0, 0x0, 0x0, 0x0 },
577 5,
578 45000,
579 100,
580 0xA,
581 1,
582 0,
583 0x10,
584 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
585 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
586 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
587 90,
588 true
589};
590
591static const struct si_dte_data dte_data_neptune_xt =
592{
593 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
594 { 0x0, 0x0, 0x0, 0x0, 0x0 },
595 5,
596 45000,
597 100,
598 0xA,
599 1,
600 0,
601 0x10,
602 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
603 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
604 { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
605 90,
606 true
607};
608
609static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
610{
611 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
612 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
613 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
614 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
615 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
616 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
617 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
618 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
619 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
620 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
621 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
622 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
623 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
624 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
625 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
626 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
627 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
628 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
629 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
630 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
631 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
632 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
633 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
634 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
635 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
636 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
637 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
638 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
639 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
640 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
641 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
642 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
643 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
644 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
645 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
646 { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
647 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
648 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
649 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
650 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
651 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
652 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
653 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
654 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
655 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
656 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
657 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
658 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
659 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
660 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
661 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
662 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
663 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
664 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
665 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
666 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
667 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
668 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
669 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
670 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
671 { 0xFFFFFFFF }
672};
673
674static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
675{
676 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
677 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
678 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
679 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
680 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
681 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
682 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
683 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
684 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
685 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
686 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
687 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
688 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
689 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
690 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
691 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
692 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
693 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
694 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
695 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
696 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
697 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
698 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
699 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
700 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
701 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
702 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
703 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
704 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
705 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
706 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
707 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
708 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
709 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
710 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
711 { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
712 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
713 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
714 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
715 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
716 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
717 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
718 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
719 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
720 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
721 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
722 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
723 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
724 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
725 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
726 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
727 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
728 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
729 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
730 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
731 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
732 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
733 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
734 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
735 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
736 { 0xFFFFFFFF }
737};
738
739static const struct si_cac_config_reg cac_weights_heathrow[] =
740{
741 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
742 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
743 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
744 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
745 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
746 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
747 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
748 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
749 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
750 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
751 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
752 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
753 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
754 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
755 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
756 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
757 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
758 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
759 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
760 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
761 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
762 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
763 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
764 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
765 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
766 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
767 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
768 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
769 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
770 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
771 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
772 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
773 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
774 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
775 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
776 { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
777 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
778 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
779 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
780 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
781 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
782 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
783 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
784 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
785 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
786 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
787 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
788 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
789 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
790 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
791 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
792 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
793 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
794 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
795 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
796 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
797 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
798 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
799 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
800 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
801 { 0xFFFFFFFF }
802};
803
804static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
805{
806 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
807 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
808 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
809 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
810 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
811 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
812 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
813 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
814 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
815 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
816 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
817 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
818 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
819 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
820 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
821 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
822 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
823 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
824 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
825 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
826 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
827 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
828 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
829 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
830 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
831 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
832 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
833 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
834 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
835 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
836 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
837 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
838 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
839 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
840 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
841 { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
842 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
843 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
844 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
845 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
846 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
847 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
848 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
849 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
850 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
851 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
852 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
853 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
854 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
855 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
856 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
857 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
858 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
859 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
860 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
861 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
862 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
863 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
864 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
865 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
866 { 0xFFFFFFFF }
867};
868
869static const struct si_cac_config_reg cac_weights_cape_verde[] =
870{
871 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
872 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
873 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
874 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
875 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
876 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
877 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
878 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
879 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
880 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
881 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
882 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
883 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
884 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
885 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
886 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
887 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
888 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
889 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
890 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
891 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
892 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
893 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
894 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
895 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
896 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
897 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
898 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
899 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
900 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
901 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
902 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
903 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
904 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
905 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
906 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
907 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
908 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
909 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
910 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
911 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
912 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
913 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
914 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
915 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
916 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
917 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
918 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
919 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
920 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
921 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
922 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
923 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
924 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
925 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
926 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
927 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
928 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
929 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
930 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
931 { 0xFFFFFFFF }
932};
933
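/*
 * Local CAC (LCAC) configuration: entries come in pairs per block
 * register, one that looks like a threshold/value written through the
 * 0x0001fffe field at shift 1, and one setting an enable bit in bit 0.
 */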
934static const struct si_cac_config_reg lcac_cape_verde[] =
935{
936 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
937 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
938 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
939 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
940 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
941 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
942 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
943 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
944 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
945 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
946 { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
947 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
948 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
949 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
950 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
951 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
952 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
953 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
954 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
955 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
956 { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
957 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
958 { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
959 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
960 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
961 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
962 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
963 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
964 { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
965 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
966 { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
967 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
968 { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
969 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
970 { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
971 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
972 { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
973 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
974 { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
975 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
976 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
977 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
978 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
979 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
980 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
981 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
982 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
983 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
984 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
985 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
986 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
987 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
988 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
989 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
990 { 0xFFFFFFFF }
991};
992
993static const struct si_cac_config_reg cac_override_cape_verde[] =
994{
995 { 0xFFFFFFFF }
996};
997
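/*
 * Board PowerTune defaults.  The embedded 7-element block is a struct
 * ni_leakage_coeffients (at, bt, av, bv, t_slope, t_intercept, t_ref,
 * judging by si_calculate_leakage_for_v_and_t_formula() below); the
 * trailing bool is presumably enable_powertune_by_default, which gates
 * CAC, DTE and SQ ramping in si_initialize_powertune_defaults().
 */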
998static const struct si_powertune_data powertune_data_cape_verde =
999{
1000 ((1 << 16) | 0x6993),
1001 5,
1002 0,
1003 7,
1004 105,
1005 {
1006 0UL,
1007 0UL,
1008 7194395UL,
1009 309631529UL,
1010 -1270850L,
1011 4513710L,
1012 100
1013 },
1014 117830498UL,
1015 12,
1016 {
1017 0,
1018 0,
1019 0,
1020 0,
1021 0,
1022 0,
1023 0,
1024 0
1025 },
1026 true
1027};
1028
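/*
 * Digital Temperature Estimation (DTE) parameters.  Going by the usage
 * in si_update_dte_from_pl2(): two 5-entry filter arrays (apparently
 * tau and r), then k, t0 in millidegrees, max_t, a few mode fields,
 * three 16-entry temperature-dependent tables (tdep_*), a threshold,
 * and enable_dte_by_default.  This all-zero table leaves DTE disabled.
 */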
1029static const struct si_dte_data dte_data_cape_verde =
1030{
1031 { 0, 0, 0, 0, 0 },
1032 { 0, 0, 0, 0, 0 },
1033 0,
1034 0,
1035 0,
1036 0,
1037 0,
1038 0,
1039 0,
1040 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1041 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1042 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1043 0,
1044 false
1045};
1046
1047static const struct si_dte_data dte_data_venus_xtx =
1048{
1049 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1050 { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1051 5,
1052 55000,
1053 0x69,
1054 0xA,
1055 1,
1056 0,
1057 0x3,
1058 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1059 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1060 { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1061 90,
1062 true
1063};
1064
1065static const struct si_dte_data dte_data_venus_xt =
1066{
1067 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1068 { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1069 5,
1070 55000,
1071 0x69,
1072 0xA,
1073 1,
1074 0,
1075 0x3,
1076 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1077 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1078 { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1079 90,
1080 true
1081};
1082
1083static const struct si_dte_data dte_data_venus_pro =
1084{
1085 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1086 { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1087 5,
1088 55000,
1089 0x69,
1090 0xA,
1091 1,
1092 0,
1093 0x3,
1094 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1095 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1096 { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1097 90,
1098 true
1099};
1100
1101static const struct si_cac_config_reg cac_weights_oland[] =
1102{
1103 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
1104 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1105 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
1106 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
1107 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1108 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1109 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1110 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1111 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
1112 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
1113 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
1114 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
1115 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
1116 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1117 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
1118 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
1119 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
1120 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
1121 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
1122 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
1123 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
1124 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
1125 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
1126 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
1127 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
1128 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1129 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1130 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1131 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1132 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1133 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1134 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1135 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1136 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1137 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1138 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1139 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1140 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1141 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1142 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1143 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1144 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1145 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1146 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1147 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1148 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1149 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1150 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1151 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1152 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1153 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1154 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1155 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1156 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1157 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1158 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1159 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1160 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1161 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1162 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1163 { 0xFFFFFFFF }
1164};
1165
1166static const struct si_cac_config_reg cac_weights_mars_pro[] =
1167{
1168 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1169 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1170 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1171 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1172 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1173 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1174 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1175 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1176 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1177 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1178 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1179 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1180 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1181 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1182 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1183 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1184 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1185 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1186 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1187 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1188 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1189 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1190 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1191 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1192 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1193 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1194 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1195 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1196 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1197 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1198 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1199 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1200 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1201 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1202 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1203 { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
1204 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1205 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1206 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1207 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1208 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1209 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1210 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1211 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1212 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1213 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1214 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1215 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1216 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1217 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1218 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1219 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1220 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1221 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1222 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1223 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1224 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1225 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1226 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1227 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1228 { 0xFFFFFFFF }
1229};
1230
1231static const struct si_cac_config_reg cac_weights_mars_xt[] =
1232{
1233 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1234 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1235 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1236 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1237 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1238 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1239 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1240 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1241 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1242 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1243 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1244 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1245 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1246 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1247 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1248 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1249 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1250 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1251 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1252 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1253 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1254 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1255 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1256 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1257 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1258 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1259 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1260 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1261 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1262 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1263 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1264 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1265 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1266 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1267 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1268 { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
1269 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1270 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1271 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1272 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1273 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1274 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1275 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1276 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1277 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1278 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1279 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1280 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1281 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1282 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1283 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1284 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1285 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1286 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1287 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1288 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1289 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1290 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1291 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1292 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1293 { 0xFFFFFFFF }
1294};
1295
1296static const struct si_cac_config_reg cac_weights_oland_pro[] =
1297{
1298 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1299 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1300 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1301 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1302 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1303 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1304 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1305 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1306 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1307 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1308 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1309 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1310 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1311 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1312 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1313 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1314 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1315 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1316 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1317 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1318 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1319 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1320 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1321 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1322 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1323 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1324 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1325 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1326 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1327 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1328 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1329 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1330 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1331 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1332 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1333 { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
1334 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1335 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1336 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1337 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1338 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1339 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1340 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1341 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1342 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1343 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1344 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1345 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1346 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1347 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1348 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1349 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1350 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1351 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1352 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1353 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1354 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1355 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1356 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1357 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1358 { 0xFFFFFFFF }
1359};
1360
1361static const struct si_cac_config_reg cac_weights_oland_xt[] =
1362{
1363 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1364 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1365 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1366 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1367 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1368 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1369 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1370 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1371 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1372 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1373 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1374 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1375 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1376 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1377 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1378 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1379 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1380 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1381 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1382 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1383 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1384 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1385 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1386 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1387 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1388 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1389 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1390 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1391 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1392 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1393 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1394 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1395 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1396 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1397 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1398 { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
1399 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1400 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1401 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1402 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1403 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1404 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1405 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1406 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1407 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1408 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1409 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1410 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1411 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1412 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1413 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1414 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1415 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1416 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1417 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1418 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1419 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1420 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1421 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1422 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1423 { 0xFFFFFFFF }
1424};
1425
1426static const struct si_cac_config_reg lcac_oland[] =
1427{
1428 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1429 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1430 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1431 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1432 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1433 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1434 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1435 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1436 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1437 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1438 { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
1439 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1440 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1441 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1442 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1443 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1444 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1445 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1446 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1447 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1448 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1449 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1450 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1451 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1452 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1453 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1454 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1455 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1456 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1457 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1458 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1459 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1460 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1461 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1462 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1463 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1464 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1465 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1466 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1467 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1468 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1469 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1470 { 0xFFFFFFFF }
1471};
1472
1473static const struct si_cac_config_reg lcac_mars_pro[] =
1474{
1475 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1476 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1477 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1478 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1479 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1480 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1481 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1482 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1483 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1484 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1485 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1486 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1487 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1488 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1489 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1490 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1491 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1492 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1493 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1494 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1495 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1496 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1497 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1498 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1499 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1500 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1501 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1502 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1503 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1504 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1505 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1506 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1507 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1508 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1509 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1510 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1511 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1512 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1513 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1514 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1515 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1516 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1517 { 0xFFFFFFFF }
1518};
1519
1520static const struct si_cac_config_reg cac_override_oland[] =
1521{
1522 { 0xFFFFFFFF }
1523};
1524
1525static const struct si_powertune_data powertune_data_oland =
1526{
1527 ((1 << 16) | 0x6993),
1528 5,
1529 0,
1530 7,
1531 105,
1532 {
1533 0UL,
1534 0UL,
1535 7194395UL,
1536 309631529UL,
1537 -1270850L,
1538 4513710L,
1539 100
1540 },
1541 117830498UL,
1542 12,
1543 {
1544 0,
1545 0,
1546 0,
1547 0,
1548 0,
1549 0,
1550 0,
1551 0
1552 },
1553 true
1554};
1555
1556static const struct si_powertune_data powertune_data_mars_pro =
1557{
1558 ((1 << 16) | 0x6993),
1559 5,
1560 0,
1561 7,
1562 105,
1563 {
1564 0UL,
1565 0UL,
1566 7194395UL,
1567 309631529UL,
1568 -1270850L,
1569 4513710L,
1570 100
1571 },
1572 117830498UL,
1573 12,
1574 {
1575 0,
1576 0,
1577 0,
1578 0,
1579 0,
1580 0,
1581 0,
1582 0
1583 },
1584 true
1585};
1586
1587static const struct si_dte_data dte_data_oland =
1588{
1589 { 0, 0, 0, 0, 0 },
1590 { 0, 0, 0, 0, 0 },
1591 0,
1592 0,
1593 0,
1594 0,
1595 0,
1596 0,
1597 0,
1598 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1599 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1600 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1601 0,
1602 false
1603};
1604
1605static const struct si_dte_data dte_data_mars_pro =
1606{
1607 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1608 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1609 5,
1610 55000,
1611 105,
1612 0xA,
1613 1,
1614 0,
1615 0x10,
1616 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1617 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1618 { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1619 90,
1620 true
1621};
1622
1623static const struct si_dte_data dte_data_sun_xt =
1624{
1625 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1626 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1627 5,
1628 55000,
1629 105,
1630 0xA,
1631 1,
1632 0,
1633 0x10,
1634 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1635 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1636 { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1637 90,
1638 true
1639};
1640
1642static const struct si_cac_config_reg cac_weights_hainan[] =
1643{
1644 { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
1645 { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
1646 { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
1647 { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
1648 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1649 { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
1650 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1651 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1652 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1653 { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
1654 { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
1655 { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
1656 { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
1657 { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1658 { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
1659 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1660 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1661 { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
1662 { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
1663 { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
1664 { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
1665 { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
1666 { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
1667 { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
1668 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1669 { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
1670 { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
1671 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1672 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1673 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1674 { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
1675 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1676 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1677 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1678 { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
1679 { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
1680 { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
1681 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1682 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1683 { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
1684 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1685 { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
1686 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1687 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1688 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1689 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1690 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1691 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1692 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1693 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1694 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1695 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1696 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1697 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1698 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1699 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1700 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1701 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1702 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1703 { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
1704 { 0xFFFFFFFF }
1705};
1706
1707static const struct si_powertune_data powertune_data_hainan =
1708{
1709 ((1 << 16) | 0x6993),
1710 5,
1711 0,
1712 9,
1713 105,
1714 {
1715 0UL,
1716 0UL,
1717 7194395UL,
1718 309631529UL,
1719 -1270850L,
1720 4513710L,
1721 100
1722 },
1723 117830498UL,
1724 12,
1725 {
1726 0,
1727 0,
1728 0,
1729 0,
1730 0,
1731 0,
1732 0,
1733 0
1734 },
1735 true
1736};
1737
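/* Power-info accessors implemented by the rv770/evergreen/ni dpm code. */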
1738struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
1739struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
1740struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
1741struct ni_ps *ni_get_ps(struct radeon_ps *rps);
1742
1743static int si_populate_voltage_value(struct radeon_device *rdev,
1744 const struct atom_voltage_table *table,
1745 u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
1746static int si_get_std_voltage_value(struct radeon_device *rdev,
1747 SISLANDS_SMC_VOLTAGE_VALUE *voltage,
1748 u16 *std_voltage);
1749static int si_write_smc_soft_register(struct radeon_device *rdev,
1750 u16 reg_offset, u32 value);
1751static int si_convert_power_level_to_smc(struct radeon_device *rdev,
1752 struct rv7xx_pl *pl,
1753 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
1754static int si_calculate_sclk_params(struct radeon_device *rdev,
1755 u32 engine_clock,
1756 SISLANDS_SMC_SCLK_VALUE *sclk);
1757
1758static struct si_power_info *si_get_pi(struct radeon_device *rdev)
1759{
1760 struct si_power_info *pi = rdev->pm.dpm.priv;
1761
1762 return pi;
1763}
1764
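/*
 * Leakage model evaluated in drm 32.32 fixed point:
 *
 *   kt   = exp((t_slope * V + t_intercept) * T) /
 *          exp((t_slope * V + t_intercept) * T_ref)
 *   kv   = av * exp(bv * V)
 *   P    = I_leak * kt * kv * V
 *
 * V arrives in mV, T in millidegrees, and the coefficients scaled by
 * 1e8, hence the divisions; the final * 1000 suggests the result is
 * reported in milliwatts.
 */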
1765static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
1766 u16 v, s32 t, u32 ileakage, u32 *leakage)
1767{
1768 s64 kt, kv, leakage_w, i_leakage, vddc;
1769 s64 temperature, t_slope, t_intercept, av, bv, t_ref;
1770
1771 i_leakage = drm_int2fixp(ileakage / 100);
1772 vddc = div64_s64(drm_int2fixp(v), 1000);
1773 temperature = div64_s64(drm_int2fixp(t), 1000);
1774
1775 t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
1776 t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
1777 av = div64_s64(drm_int2fixp(coeff->av), 100000000);
1778 bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
1779 t_ref = drm_int2fixp(coeff->t_ref);
1780
1781 kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
1782 drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
1783 kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
1784
1785 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1786
1787 *leakage = drm_fixp2int(leakage_w * 1000);
1788}
1789
1790static void si_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
1791 const struct ni_leakage_coeffients *coeff,
1792 u16 v,
1793 s32 t,
1794 u32 i_leakage,
1795 u32 *leakage)
1796{
1797 si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
1798}
1799
1800static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
1801 const u32 fixed_kt, u16 v,
1802 u32 ileakage, u32 *leakage)
1803{
1804 s64 kt, kv, leakage_w, i_leakage, vddc;
1805
1806 i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1807 vddc = div64_s64(drm_int2fixp(v), 1000);
1808
1809 kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
1810 kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
1811 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
1812
1813 leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1814
1815 *leakage = drm_fixp2int(leakage_w * 1000);
1816}
1817
1818static void si_calculate_leakage_for_v(struct radeon_device *rdev,
1819 const struct ni_leakage_coeffients *coeff,
1820 const u32 fixed_kt,
1821 u16 v,
1822 u32 i_leakage,
1823 u32 *leakage)
1824{
1825 si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
1826}
1827
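/*
 * Rescale the DTE thermal-resistance table from the PL2 (near-TDP)
 * power limit, provided 0 < PL2 <= PL1:
 *
 *   r[i] = t_split[i] * (max_t - t0 / 1000) * 2^14 / (100 * p_limit2)
 *
 * which looks like a Q14 fixed-point R = dT / P per filter stage;
 * tdep_r[1] is then 2 * r[4] and the remaining temperature-dependent
 * entries are pinned to r[4].
 */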
1829static void si_update_dte_from_pl2(struct radeon_device *rdev,
1830 struct si_dte_data *dte_data)
1831{
1832 u32 p_limit1 = rdev->pm.dpm.tdp_limit;
1833 u32 p_limit2 = rdev->pm.dpm.near_tdp_limit;
1834 u32 k = dte_data->k;
1835 u32 t_max = dte_data->max_t;
1836 u32 t_split[5] = { 10, 15, 20, 25, 30 };
1837 u32 t_0 = dte_data->t0;
1838 u32 i;
1839
1840 if (p_limit2 != 0 && p_limit2 <= p_limit1) {
1841 dte_data->tdep_count = 3;
1842
1843 for (i = 0; i < k; i++) {
1844 dte_data->r[i] =
1845 (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
1846 (p_limit2 * (u32)100);
1847 }
1848
1849 dte_data->tdep_r[1] = dte_data->r[4] * 2;
1850
1851 for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
1852 dte_data->tdep_r[i] = dte_data->r[4];
1853 }
1854 } else {
1855 DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
1856 }
1857}
1858
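/*
 * Select the CAC weight/LCAC/override/powertune/DTE tables for the
 * detected ASIC family, refining by PCI device ID where individual
 * SKUs (Venus, Mars, Heathrow, Chelsea, ...) need their own data.
 */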
1859static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1860{
1861 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1862 struct si_power_info *si_pi = si_get_pi(rdev);
1863 bool update_dte_from_pl2 = false;
1864
1865 if (rdev->family == CHIP_TAHITI) {
1866 si_pi->cac_weights = cac_weights_tahiti;
1867 si_pi->lcac_config = lcac_tahiti;
1868 si_pi->cac_override = cac_override_tahiti;
1869 si_pi->powertune_data = &powertune_data_tahiti;
1870 si_pi->dte_data = dte_data_tahiti;
1871
1872 switch (rdev->pdev->device) {
1873 case 0x6798:
1874 si_pi->dte_data.enable_dte_by_default = true;
1875 break;
1876 case 0x6799:
1877 si_pi->dte_data = dte_data_new_zealand;
1878 break;
1879 case 0x6790:
1880 case 0x6791:
1881 case 0x6792:
1882 case 0x679E:
1883 si_pi->dte_data = dte_data_aruba_pro;
1884 update_dte_from_pl2 = true;
1885 break;
1886 case 0x679B:
1887 si_pi->dte_data = dte_data_malta;
1888 update_dte_from_pl2 = true;
1889 break;
1890 case 0x679A:
1891 si_pi->dte_data = dte_data_tahiti_pro;
1892 update_dte_from_pl2 = true;
1893 break;
1894 default:
 1895			if (si_pi->dte_data.enable_dte_by_default)
1896 DRM_ERROR("DTE is not enabled!\n");
1897 break;
1898 }
1899 } else if (rdev->family == CHIP_PITCAIRN) {
1900 switch (rdev->pdev->device) {
1901 case 0x6810:
1902 case 0x6818:
1903 si_pi->cac_weights = cac_weights_pitcairn;
1904 si_pi->lcac_config = lcac_pitcairn;
1905 si_pi->cac_override = cac_override_pitcairn;
1906 si_pi->powertune_data = &powertune_data_pitcairn;
1907 si_pi->dte_data = dte_data_curacao_xt;
1908 update_dte_from_pl2 = true;
1909 break;
1910 case 0x6819:
1911 case 0x6811:
1912 si_pi->cac_weights = cac_weights_pitcairn;
1913 si_pi->lcac_config = lcac_pitcairn;
1914 si_pi->cac_override = cac_override_pitcairn;
1915 si_pi->powertune_data = &powertune_data_pitcairn;
1916 si_pi->dte_data = dte_data_curacao_pro;
1917 update_dte_from_pl2 = true;
1918 break;
1919 case 0x6800:
1920 case 0x6806:
1921 si_pi->cac_weights = cac_weights_pitcairn;
1922 si_pi->lcac_config = lcac_pitcairn;
1923 si_pi->cac_override = cac_override_pitcairn;
1924 si_pi->powertune_data = &powertune_data_pitcairn;
1925 si_pi->dte_data = dte_data_neptune_xt;
1926 update_dte_from_pl2 = true;
1927 break;
1928 default:
1929 si_pi->cac_weights = cac_weights_pitcairn;
1930 si_pi->lcac_config = lcac_pitcairn;
1931 si_pi->cac_override = cac_override_pitcairn;
1932 si_pi->powertune_data = &powertune_data_pitcairn;
1933 si_pi->dte_data = dte_data_pitcairn;
1934 }
1935 } else if (rdev->family == CHIP_VERDE) {
1936 si_pi->lcac_config = lcac_cape_verde;
1937 si_pi->cac_override = cac_override_cape_verde;
1938 si_pi->powertune_data = &powertune_data_cape_verde;
1939
1940 switch (rdev->pdev->device) {
1941 case 0x683B:
1942 case 0x683F:
1943 case 0x6829:
1944 si_pi->cac_weights = cac_weights_cape_verde_pro;
1945 si_pi->dte_data = dte_data_cape_verde;
1946 break;
1947 case 0x6825:
1948 case 0x6827:
1949 si_pi->cac_weights = cac_weights_heathrow;
1950 si_pi->dte_data = dte_data_cape_verde;
1951 break;
1952 case 0x6824:
1953 case 0x682D:
1954 si_pi->cac_weights = cac_weights_chelsea_xt;
1955 si_pi->dte_data = dte_data_cape_verde;
1956 break;
1957 case 0x682F:
1958 si_pi->cac_weights = cac_weights_chelsea_pro;
1959 si_pi->dte_data = dte_data_cape_verde;
1960 break;
1961 case 0x6820:
1962 si_pi->cac_weights = cac_weights_heathrow;
1963 si_pi->dte_data = dte_data_venus_xtx;
1964 break;
1965 case 0x6821:
1966 si_pi->cac_weights = cac_weights_heathrow;
1967 si_pi->dte_data = dte_data_venus_xt;
1968 break;
1969 case 0x6823:
1970 si_pi->cac_weights = cac_weights_chelsea_pro;
1971 si_pi->dte_data = dte_data_venus_pro;
1972 break;
1973 case 0x682B:
1974 si_pi->cac_weights = cac_weights_chelsea_pro;
1975 si_pi->dte_data = dte_data_venus_pro;
1976 break;
1977 default:
1978 si_pi->cac_weights = cac_weights_cape_verde;
1979 si_pi->dte_data = dte_data_cape_verde;
1980 break;
1981 }
1982 } else if (rdev->family == CHIP_OLAND) {
1983 switch (rdev->pdev->device) {
1984 case 0x6601:
1985 case 0x6621:
1986 case 0x6603:
1987 si_pi->cac_weights = cac_weights_mars_pro;
1988 si_pi->lcac_config = lcac_mars_pro;
1989 si_pi->cac_override = cac_override_oland;
1990 si_pi->powertune_data = &powertune_data_mars_pro;
1991 si_pi->dte_data = dte_data_mars_pro;
1992 update_dte_from_pl2 = true;
1993 break;
1994 case 0x6600:
1995 case 0x6606:
1996 case 0x6620:
1997 si_pi->cac_weights = cac_weights_mars_xt;
1998 si_pi->lcac_config = lcac_mars_pro;
1999 si_pi->cac_override = cac_override_oland;
2000 si_pi->powertune_data = &powertune_data_mars_pro;
2001 si_pi->dte_data = dte_data_mars_pro;
2002 update_dte_from_pl2 = true;
2003 break;
2004 case 0x6611:
2005 si_pi->cac_weights = cac_weights_oland_pro;
2006 si_pi->lcac_config = lcac_mars_pro;
2007 si_pi->cac_override = cac_override_oland;
2008 si_pi->powertune_data = &powertune_data_mars_pro;
2009 si_pi->dte_data = dte_data_mars_pro;
2010 update_dte_from_pl2 = true;
2011 break;
2012 case 0x6610:
2013 si_pi->cac_weights = cac_weights_oland_xt;
2014 si_pi->lcac_config = lcac_mars_pro;
2015 si_pi->cac_override = cac_override_oland;
2016 si_pi->powertune_data = &powertune_data_mars_pro;
2017 si_pi->dte_data = dte_data_mars_pro;
2018 update_dte_from_pl2 = true;
2019 break;
2020 default:
2021 si_pi->cac_weights = cac_weights_oland;
2022 si_pi->lcac_config = lcac_oland;
2023 si_pi->cac_override = cac_override_oland;
2024 si_pi->powertune_data = &powertune_data_oland;
2025 si_pi->dte_data = dte_data_oland;
2026 break;
2027 }
2028 } else if (rdev->family == CHIP_HAINAN) {
2029 si_pi->cac_weights = cac_weights_hainan;
2030 si_pi->lcac_config = lcac_oland;
2031 si_pi->cac_override = cac_override_oland;
2032 si_pi->powertune_data = &powertune_data_hainan;
2033 si_pi->dte_data = dte_data_sun_xt;
2034 update_dte_from_pl2 = true;
2035 } else {
2036 DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
2037 return;
2038 }
2039
2040 ni_pi->enable_power_containment = false;
2041 ni_pi->enable_cac = false;
2042 ni_pi->enable_sq_ramping = false;
2043 si_pi->enable_dte = false;
2044
2045 if (si_pi->powertune_data->enable_powertune_by_default) {
2046 ni_pi->enable_power_containment = true;
2047 ni_pi->enable_cac = true;
2048 if (si_pi->dte_data.enable_dte_by_default) {
2049 si_pi->enable_dte = true;
2050 if (update_dte_from_pl2)
2051 si_update_dte_from_pl2(rdev, &si_pi->dte_data);
2053 }
2054 ni_pi->enable_sq_ramping = true;
2055 }
2056
2057 ni_pi->driver_calculate_cac_leakage = true;
2058 ni_pi->cac_configuration_required = true;
2059
2060 if (ni_pi->cac_configuration_required) {
2061 ni_pi->support_cac_long_term_average = true;
2062 si_pi->dyn_powertune_data.l2_lta_window_size =
2063 si_pi->powertune_data->l2_lta_window_size_default;
2064 si_pi->dyn_powertune_data.lts_truncate =
2065 si_pi->powertune_data->lts_truncate_default;
2066 } else {
2067 ni_pi->support_cac_long_term_average = false;
2068 si_pi->dyn_powertune_data.l2_lta_window_size = 0;
2069 si_pi->dyn_powertune_data.lts_truncate = 0;
2070 }
2071
2072 si_pi->dyn_powertune_data.disable_uvd_powertune = false;
2073}
2074
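/*
 * Editor's sketch (hypothetical, not driver code): the Cape Verde switch in
 * si_initialize_powertune_defaults() maps PCI device IDs to per-SKU CAC
 * weight and DTE tables.  The same selection can be written as a sentinel-
 * terminated lookup table; all names below are made up for illustration.
 */
#include <stdint.h>

struct sku_entry {
	uint16_t device_id;		/* 0 terminates and doubles as default */
	const void *cac_weights;	/* stand-in for struct si_cac_config_reg * */
	const void *dte_data;		/* stand-in for struct si_dte_data * */
};

static const struct sku_entry *lookup_sku(const struct sku_entry *map,
					  uint16_t device_id)
{
	/* first match wins; the terminating entry is the default case */
	for (; map->device_id != 0; map++)
		if (map->device_id == device_id)
			return map;
	return map;
}
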
2075static u32 si_get_smc_power_scaling_factor(struct radeon_device *rdev)
2076{
2077 return 1;
2078}
2079
2080static u32 si_calculate_cac_wintime(struct radeon_device *rdev)
2081{
2082 u32 xclk;
2083 u32 wintime;
2084 u32 cac_window;
2085 u32 cac_window_size;
2086
2087 xclk = radeon_get_xclk(rdev);
2088
2089 if (xclk == 0)
2090 return 0;
2091
2092 cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
2093 cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
2094
2095 wintime = (cac_window_size * 100) / xclk;
2096
2097 return wintime;
2098}
2099
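/*
 * Editor's sketch (hypothetical, not driver code): radeon_get_xclk() reports
 * the reference clock in 10 kHz units, and the CAC window field packs two
 * 16-bit factors whose product is a cycle count, so cycles * 100 / xclk in
 * si_calculate_cac_wintime() comes out in microseconds.  The arithmetic,
 * with made-up numbers:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t cac_wintime_us(uint32_t cac_window, uint32_t xclk_10khz)
{
	uint32_t cycles = ((cac_window & 0xFFFF0000) >> 16) *
			  (cac_window & 0x0000FFFF);

	if (xclk_10khz == 0)
		return 0;
	/* cycles / (xclk * 10^4 Hz) seconds == cycles * 100 / xclk us */
	return (cycles * 100) / xclk_10khz;
}

int main(void)
{
	/* 100 * 512 cycles at a 100 MHz reference clock -> 512 us */
	printf("%u us\n", cac_wintime_us((100u << 16) | 512u, 10000));
	return 0;
}
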
2100static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
2101{
2102 return power_in_watts;
2103}
2104
2105static int si_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
2106 bool adjust_polarity,
2107 u32 tdp_adjustment,
2108 u32 *tdp_limit,
2109 u32 *near_tdp_limit)
2110{
2111 u32 adjustment_delta, max_tdp_limit;
2112
2113 if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
2114 return -EINVAL;
2115
2116 max_tdp_limit = ((100 + 100) * rdev->pm.dpm.tdp_limit) / 100;
2117
2118 if (adjust_polarity) {
2119 *tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
2120 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - rdev->pm.dpm.tdp_limit);
2121 } else {
2122 *tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
2123 adjustment_delta = rdev->pm.dpm.tdp_limit - *tdp_limit;
2124 if (adjustment_delta < rdev->pm.dpm.near_tdp_limit_adjusted)
2125 *near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
2126 else
2127 *near_tdp_limit = 0;
2128 }
2129
2130 if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
2131 return -EINVAL;
2132 if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
2133 return -EINVAL;
2134
2135 return 0;
2136}
2137
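/*
 * Editor's sketch (hypothetical, not driver code): the adjusted-TDP math in
 * si_calculate_adjusted_tdp_limits() in plain numbers.  With tdp_limit =
 * 100 W, near_tdp_limit_adjusted = 90 W and a 10% adjustment: positive
 * polarity yields tdp = 110, near = 100; negative polarity yields tdp = 90,
 * near = 80, with near floored at 0 and both results required to stay in
 * (0, 2 * tdp_limit].
 */
#include <stdint.h>

static int adjusted_tdp(uint32_t tdp, uint32_t near_adj, int positive,
			uint32_t pct, uint32_t *out_tdp, uint32_t *out_near)
{
	uint32_t delta;

	if (positive) {
		*out_tdp = ((100 + pct) * tdp) / 100;
		*out_near = near_adj + (*out_tdp - tdp);
	} else {
		*out_tdp = ((100 - pct) * tdp) / 100;
		delta = tdp - *out_tdp;
		*out_near = (delta < near_adj) ? near_adj - delta : 0;
	}
	/* mirror the driver's range checks */
	if (*out_tdp == 0 || *out_tdp > 2 * tdp)
		return -1;
	if (*out_near == 0 || *out_near > *out_tdp)
		return -1;
	return 0;
}
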
2138static int si_populate_smc_tdp_limits(struct radeon_device *rdev,
2139 struct radeon_ps *radeon_state)
2140{
2141 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2142 struct si_power_info *si_pi = si_get_pi(rdev);
2143
2144 if (ni_pi->enable_power_containment) {
2145 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2146 PP_SIslands_PAPMParameters *papm_parm;
2147 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
2148 u32 scaling_factor = si_get_smc_power_scaling_factor(rdev);
2149 u32 tdp_limit;
2150 u32 near_tdp_limit;
2151 int ret;
2152
2153 if (scaling_factor == 0)
2154 return -EINVAL;
2155
2156 memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2157
2158 ret = si_calculate_adjusted_tdp_limits(rdev,
2159 false, /* ??? */
2160 rdev->pm.dpm.tdp_adjustment,
2161 &tdp_limit,
2162 &near_tdp_limit);
2163 if (ret)
2164 return ret;
2165
2166 smc_table->dpm2Params.TDPLimit =
2167 cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
2168 smc_table->dpm2Params.NearTDPLimit =
2169 cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
2170 smc_table->dpm2Params.SafePowerLimit =
2171 cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2172
2173 ret = si_copy_bytes_to_smc(rdev,
2174 (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2175 offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
2176 (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
2177 sizeof(u32) * 3,
2178 si_pi->sram_end);
2179 if (ret)
2180 return ret;
2181
2182 if (si_pi->enable_ppm) {
2183 papm_parm = &si_pi->papm_parm;
2184 memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
2185 papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
2186 papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
2187 papm_parm->dGPU_T_Warning = cpu_to_be32(95);
2188 papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
2189 papm_parm->PlatformPowerLimit = 0xffffffff;
2190 papm_parm->NearTDPLimitPAPM = 0xffffffff;
2191
2192 ret = si_copy_bytes_to_smc(rdev, si_pi->papm_cfg_table_start,
2193 (u8 *)papm_parm,
2194 sizeof(PP_SIslands_PAPMParameters),
2195 si_pi->sram_end);
2196 if (ret)
2197 return ret;
2198 }
2199 }
2200 return 0;
2201}
2202
2203static int si_populate_smc_tdp_limits_2(struct radeon_device *rdev,
2204 struct radeon_ps *radeon_state)
2205{
2206 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2207 struct si_power_info *si_pi = si_get_pi(rdev);
2208
2209 if (ni_pi->enable_power_containment) {
2210 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2211 u32 scaling_factor = si_get_smc_power_scaling_factor(rdev);
2212 int ret;
2213
2214 memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2215
2216 smc_table->dpm2Params.NearTDPLimit =
2217 cpu_to_be32(si_scale_power_for_smc(rdev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
2218 smc_table->dpm2Params.SafePowerLimit =
2219 cpu_to_be32(si_scale_power_for_smc((rdev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2220
2221 ret = si_copy_bytes_to_smc(rdev,
2222 (si_pi->state_table_start +
2223 offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2224 offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
2225 (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
2226 sizeof(u32) * 2,
2227 si_pi->sram_end);
2228 if (ret)
2229 return ret;
2230 }
2231
2232 return 0;
2233}
2234
2235static u16 si_calculate_power_efficiency_ratio(struct radeon_device *rdev,
2236 const u16 prev_std_vddc,
2237 const u16 curr_std_vddc)
2238{
2239 u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
2240 u64 prev_vddc = (u64)prev_std_vddc;
2241 u64 curr_vddc = (u64)curr_std_vddc;
2242 u64 pwr_efficiency_ratio, n, d;
2243
2244 if ((prev_vddc == 0) || (curr_vddc == 0))
2245 return 0;
2246
2247 n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
2248 d = prev_vddc * prev_vddc;
2249 pwr_efficiency_ratio = div64_u64(n, d);
2250
2251 if (pwr_efficiency_ratio > (u64)0xFFFF)
2252 return 0;
2253
2254 return (u16)pwr_efficiency_ratio;
2255}
2256
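/*
 * Editor's sketch (hypothetical, not driver code): the ratio above is Q10
 * fixed point (scaled by 1024): 1024 * Vcur^2 / Vprev^2, padded by a
 * per-mille margin.  Assuming a margin of 10/1000, Vprev = 900 mV and
 * Vcur = 1000 mV give 1024 * 1000^2 * 1010 / 1000 / 900^2 = 1276, i.e.
 * roughly 1.25x in Q10 (dynamic power scales with V^2).
 */
#include <stdint.h>

static uint16_t pwr_efficiency_ratio_q10(uint16_t prev_mv, uint16_t curr_mv,
					 uint64_t margin_per_mille)
{
	uint64_t n, d, ratio;

	if (prev_mv == 0 || curr_mv == 0)
		return 0;

	n = 1024ull * curr_mv * curr_mv * (1000ull + margin_per_mille) / 1000ull;
	d = (uint64_t)prev_mv * prev_mv;
	ratio = n / d;

	return ratio > 0xFFFF ? 0 : (uint16_t)ratio;
}
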
2257static bool si_should_disable_uvd_powertune(struct radeon_device *rdev,
2258 struct radeon_ps *radeon_state)
2259{
2260 struct si_power_info *si_pi = si_get_pi(rdev);
2261
2262 if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
2263 radeon_state->vclk && radeon_state->dclk)
2264 return true;
2265
2266 return false;
2267}
2268
2269static int si_populate_power_containment_values(struct radeon_device *rdev,
2270 struct radeon_ps *radeon_state,
2271 SISLANDS_SMC_SWSTATE *smc_state)
2272{
2273 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2274 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2275 struct ni_ps *state = ni_get_ps(radeon_state);
2276 SISLANDS_SMC_VOLTAGE_VALUE vddc;
2277 u32 prev_sclk;
2278 u32 max_sclk;
2279 u32 min_sclk;
2280 u16 prev_std_vddc;
2281 u16 curr_std_vddc;
2282 int i;
2283 u16 pwr_efficiency_ratio;
2284 u8 max_ps_percent;
2285 bool disable_uvd_power_tune;
2286 int ret;
2287
2288 if (ni_pi->enable_power_containment == false)
2289 return 0;
2290
2291 if (state->performance_level_count == 0)
2292 return -EINVAL;
2293
2294 if (smc_state->levelCount != state->performance_level_count)
2295 return -EINVAL;
2296
2297 disable_uvd_power_tune = si_should_disable_uvd_powertune(rdev, radeon_state);
2298
2299 smc_state->levels[0].dpm2.MaxPS = 0;
2300 smc_state->levels[0].dpm2.NearTDPDec = 0;
2301 smc_state->levels[0].dpm2.AboveSafeInc = 0;
2302 smc_state->levels[0].dpm2.BelowSafeInc = 0;
2303 smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
2304
2305 for (i = 1; i < state->performance_level_count; i++) {
2306 prev_sclk = state->performance_levels[i-1].sclk;
2307 max_sclk = state->performance_levels[i].sclk;
2308 if (i == 1)
2309 max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
2310 else
2311 max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
2312
2313 if (prev_sclk > max_sclk)
2314 return -EINVAL;
2315
2316 if ((max_ps_percent == 0) ||
2317 (prev_sclk == max_sclk) ||
2318 disable_uvd_power_tune) {
2319 min_sclk = max_sclk;
2320 } else if (i == 1) {
2321 min_sclk = prev_sclk;
2322 } else {
2323 min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
2324 }
2325
2326 if (min_sclk < state->performance_levels[0].sclk)
2327 min_sclk = state->performance_levels[0].sclk;
2328
2329 if (min_sclk == 0)
2330 return -EINVAL;
2331
2332 ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
2333 state->performance_levels[i-1].vddc, &vddc);
2334 if (ret)
2335 return ret;
2336
2337 ret = si_get_std_voltage_value(rdev, &vddc, &prev_std_vddc);
2338 if (ret)
2339 return ret;
2340
2341 ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
2342 state->performance_levels[i].vddc, &vddc);
2343 if (ret)
2344 return ret;
2345
2346 ret = si_get_std_voltage_value(rdev, &vddc, &curr_std_vddc);
2347 if (ret)
2348 return ret;
2349
2350 pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(rdev,
2351 prev_std_vddc, curr_std_vddc);
2352
2353 smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
2354 smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
2355 smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
2356 smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
2357 smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
2358 }
2359
2360 return 0;
2361}
2362
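/*
 * Editor's sketch (hypothetical, not driver code): for levels above the
 * second, min_sclk is max_ps_percent of the previous level's sclk, and
 * MaxPS is the fraction of the level's clock that DPM2 may pulse-skip.
 * With a made-up pulse-skip scale of 64, prev_sclk = 60000, max_sclk =
 * 100000 and max_ps_percent = 50: min_sclk = 30000 and MaxPS =
 * 64 * (100000 - 30000) / 100000 = 44.
 */
#include <stdint.h>

static uint8_t dpm2_max_ps(uint32_t pulse_skip_scale, uint32_t prev_sclk,
			   uint32_t max_sclk, uint32_t max_ps_percent,
			   uint32_t floor_sclk)
{
	uint32_t min_sclk = (prev_sclk * max_ps_percent) / 100;

	if (min_sclk < floor_sclk)	/* never below the lowest state */
		min_sclk = floor_sclk;
	if (max_sclk == 0 || min_sclk > max_sclk)
		return 0;

	return (uint8_t)((pulse_skip_scale * (max_sclk - min_sclk)) / max_sclk);
}
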
2363static int si_populate_sq_ramping_values(struct radeon_device *rdev,
2364 struct radeon_ps *radeon_state,
2365 SISLANDS_SMC_SWSTATE *smc_state)
2366{
2367 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2368 struct ni_ps *state = ni_get_ps(radeon_state);
2369 u32 sq_power_throttle, sq_power_throttle2;
2370 bool enable_sq_ramping = ni_pi->enable_sq_ramping;
2371 int i;
2372
2373 if (state->performance_level_count == 0)
2374 return -EINVAL;
2375
2376 if (smc_state->levelCount != state->performance_level_count)
2377 return -EINVAL;
2378
2379 if (rdev->pm.dpm.sq_ramping_threshold == 0)
2380 return -EINVAL;
2381
2382 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
2383 enable_sq_ramping = false;
2384
2385 if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
2386 enable_sq_ramping = false;
2387
2388 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
2389 enable_sq_ramping = false;
2390
2391 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2392 enable_sq_ramping = false;
2393
2394 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2395 enable_sq_ramping = false;
2396
2397 for (i = 0; i < state->performance_level_count; i++) {
2398 sq_power_throttle = 0;
2399 sq_power_throttle2 = 0;
2400
2401 if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
2402 enable_sq_ramping) {
2403 sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
2404 sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
2405 sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
2406 sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
2407 sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
2408 } else {
2409 sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
2410 sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
2411 }
2412
2413 smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
2414 smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
2415 }
2416
2417 return 0;
2418}
2419
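/*
 * Editor's sketch (hypothetical, not driver code): each guard above verifies
 * that a compile-time constant fits its register field, i.e. value <=
 * MASK >> SHIFT; SQ ramping is disabled rather than programming a truncated
 * value.  The check in generic form:
 */
#include <stdbool.h>
#include <stdint.h>

static bool fits_field(uint32_t value, uint32_t mask, uint32_t shift)
{
	return value <= (mask >> shift);
}

/* e.g. a 6-bit field at bits 10..15 (mask 0x0000FC00, shift 10):
 * fits_field(63, 0x0000FC00, 10) is true, fits_field(64, ...) is false. */
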
2420static int si_enable_power_containment(struct radeon_device *rdev,
2421 struct radeon_ps *radeon_new_state,
2422 bool enable)
2423{
2424 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2425 PPSMC_Result smc_result;
2426 int ret = 0;
2427
2428 if (ni_pi->enable_power_containment) {
2429 if (enable) {
2430 if (!si_should_disable_uvd_powertune(rdev, radeon_new_state)) {
2431 smc_result = si_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
2432 if (smc_result != PPSMC_Result_OK) {
2433 ret = -EINVAL;
2434 ni_pi->pc_enabled = false;
2435 } else {
2436 ni_pi->pc_enabled = true;
2437 }
2438 }
2439 } else {
2440 smc_result = si_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
2441 if (smc_result != PPSMC_Result_OK)
2442 ret = -EINVAL;
2443 ni_pi->pc_enabled = false;
2444 }
2445 }
2446
2447 return ret;
2448}
2449
2450static int si_initialize_smc_dte_tables(struct radeon_device *rdev)
2451{
2452 struct si_power_info *si_pi = si_get_pi(rdev);
2453 int ret = 0;
2454 struct si_dte_data *dte_data = &si_pi->dte_data;
2455 Smc_SIslands_DTE_Configuration *dte_tables = NULL;
2456 u32 table_size;
2457 u8 tdep_count;
2458 u32 i;
2459
2460 if (dte_data == NULL)
2461 si_pi->enable_dte = false;
2462
2463 if (si_pi->enable_dte == false)
2464 return 0;
2465
2466 if (dte_data->k <= 0)
2467 return -EINVAL;
2468
2469 dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
2470 if (dte_tables == NULL) {
2471 si_pi->enable_dte = false;
2472 return -ENOMEM;
2473 }
2474
2475 table_size = dte_data->k;
2476
2477 if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
2478 table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
2479
2480 tdep_count = dte_data->tdep_count;
2481 if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
2482 tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
2483
2484 dte_tables->K = cpu_to_be32(table_size);
2485 dte_tables->T0 = cpu_to_be32(dte_data->t0);
2486 dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
2487 dte_tables->WindowSize = dte_data->window_size;
2488 dte_tables->temp_select = dte_data->temp_select;
2489 dte_tables->DTE_mode = dte_data->dte_mode;
2490 dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
2491
2492 if (tdep_count > 0)
2493 table_size--;
2494
2495 for (i = 0; i < table_size; i++) {
2496 dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
2497 dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
2498 }
2499
2500 dte_tables->Tdep_count = tdep_count;
2501
2502 for (i = 0; i < (u32)tdep_count; i++) {
2503 dte_tables->T_limits[i] = dte_data->t_limits[i];
2504 dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
2505 dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
2506 }
2507
2508 ret = si_copy_bytes_to_smc(rdev, si_pi->dte_table_start, (u8 *)dte_tables,
2509 sizeof(Smc_SIslands_DTE_Configuration), si_pi->sram_end);
2510 kfree(dte_tables);
2511
2512 return ret;
2513}
2514
2515static int si_get_cac_std_voltage_max_min(struct radeon_device *rdev,
2516 u16 *max, u16 *min)
2517{
2518 struct si_power_info *si_pi = si_get_pi(rdev);
2519 struct radeon_cac_leakage_table *table =
2520 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2521 u32 i;
2522 u32 v0_loadline;
2523
2525 if (table == NULL)
2526 return -EINVAL;
2527
2528 *max = 0;
2529 *min = 0xFFFF;
2530
2531 for (i = 0; i < table->count; i++) {
2532 if (table->entries[i].vddc > *max)
2533 *max = table->entries[i].vddc;
2534 if (table->entries[i].vddc < *min)
2535 *min = table->entries[i].vddc;
2536 }
2537
2538 if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
2539 return -EINVAL;
2540
2541 v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
2542
2543 if (v0_loadline > 0xFFFFUL)
2544 return -EINVAL;
2545
2546 *min = (u16)v0_loadline;
2547
2548 if ((*min > *max) || (*max == 0) || (*min == 0))
2549 return -EINVAL;
2550
2551 return 0;
2552}
2553
2554static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
2555{
2556 return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
2557 SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
2558}
2559
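/*
 * Editor's sketch (hypothetical, not driver code): the step above is a
 * round-up division, ceil((max - min) / NUM_ENTRIES), so the LUT's steps
 * always cover the full voltage range.  E.g. max - min = 300 mV with 16
 * entries gives (300 + 15) / 16 = 19 mV, and the caller re-derives
 * vddc_min = vddc_max - 19 * (16 - 1) so the endpoints line up.
 */
#include <stdint.h>

static uint16_t lut_step_ceil(uint16_t max, uint16_t min, uint16_t entries)
{
	return (uint16_t)(((max - min) + (entries - 1)) / entries);
}
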
2560static int si_init_dte_leakage_table(struct radeon_device *rdev,
2561 PP_SIslands_CacConfig *cac_tables,
2562 u16 vddc_max, u16 vddc_min, u16 vddc_step,
2563 u16 t0, u16 t_step)
2564{
2565 struct si_power_info *si_pi = si_get_pi(rdev);
2566 u32 leakage;
2567 unsigned int i, j;
2568 s32 t;
2569 u32 smc_leakage;
2570 u32 scaling_factor;
2571 u16 voltage;
2572
2573 scaling_factor = si_get_smc_power_scaling_factor(rdev);
2574
2575 for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
2576 t = (1000 * (i * t_step + t0));
2577
2578 for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2579 voltage = vddc_max - (vddc_step * j);
2580
2581 si_calculate_leakage_for_v_and_t(rdev,
2582 &si_pi->powertune_data->leakage_coefficients,
2583 voltage,
2584 t,
2585 si_pi->dyn_powertune_data.cac_leakage,
2586 &leakage);
2587
2588 smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2589
2590 if (smc_leakage > 0xFFFF)
2591 smc_leakage = 0xFFFF;
2592
2593 cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2594 cpu_to_be16((u16)smc_leakage);
2595 }
2596 }
2597 return 0;
2598}
2599
2600static int si_init_simplified_leakage_table(struct radeon_device *rdev,
2601 PP_SIslands_CacConfig *cac_tables,
2602 u16 vddc_max, u16 vddc_min, u16 vddc_step)
2603{
2604 struct si_power_info *si_pi = si_get_pi(rdev);
2605 u32 leakage;
2606 unsigned int i, j;
2607 u32 smc_leakage;
2608 u32 scaling_factor;
2609 u16 voltage;
2610
2611 scaling_factor = si_get_smc_power_scaling_factor(rdev);
2612
2613 for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2614 voltage = vddc_max - (vddc_step * j);
2615
2616 si_calculate_leakage_for_v(rdev,
2617 &si_pi->powertune_data->leakage_coefficients,
2618 si_pi->powertune_data->fixed_kt,
2619 voltage,
2620 si_pi->dyn_powertune_data.cac_leakage,
2621 &leakage);
2622
2623 smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2624
2625 if (smc_leakage > 0xFFFF)
2626 smc_leakage = 0xFFFF;
2627
2628 for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
2629 cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2630 cpu_to_be16((u16)smc_leakage);
2631 }
2632 return 0;
2633}
2634
2635static int si_initialize_smc_cac_tables(struct radeon_device *rdev)
2636{
2637 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2638 struct si_power_info *si_pi = si_get_pi(rdev);
2639 PP_SIslands_CacConfig *cac_tables = NULL;
2640 u16 vddc_max, vddc_min, vddc_step;
2641 u16 t0, t_step;
2642 u32 load_line_slope, reg;
2643 int ret = 0;
2644 u32 ticks_per_us = radeon_get_xclk(rdev) / 100;
2645
2646 if (ni_pi->enable_cac == false)
2647 return 0;
2648
2649 cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
2650 if (!cac_tables)
2651 return -ENOMEM;
2652
2653 reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
2654 reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
2655 WREG32(CG_CAC_CTRL, reg);
2656
2657 si_pi->dyn_powertune_data.cac_leakage = rdev->pm.dpm.cac_leakage;
2658 si_pi->dyn_powertune_data.dc_pwr_value =
2659 si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
2660 si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(rdev);
2661 si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
2662
2663 si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
2664
2665 ret = si_get_cac_std_voltage_max_min(rdev, &vddc_max, &vddc_min);
2666 if (ret)
2667 goto done_free;
2668
2669 vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
2670 vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
2671 t_step = 4;
2672 t0 = 60;
2673
2674 if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
2675 ret = si_init_dte_leakage_table(rdev, cac_tables,
2676 vddc_max, vddc_min, vddc_step,
2677 t0, t_step);
2678 else
2679 ret = si_init_simplified_leakage_table(rdev, cac_tables,
2680 vddc_max, vddc_min, vddc_step);
2681 if (ret)
2682 goto done_free;
2683
2684 load_line_slope = ((u32)rdev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
2685
2686 cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
2687 cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
2688 cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
2689 cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
2690 cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
2691 cac_tables->R_LL = cpu_to_be32(load_line_slope);
2692 cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
2693 cac_tables->calculation_repeats = cpu_to_be32(2);
2694 cac_tables->dc_cac = cpu_to_be32(0);
2695 cac_tables->log2_PG_LKG_SCALE = 12;
2696 cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
2697 cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
2698 cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
2699
2700 ret = si_copy_bytes_to_smc(rdev, si_pi->cac_table_start, (u8 *)cac_tables,
2701 sizeof(PP_SIslands_CacConfig), si_pi->sram_end);
2702
2703 if (ret)
2704 goto done_free;
2705
2706 ret = si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
2707
2708done_free:
2709 if (ret) {
2710 ni_pi->enable_cac = false;
2711 ni_pi->enable_power_containment = false;
2712 }
2713
2714 kfree(cac_tables);
2715
2716 return 0;
2717}
2718
2719static int si_program_cac_config_registers(struct radeon_device *rdev,
2720 const struct si_cac_config_reg *cac_config_regs)
2721{
2722 const struct si_cac_config_reg *config_regs = cac_config_regs;
2723 u32 data = 0, offset;
2724
2725 if (!config_regs)
2726 return -EINVAL;
2727
2728 while (config_regs->offset != 0xFFFFFFFF) {
2729 switch (config_regs->type) {
2730 case SISLANDS_CACCONFIG_CGIND:
2731 offset = SMC_CG_IND_START + config_regs->offset;
2732 if (offset < SMC_CG_IND_END)
2733 data = RREG32_SMC(offset);
2734 break;
2735 default:
2736 data = RREG32(config_regs->offset << 2);
2737 break;
2738 }
2739
2740 data &= ~config_regs->mask;
2741 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
2742
2743 switch (config_regs->type) {
2744 case SISLANDS_CACCONFIG_CGIND:
2745 offset = SMC_CG_IND_START + config_regs->offset;
2746 if (offset < SMC_CG_IND_END)
2747 WREG32_SMC(offset, data);
2748 break;
2749 default:
2750 WREG32(config_regs->offset << 2, data);
2751 break;
2752 }
2753 config_regs++;
2754 }
2755 return 0;
2756}
2757
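/*
 * Editor's sketch (hypothetical, not driver code): the loop above walks a
 * const table of {offset, mask, shift, value, type} records terminated by
 * offset == 0xFFFFFFFF, doing a read-modify-write per record (the sentinel
 * avoids storing an explicit length).  Skeleton against a toy register
 * file; the real driver additionally routes SMC-indirect offsets separately.
 */
#include <stdint.h>

struct cfg_reg {
	uint32_t offset, mask, shift, value;
};

static uint32_t toy_regs[256];				/* toy register file */
static uint32_t mmio_read(uint32_t off)  { return toy_regs[off]; }
static void mmio_write(uint32_t off, uint32_t v) { toy_regs[off] = v; }

static void program_cfg_table(const struct cfg_reg *r)
{
	uint32_t data;

	for (; r->offset != 0xFFFFFFFF; r++) {
		data = mmio_read(r->offset);
		data &= ~r->mask;
		data |= (r->value << r->shift) & r->mask;
		mmio_write(r->offset, data);
	}
}
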
2758static int si_initialize_hardware_cac_manager(struct radeon_device *rdev)
2759{
2760 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2761 struct si_power_info *si_pi = si_get_pi(rdev);
2762 int ret;
2763
2764 if ((ni_pi->enable_cac == false) ||
2765 (ni_pi->cac_configuration_required == false))
2766 return 0;
2767
2768 ret = si_program_cac_config_registers(rdev, si_pi->lcac_config);
2769 if (ret)
2770 return ret;
2771 ret = si_program_cac_config_registers(rdev, si_pi->cac_override);
2772 if (ret)
2773 return ret;
2774 ret = si_program_cac_config_registers(rdev, si_pi->cac_weights);
2775 if (ret)
2776 return ret;
2777
2778 return 0;
2779}
2780
2781static int si_enable_smc_cac(struct radeon_device *rdev,
2782 struct radeon_ps *radeon_new_state,
2783 bool enable)
2784{
2785 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2786 struct si_power_info *si_pi = si_get_pi(rdev);
2787 PPSMC_Result smc_result;
2788 int ret = 0;
2789
2790 if (ni_pi->enable_cac) {
2791 if (enable) {
2792 if (!si_should_disable_uvd_powertune(rdev, radeon_new_state)) {
2793 if (ni_pi->support_cac_long_term_average) {
2794 smc_result = si_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
2795 if (smc_result != PPSMC_Result_OK)
2796 ni_pi->support_cac_long_term_average = false;
2797 }
2798
2799 smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
2800 if (smc_result != PPSMC_Result_OK) {
2801 ret = -EINVAL;
2802 ni_pi->cac_enabled = false;
2803 } else {
2804 ni_pi->cac_enabled = true;
2805 }
2806
2807 if (si_pi->enable_dte) {
2808 smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
2809 if (smc_result != PPSMC_Result_OK)
2810 ret = -EINVAL;
2811 }
2812 }
2813 } else if (ni_pi->cac_enabled) {
2814 if (si_pi->enable_dte)
2815 smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
2816
2817 smc_result = si_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
2818
2819 ni_pi->cac_enabled = false;
2820
2821 if (ni_pi->support_cac_long_term_average)
2822 smc_result = si_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
2823 }
2824 }
2825 return ret;
2826}
2827
2828static int si_init_smc_spll_table(struct radeon_device *rdev)
2829{
2830 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2831 struct si_power_info *si_pi = si_get_pi(rdev);
2832 SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
2833 SISLANDS_SMC_SCLK_VALUE sclk_params;
2834 u32 fb_div, p_div;
2835 u32 clk_s, clk_v;
2836 u32 sclk = 0;
2837 int ret = 0;
2838 u32 tmp;
2839 int i;
2840
2841 if (si_pi->spll_table_start == 0)
2842 return -EINVAL;
2843
2844 spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
2845 if (spll_table == NULL)
2846 return -ENOMEM;
2847
2848 for (i = 0; i < 256; i++) {
2849 ret = si_calculate_sclk_params(rdev, sclk, &sclk_params);
2850 if (ret)
2851 break;
2852
2853 p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
2854 fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
2855 clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
2856 clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
2857
2858 fb_div &= ~0x00001FFF;
2859 fb_div >>= 1;
2860 clk_v >>= 6;
2861
2862 if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
2863 ret = -EINVAL;
2864 if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
2865 ret = -EINVAL;
2866 if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2867 ret = -EINVAL;
2868 if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2869 ret = -EINVAL;
2870
2871 if (ret)
2872 break;
2873
2874 tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
2875 ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
2876 spll_table->freq[i] = cpu_to_be32(tmp);
2877
2878 tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
2879 ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
2880 spll_table->ss[i] = cpu_to_be32(tmp);
2881
2882 sclk += 512;
2883 }
2884
2886 if (!ret)
2887 ret = si_copy_bytes_to_smc(rdev, si_pi->spll_table_start,
2888 (u8 *)spll_table, sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
2889 si_pi->sram_end);
2890
2891 if (ret)
2892 ni_pi->enable_power_containment = false;
2893
2894 kfree(spll_table);
2895
2896 return ret;
2897}
2898
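/*
 * Editor's sketch (hypothetical, not driver code): the table above holds 256
 * entries in 512 * 10 kHz = 5.12 MHz sclk steps (roughly 0-1.3 GHz), each
 * packing divider values with the usual (value << SHIFT) & MASK idiom after
 * range-checking the value against MASK >> SHIFT.  The packing helper in
 * generic form:
 */
#include <stdint.h>

static uint32_t pack_field(uint32_t value, uint32_t shift, uint32_t mask)
{
	return (value << shift) & mask;
}

/* e.g. freq word = pack_field(fb_div, FBDIV_SHIFT, FBDIV_MASK) |
 *                  pack_field(p_div,  PDIV_SHIFT,  PDIV_MASK); */
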
2899static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2900 struct radeon_ps *rps)
2901{
2902 struct ni_ps *ps = ni_get_ps(rps);
2903 struct radeon_clock_and_voltage_limits *max_limits;
2904 bool disable_mclk_switching;
2905 u32 mclk, sclk;
2906 u16 vddc, vddci;
2907 int i;
2908
2909 if (rdev->pm.dpm.new_active_crtc_count > 1)
2910 disable_mclk_switching = true;
2911 else
2912 disable_mclk_switching = false;
2913
2914 if (rdev->pm.dpm.ac_power)
2915 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2916 else
2917 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
2918
2919 for (i = ps->performance_level_count - 2; i >= 0; i--) {
2920 if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
2921 ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
2922 }
2923 if (rdev->pm.dpm.ac_power == false) {
2924 for (i = 0; i < ps->performance_level_count; i++) {
2925 if (ps->performance_levels[i].mclk > max_limits->mclk)
2926 ps->performance_levels[i].mclk = max_limits->mclk;
2927 if (ps->performance_levels[i].sclk > max_limits->sclk)
2928 ps->performance_levels[i].sclk = max_limits->sclk;
2929 if (ps->performance_levels[i].vddc > max_limits->vddc)
2930 ps->performance_levels[i].vddc = max_limits->vddc;
2931 if (ps->performance_levels[i].vddci > max_limits->vddci)
2932 ps->performance_levels[i].vddci = max_limits->vddci;
2933 }
2934 }
2935
2936 /* XXX validate the min clocks required for display */
2937
2938 if (disable_mclk_switching) {
2939 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
2940 sclk = ps->performance_levels[0].sclk;
2941 vddc = ps->performance_levels[0].vddc;
2942 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
2943 } else {
2944 sclk = ps->performance_levels[0].sclk;
2945 mclk = ps->performance_levels[0].mclk;
2946 vddc = ps->performance_levels[0].vddc;
2947 vddci = ps->performance_levels[0].vddci;
2948 }
2949
2950 /* adjusted low state */
2951 ps->performance_levels[0].sclk = sclk;
2952 ps->performance_levels[0].mclk = mclk;
2953 ps->performance_levels[0].vddc = vddc;
2954 ps->performance_levels[0].vddci = vddci;
2955
2956 for (i = 1; i < ps->performance_level_count; i++) {
2957 if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
2958 ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
2959 if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
2960 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
2961 }
2962
2963 if (disable_mclk_switching) {
2964 mclk = ps->performance_levels[0].mclk;
2965 for (i = 1; i < ps->performance_level_count; i++) {
2966 if (mclk < ps->performance_levels[i].mclk)
2967 mclk = ps->performance_levels[i].mclk;
2968 }
2969 for (i = 0; i < ps->performance_level_count; i++) {
2970 ps->performance_levels[i].mclk = mclk;
2971 ps->performance_levels[i].vddci = vddci;
2972 }
2973 } else {
2974 for (i = 1; i < ps->performance_level_count; i++) {
2975 if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
2976 ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
2977 if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
2978 ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
2979 }
2980 }
2981
2982 for (i = 0; i < ps->performance_level_count; i++)
2983 btc_adjust_clock_combinations(rdev, max_limits,
2984 &ps->performance_levels[i]);
2985
2986 for (i = 0; i < ps->performance_level_count; i++) {
2987 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2988 ps->performance_levels[i].sclk,
2989 max_limits->vddc, &ps->performance_levels[i].vddc);
2990 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2991 ps->performance_levels[i].mclk,
2992 max_limits->vddci, &ps->performance_levels[i].vddci);
2993 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2994 ps->performance_levels[i].mclk,
2995 max_limits->vddc, &ps->performance_levels[i].vddc);
2996 btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
2997 rdev->clock.current_dispclk,
2998 max_limits->vddc, &ps->performance_levels[i].vddc);
2999 }
3000
3001 for (i = 0; i < ps->performance_level_count; i++) {
3002 btc_apply_voltage_delta_rules(rdev,
3003 max_limits->vddc, max_limits->vddci,
3004 &ps->performance_levels[i].vddc,
3005 &ps->performance_levels[i].vddci);
3006 }
3007
3008 ps->dc_compatible = true;
3009 for (i = 0; i < ps->performance_level_count; i++) {
3010 if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
3011 ps->dc_compatible = false;
3012 }
3013
3014}
3015
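/*
 * Editor's sketch (hypothetical, not driver code): the adjust rules above
 * repeatedly force the per-level arrays to be monotonic -- the first pass
 * walks backward pulling vddc down to the next level, the later passes walk
 * forward pulling sclk/mclk/vddc/vddci up to the previous level.  The
 * forward pass in generic form:
 */
#include <stddef.h>
#include <stdint.h>

static void make_non_decreasing(uint32_t *v, size_t n)
{
	size_t i;

	for (i = 1; i < n; i++)
		if (v[i] < v[i - 1])
			v[i] = v[i - 1];
}
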
3016#if 0
3017static int si_read_smc_soft_register(struct radeon_device *rdev,
3018 u16 reg_offset, u32 *value)
3019{
3020 struct si_power_info *si_pi = si_get_pi(rdev);
3021
3022 return si_read_smc_sram_dword(rdev,
3023 si_pi->soft_regs_start + reg_offset, value,
3024 si_pi->sram_end);
3025}
3026#endif
3027
3028static int si_write_smc_soft_register(struct radeon_device *rdev,
3029 u16 reg_offset, u32 value)
3030{
3031 struct si_power_info *si_pi = si_get_pi(rdev);
3032
3033 return si_write_smc_sram_dword(rdev,
3034 si_pi->soft_regs_start + reg_offset,
3035 value, si_pi->sram_end);
3036}
3037
3038static bool si_is_special_1gb_platform(struct radeon_device *rdev)
3039{
3040 bool ret = false;
3041 u32 tmp, width, row, column, bank, density;
3042 bool is_memory_gddr5, is_special;
3043
3044 tmp = RREG32(MC_SEQ_MISC0);
3045 is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
3046 is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) &&
3047 (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
3048
3049 WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
3050 width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
3051
3052 tmp = RREG32(MC_ARB_RAMCFG);
3053 row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
3054 column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
3055 bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
3056
3057 density = (1 << (row + column - 20 + bank)) * width;
3058
3059 if ((rdev->pdev->device == 0x6819) &&
3060 is_memory_gddr5 && is_special && (density == 0x400))
3061 ret = true;
3062
3063 return ret;
3064}
3065
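/*
 * Editor's sketch (hypothetical, not driver code): density above is megabits
 * per DRAM device: 2^rows * 2^columns * 2^banks cells of `width` bits,
 * divided by 2^20.  E.g. row = 14, column = 10, bank = 3, width = 32 gives
 * (1 << (14 + 10 - 20 + 3)) * 32 = 4096 Mbit; the quirk matches exactly
 * 0x400 = 1024 Mbit (1 Gbit) parts on device 0x6819.
 */
#include <stdint.h>

static uint32_t dram_density_mbit(uint32_t row, uint32_t column,
				  uint32_t bank, uint32_t width)
{
	return (1u << (row + column - 20 + bank)) * width;
}
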
3066static void si_get_leakage_vddc(struct radeon_device *rdev)
3067{
3068 struct si_power_info *si_pi = si_get_pi(rdev);
3069 u16 vddc, count = 0;
3070 int i, ret;
3071
3072 for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
3073 ret = radeon_atom_get_leakage_vddc_based_on_leakage_idx(rdev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
3074
3075 if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
3076 si_pi->leakage_voltage.entries[count].voltage = vddc;
3077 si_pi->leakage_voltage.entries[count].leakage_index =
3078 SISLANDS_LEAKAGE_INDEX0 + i;
3079 count++;
3080 }
3081 }
3082 si_pi->leakage_voltage.count = count;
3083}
3084
3085static int si_get_leakage_voltage_from_leakage_index(struct radeon_device *rdev,
3086 u32 index, u16 *leakage_voltage)
3087{
3088 struct si_power_info *si_pi = si_get_pi(rdev);
3089 int i;
3090
3091 if (leakage_voltage == NULL)
3092 return -EINVAL;
3093
3094 if ((index & 0xff00) != 0xff00)
3095 return -EINVAL;
3096
3097 if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
3098 return -EINVAL;
3099
3100 if (index < SISLANDS_LEAKAGE_INDEX0)
3101 return -EINVAL;
3102
3103 for (i = 0; i < si_pi->leakage_voltage.count; i++) {
3104 if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
3105 *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
3106 return 0;
3107 }
3108 }
3109 return -EAGAIN;
3110}
3111
3112static void si_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
3113{
3114 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3115 bool want_thermal_protection;
3116 enum radeon_dpm_event_src dpm_event_src;
3117
3118 switch (sources) {
3119 case 0:
3120 default:
3121 want_thermal_protection = false;
3122 break;
3123 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
3124 want_thermal_protection = true;
3125 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
3126 break;
3127 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
3128 want_thermal_protection = true;
3129 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
3130 break;
3131 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
3132 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
3133 want_thermal_protection = true;
3134 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
3135 break;
3136 }
3137
3138 if (want_thermal_protection) {
3139 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
3140 if (pi->thermal_protection)
3141 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
3142 } else {
3143 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
3144 }
3145}
3146
3147static void si_enable_auto_throttle_source(struct radeon_device *rdev,
3148 enum radeon_dpm_auto_throttle_src source,
3149 bool enable)
3150{
3151 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3152
3153 if (enable) {
3154 if (!(pi->active_auto_throttle_sources & (1 << source))) {
3155 pi->active_auto_throttle_sources |= 1 << source;
3156 si_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
3157 }
3158 } else {
3159 if (pi->active_auto_throttle_sources & (1 << source)) {
3160 pi->active_auto_throttle_sources &= ~(1 << source);
3161 si_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
3162 }
3163 }
3164}
3165
3166static void si_start_dpm(struct radeon_device *rdev)
3167{
3168 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
3169}
3170
3171static void si_stop_dpm(struct radeon_device *rdev)
3172{
3173 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
3174}
3175
3176static void si_enable_sclk_control(struct radeon_device *rdev, bool enable)
3177{
3178 if (enable)
3179 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
3180 else
3181 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
3183 }
3184
3185#if 0
3186static int si_notify_hardware_of_thermal_state(struct radeon_device *rdev,
3187 u32 thermal_level)
3188{
3189 PPSMC_Result ret;
3190
3191 if (thermal_level == 0) {
3192 ret = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
3193 if (ret == PPSMC_Result_OK)
3194 return 0;
3195 else
3196 return -EINVAL;
3197 }
3198 return 0;
3199}
3200
3201static void si_notify_hardware_vpu_recovery_event(struct radeon_device *rdev)
3202{
3203 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
3204}
3205#endif
3206
3207#if 0
3208static int si_notify_hw_of_powersource(struct radeon_device *rdev, bool ac_power)
3209{
3210 if (ac_power)
3211 return (si_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
3212 0 : -EINVAL;
3213
3214 return 0;
3215}
3216#endif
3217
3218static PPSMC_Result si_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
3219 PPSMC_Msg msg, u32 parameter)
3220{
3221 WREG32(SMC_SCRATCH0, parameter);
3222 return si_send_msg_to_smc(rdev, msg);
3223}
3224
3225static int si_restrict_performance_levels_before_switch(struct radeon_device *rdev)
3226{
3227 if (si_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
3228 return -EINVAL;
3229
3230 return (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
3231 0 : -EINVAL;
3232}
3233
3234#if 0
3235static int si_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
3236{
3237 if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3238 return -EINVAL;
3239
3240 return (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
3241 0 : -EINVAL;
3242}
3243#endif
3244
3245static int si_set_boot_state(struct radeon_device *rdev)
3246{
3247 return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
3248 0 : -EINVAL;
3249}
3250
3251static int si_set_sw_state(struct radeon_device *rdev)
3252{
3253 return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
3254 0 : -EINVAL;
3255}
3256
3257static int si_halt_smc(struct radeon_device *rdev)
3258{
3259 if (si_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
3260 return -EINVAL;
3261
3262 return (si_wait_for_smc_inactive(rdev) == PPSMC_Result_OK) ?
3263 0 : -EINVAL;
3264}
3265
3266static int si_resume_smc(struct radeon_device *rdev)
3267{
3268 if (si_send_msg_to_smc(rdev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
3269 return -EINVAL;
3270
3271 return (si_send_msg_to_smc(rdev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
3272 0 : -EINVAL;
3273}
3274
3275static void si_dpm_start_smc(struct radeon_device *rdev)
3276{
3277 si_program_jump_on_start(rdev);
3278 si_start_smc(rdev);
3279 si_start_smc_clock(rdev);
3280}
3281
3282static void si_dpm_stop_smc(struct radeon_device *rdev)
3283{
3284 si_reset_smc(rdev);
3285 si_stop_smc_clock(rdev);
3286}
3287
3288static int si_process_firmware_header(struct radeon_device *rdev)
3289{
3290 struct si_power_info *si_pi = si_get_pi(rdev);
3291 u32 tmp;
3292 int ret;
3293
3294 ret = si_read_smc_sram_dword(rdev,
3295 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3296 SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
3297 &tmp, si_pi->sram_end);
3298 if (ret)
3299 return ret;
3300
3301 si_pi->state_table_start = tmp;
3302
3303 ret = si_read_smc_sram_dword(rdev,
3304 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3305 SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
3306 &tmp, si_pi->sram_end);
3307 if (ret)
3308 return ret;
3309
3310 si_pi->soft_regs_start = tmp;
3311
3312 ret = si_read_smc_sram_dword(rdev,
3313 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3314 SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
3315 &tmp, si_pi->sram_end);
3316 if (ret)
3317 return ret;
3318
3319 si_pi->mc_reg_table_start = tmp;
3320
3321 ret = si_read_smc_sram_dword(rdev,
3322 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3323 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
3324 &tmp, si_pi->sram_end);
3325 if (ret)
3326 return ret;
3327
3328 si_pi->arb_table_start = tmp;
3329
3330 ret = si_read_smc_sram_dword(rdev,
3331 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3332 SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
3333 &tmp, si_pi->sram_end);
3334 if (ret)
3335 return ret;
3336
3337 si_pi->cac_table_start = tmp;
3338
3339 ret = si_read_smc_sram_dword(rdev,
3340 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3341 SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
3342 &tmp, si_pi->sram_end);
3343 if (ret)
3344 return ret;
3345
3346 si_pi->dte_table_start = tmp;
3347
3348 ret = si_read_smc_sram_dword(rdev,
3349 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3350 SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
3351 &tmp, si_pi->sram_end);
3352 if (ret)
3353 return ret;
3354
3355 si_pi->spll_table_start = tmp;
3356
3357 ret = si_read_smc_sram_dword(rdev,
3358 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3359 SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
3360 &tmp, si_pi->sram_end);
3361 if (ret)
3362 return ret;
3363
3364 si_pi->papm_cfg_table_start = tmp;
3365
3366 return ret;
3367}
3368
3369static void si_read_clock_registers(struct radeon_device *rdev)
3370{
3371 struct si_power_info *si_pi = si_get_pi(rdev);
3372
3373 si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
3374 si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
3375 si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
3376 si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
3377 si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
3378 si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
3379 si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
3380 si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
3381 si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
3382 si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
3383 si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
3384 si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
3385 si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
3386 si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
3387 si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
3388}
3389
3390static void si_enable_thermal_protection(struct radeon_device *rdev,
3391 bool enable)
3392{
3393 if (enable)
3394 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
3395 else
3396 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
3397}
3398
3399static void si_enable_acpi_power_management(struct radeon_device *rdev)
3400{
3401 WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
3402}
3403
3404#if 0
3405static int si_enter_ulp_state(struct radeon_device *rdev)
3406{
3407 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
3408
3409 udelay(25000);
3410
3411 return 0;
3412}
3413
3414static int si_exit_ulp_state(struct radeon_device *rdev)
3415{
3416 int i;
3417
3418 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
3419
3420 udelay(7000);
3421
3422 for (i = 0; i < rdev->usec_timeout; i++) {
3423 if (RREG32(SMC_RESP_0) == 1)
3424 break;
3425 udelay(1000);
3426 }
3427
3428 return 0;
3429}
3430#endif
3431
3432static int si_notify_smc_display_change(struct radeon_device *rdev,
3433 bool has_display)
3434{
3435 PPSMC_Msg msg = has_display ?
3436 PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
3437
3438 return (si_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?
3439 0 : -EINVAL;
3440}
3441
3442static void si_program_response_times(struct radeon_device *rdev)
3443{
3444 u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
3445 u32 vddc_dly, acpi_dly, vbi_dly;
3446 u32 reference_clock;
3447
3448 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
3449
3450 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
3451 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
3452
3453 if (voltage_response_time == 0)
3454 voltage_response_time = 1000;
3455
3456 acpi_delay_time = 15000;
3457 vbi_time_out = 100000;
3458
3459 reference_clock = radeon_get_xclk(rdev);
3460
3461 vddc_dly = (voltage_response_time * reference_clock) / 100;
3462 acpi_dly = (acpi_delay_time * reference_clock) / 100;
3463 vbi_dly = (vbi_time_out * reference_clock) / 100;
3464
3465 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
3466 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
3467 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
3468 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
3469}
3470
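/*
 * Editor's sketch (hypothetical, not driver code): with the reference clock
 * reported in 10 kHz units, time_us * refclk / 100 converts microseconds to
 * reference-clock ticks: time_us * (refclk * 10^4 Hz) / 10^6 simplifies to
 * time_us * refclk / 100.  E.g. the 1000 us voltage-response default at a
 * 100 MHz xclk (refclk = 10000) is 1000 * 10000 / 100 = 100000 ticks.
 */
#include <stdint.h>

static uint32_t us_to_xclk_ticks(uint32_t time_us, uint32_t refclk_10khz)
{
	return (time_us * refclk_10khz) / 100;
}
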
3471static void si_program_ds_registers(struct radeon_device *rdev)
3472{
3473 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3474 u32 tmp = 1; /* XXX: 0x10 on tahiti A0 */
3475
3476 if (eg_pi->sclk_deep_sleep) {
3477 WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
3478 WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
3479 ~AUTOSCALE_ON_SS_CLEAR);
3480 }
3481}
3482
3483static void si_program_display_gap(struct radeon_device *rdev)
3484{
3485 u32 tmp, pipe;
3486 int i;
3487
3488 tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
3489 if (rdev->pm.dpm.new_active_crtc_count > 0)
3490 tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
3491 else
3492 tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
3493
3494 if (rdev->pm.dpm.new_active_crtc_count > 1)
3495 tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
3496 else
3497 tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
3498
3499 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
3500
3501 tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
3502 pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
3503
3504 if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
3505 (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
3506 /* find the first active crtc */
3507 for (i = 0; i < rdev->num_crtc; i++) {
3508 if (rdev->pm.dpm.new_active_crtcs & (1 << i))
3509 break;
3510 }
3511 if (i == rdev->num_crtc)
3512 pipe = 0;
3513 else
3514 pipe = i;
3515
3516 tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
3517 tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
3518 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
3519 }
3520
3521 si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
3522}
3523
3524static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
3525{
3526 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3527
3528 if (enable) {
3529 if (pi->sclk_ss)
3530 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
3531 } else {
3532 WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
3533 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
3534 }
3535}
3536
3537static void si_setup_bsp(struct radeon_device *rdev)
3538{
3539 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3540 u32 xclk = radeon_get_xclk(rdev);
3541
3542 r600_calculate_u_and_p(pi->asi,
3543 xclk,
3544 16,
3545 &pi->bsp,
3546 &pi->bsu);
3547
3548 r600_calculate_u_and_p(pi->pasi,
3549 xclk,
3550 16,
3551 &pi->pbsp,
3552 &pi->pbsu);
3553
3555 pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
3556 pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
3557
3558 WREG32(CG_BSP, pi->dsp);
3559}
3560
3561static void si_program_git(struct radeon_device *rdev)
3562{
3563 WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
3564}
3565
3566static void si_program_tp(struct radeon_device *rdev)
3567{
3568 int i;
3569 enum r600_td td = R600_TD_DFLT;
3570
3571 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
3572 WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
3573
3574 if (td == R600_TD_AUTO)
3575 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
3576 else
3577 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
3578
3579 if (td == R600_TD_UP)
3580 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
3581
3582 if (td == R600_TD_DOWN)
3583 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
3584}
3585
3586static void si_program_tpp(struct radeon_device *rdev)
3587{
3588 WREG32(CG_TPC, R600_TPC_DFLT);
3589}
3590
3591static void si_program_sstp(struct radeon_device *rdev)
3592{
3593 WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
3594}
3595
3596static void si_enable_display_gap(struct radeon_device *rdev)
3597{
3598 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
3599
3600 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
3601 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
3602 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
3603 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
3604}
3605
3606static void si_program_vc(struct radeon_device *rdev)
3607{
3608 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3609
3610 WREG32(CG_FTV, pi->vrc);
3611}
3612
3613static void si_clear_vc(struct radeon_device *rdev)
3614{
3615 WREG32(CG_FTV, 0);
3616}
3617
3618static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
3619{
3620 u8 mc_para_index;
3621
3622 if (memory_clock < 10000)
3623 mc_para_index = 0;
3624 else if (memory_clock >= 80000)
3625 mc_para_index = 0x0f;
3626 else
3627 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
3628 return mc_para_index;
3629}
3630
3631static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
3632{
3633 u8 mc_para_index;
3634
3635 if (strobe_mode) {
3636 if (memory_clock < 12500)
3637 mc_para_index = 0x00;
3638 else if (memory_clock > 47500)
3639 mc_para_index = 0x0f;
3640 else
3641 mc_para_index = (u8)((memory_clock - 10000) / 2500);
3642 } else {
3643 if (memory_clock < 65000)
3644 mc_para_index = 0x00;
3645 else if (memory_clock > 135000)
3646 mc_para_index = 0x0f;
3647 else
3648 mc_para_index = (u8)((memory_clock - 60000) / 5000);
3649 }
3650 return mc_para_index;
3651}
3652
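/*
 * Editor's sketch (hypothetical, not driver code): both ratio helpers above
 * quantize a memory clock (10 kHz units) into a 4-bit index: clamp to
 * [lo, hi], then (clk - base) / step.  For the GDDR5 strobe branch, base =
 * 10000 and step = 2500, so 12500 (125 MHz) -> 1 and 47500 -> 15.  Generic
 * form:
 */
#include <stdint.h>

static uint8_t mclk_ratio_index(uint32_t clk, uint32_t lo, uint32_t hi,
				uint32_t base, uint32_t step)
{
	if (clk < lo)
		return 0x00;
	if (clk > hi)
		return 0x0f;
	return (uint8_t)((clk - base) / step);
}
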
3653static u8 si_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
3654{
3655 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3656 bool strobe_mode = false;
3657 u8 result = 0;
3658
3659 if (mclk <= pi->mclk_strobe_mode_threshold)
3660 strobe_mode = true;
3661
3662 if (pi->mem_gddr5)
3663 result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
3664 else
3665 result = si_get_ddr3_mclk_frequency_ratio(mclk);
3666
3667 if (strobe_mode)
3668 result |= SISLANDS_SMC_STROBE_ENABLE;
3669
3670 return result;
3671}
3672
3673static int si_upload_firmware(struct radeon_device *rdev)
3674{
3675 struct si_power_info *si_pi = si_get_pi(rdev);
3676 int ret;
3677
3678 si_reset_smc(rdev);
3679 si_stop_smc_clock(rdev);
3680
3681 ret = si_load_smc_ucode(rdev, si_pi->sram_end);
3682
3683 return ret;
3684}
3685
3686static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
3687 const struct atom_voltage_table *table,
3688 const struct radeon_phase_shedding_limits_table *limits)
3689{
3690 u32 data, num_bits, num_levels;
3691
3692 if ((table == NULL) || (limits == NULL))
3693 return false;
3694
3695 data = table->mask_low;
3696
3697 num_bits = hweight32(data);
3698
3699 if (num_bits == 0)
3700 return false;
3701
3702 num_levels = (1 << num_bits);
3703
3704 if (table->count != num_levels)
3705 return false;
3706
3707 if (limits->count != (num_levels - 1))
3708 return false;
3709
3710 return true;
3711}
3712
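/*
 * Editor's sketch (hypothetical, not driver code): with n bits set in the
 * SMIO mask there are 2^n selectable phase-shedding levels, so the voltage
 * table needs exactly 2^n entries and the limits table one fewer
 * (presumably one limit between each adjacent pair of levels).  E.g. mask
 * 0x3 -> 4 levels, 3 limits.  __builtin_popcount stands in for the kernel's
 * hweight32().
 */
#include <stdbool.h>
#include <stdint.h>

static bool phase_tables_consistent(uint32_t mask_low, uint32_t table_count,
				    uint32_t limits_count)
{
	uint32_t num_bits = (uint32_t)__builtin_popcount(mask_low);

	if (num_bits == 0)
		return false;
	return table_count == (1u << num_bits) &&
	       limits_count == (1u << num_bits) - 1;
}
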
3713static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
3714 struct atom_voltage_table *voltage_table)
3715{
3716 unsigned int i, diff;
3717
3718 if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS)
3719 return;
3720
3721 diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS;
3722
3723 for (i = 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++)
3724 voltage_table->entries[i] = voltage_table->entries[i + diff];
3725
3726 voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS;
3727}
3728
3729static int si_construct_voltage_tables(struct radeon_device *rdev)
3730{
3731 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3732 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3733 struct si_power_info *si_pi = si_get_pi(rdev);
3734 int ret;
3735
3736 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
3737 VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
3738 if (ret)
3739 return ret;
3740
3741 if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3742 si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table);
3743
3744 if (eg_pi->vddci_control) {
3745 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
3746 VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
3747 if (ret)
3748 return ret;
3749
3750 if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3751 si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table);
3752 }
3753
3754 if (pi->mvdd_control) {
3755 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
3756 VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
3757
3758 if (ret) {
3759 pi->mvdd_control = false;
3760 return ret;
3761 }
3762
3763 if (si_pi->mvdd_voltage_table.count == 0) {
3764 pi->mvdd_control = false;
3765 return -EINVAL;
3766 }
3767
3768 if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3769 si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table);
3770 }
3771
3772 if (si_pi->vddc_phase_shed_control) {
3773 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
3774 VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
3775 if (ret)
3776 si_pi->vddc_phase_shed_control = false;
3777
3778 if ((si_pi->vddc_phase_shed_table.count == 0) ||
3779 (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
3780 si_pi->vddc_phase_shed_control = false;
3781 }
3782
3783 return 0;
3784}
3785
3786static void si_populate_smc_voltage_table(struct radeon_device *rdev,
3787 const struct atom_voltage_table *voltage_table,
3788 SISLANDS_SMC_STATETABLE *table)
3789{
3790 unsigned int i;
3791
3792 for (i = 0; i < voltage_table->count; i++)
3793 table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
3794}
3795
3796static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
3797 SISLANDS_SMC_STATETABLE *table)
3798{
3799 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3800 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3801 struct si_power_info *si_pi = si_get_pi(rdev);
3802 u8 i;
3803
3804 if (eg_pi->vddc_voltage_table.count) {
3805 si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
3806 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
3807 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
3808
3809 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
3810 if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
3811 table->maxVDDCIndexInPPTable = i;
3812 break;
3813 }
3814 }
3815 }
3816
3817 if (eg_pi->vddci_voltage_table.count) {
3818 si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
3819
3820 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
3821 cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
3822 }
3823
3825 if (si_pi->mvdd_voltage_table.count) {
3826 si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
3827
3828 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
3829 cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
3830 }
3831
3832 if (si_pi->vddc_phase_shed_control) {
3833 if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
3834 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
3835 si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
3836
3837 table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
3838 cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
3839
3840 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
3841 (u32)si_pi->vddc_phase_shed_table.phase_delay);
3842 } else {
3843 si_pi->vddc_phase_shed_control = false;
3844 }
3845 }
3846
3847 return 0;
3848}
3849
3850static int si_populate_voltage_value(struct radeon_device *rdev,
3851 const struct atom_voltage_table *table,
3852 u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
3853{
3854 unsigned int i;
3855
3856 for (i = 0; i < table->count; i++) {
3857 if (value <= table->entries[i].value) {
3858 voltage->index = (u8)i;
3859 voltage->value = cpu_to_be16(table->entries[i].value);
3860 break;
3861 }
3862 }
3863
3864 if (i >= table->count)
3865 return -EINVAL;
3866
3867 return 0;
3868}
3869
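/*
 * si_populate_voltage_value() relies on the ATOM voltage tables being
 * sorted in ascending order: it picks the first entry at or above the
 * requested value and reports -EINVAL only when the request exceeds the
 * highest step. A standalone sketch of the same lookup, with hypothetical
 * types:
 */
static int lookup_step(const unsigned short *steps, unsigned int count,
		       unsigned short want, unsigned int *index)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (want <= steps[i]) {
			*index = i;	/* first step that can supply 'want' */
			return 0;
		}
	}
	return -1;	/* request above the highest supported step */
}
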
3870static int si_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
3871 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
3872{
3873 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3874 struct si_power_info *si_pi = si_get_pi(rdev);
3875
3876 if (pi->mvdd_control) {
3877 if (mclk <= pi->mvdd_split_frequency)
3878 voltage->index = 0;
3879 else
3880 voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
3881
3882 voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
3883 }
3884 return 0;
3885}
3886
3887static int si_get_std_voltage_value(struct radeon_device *rdev,
3888 SISLANDS_SMC_VOLTAGE_VALUE *voltage,
3889 u16 *std_voltage)
3890{
3891 u16 v_index;
3892 bool voltage_found = false;
3893 *std_voltage = be16_to_cpu(voltage->value);
3894
3895 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
3896 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
3897 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
3898 return -EINVAL;
3899
3900 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
3901 if (be16_to_cpu(voltage->value) ==
3902 (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
3903 voltage_found = true;
3904 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
3905 *std_voltage =
3906 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
3907 else
3908 *std_voltage =
3909 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
3910 break;
3911 }
3912 }
3913
3914 if (!voltage_found) {
3915 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
3916 if (be16_to_cpu(voltage->value) <=
3917 (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
3918 voltage_found = true;
3919 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
3920 *std_voltage =
3921 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
3922 else
3923 *std_voltage =
3924 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
3925 break;
3926 }
3927 }
3928 }
3929 } else {
3930 if ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
3931 *std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
3932 }
3933 }
3934
3935 return 0;
3936}
3937
3938static int si_populate_std_voltage_value(struct radeon_device *rdev,
3939 u16 value, u8 index,
3940 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
3941{
3942 voltage->index = index;
3943 voltage->value = cpu_to_be16(value);
3944
3945 return 0;
3946}
3947
3948static int si_populate_phase_shedding_value(struct radeon_device *rdev,
3949 const struct radeon_phase_shedding_limits_table *limits,
3950 u16 voltage, u32 sclk, u32 mclk,
3951 SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
3952{
3953 unsigned int i;
3954
3955 for (i = 0; i < limits->count; i++) {
3956 if ((voltage <= limits->entries[i].voltage) &&
3957 (sclk <= limits->entries[i].sclk) &&
3958 (mclk <= limits->entries[i].mclk))
3959 break;
3960 }
3961
3962 smc_voltage->phase_settings = (u8)i;
3963
3964 return 0;
3965}
3966
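/*
 * Note the contrast with si_populate_voltage_value(): running off the end
 * of the limits table is not an error here -- i == limits->count simply
 * becomes the phase-shed setting for operating points no row covers. A
 * standalone sketch of that scan, with a hypothetical row type:
 */
struct shed_row { unsigned short volt; unsigned int sclk, mclk; };

static unsigned char shed_setting(const struct shed_row *rows,
				  unsigned int count, unsigned short volt,
				  unsigned int sclk, unsigned int mclk)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (volt <= rows[i].volt && sclk <= rows[i].sclk &&
		    mclk <= rows[i].mclk)
			break;
	return (unsigned char)i;
}
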
3967static int si_init_arb_table_index(struct radeon_device *rdev)
3968{
3969 struct si_power_info *si_pi = si_get_pi(rdev);
3970 u32 tmp;
3971 int ret;
3972
3973 ret = si_read_smc_sram_dword(rdev, si_pi->arb_table_start, &tmp, si_pi->sram_end);
3974 if (ret)
3975 return ret;
3976
3977 tmp &= 0x00FFFFFF;
3978 tmp |= MC_CG_ARB_FREQ_F1 << 24;
3979
3980 return si_write_smc_sram_dword(rdev, si_pi->arb_table_start, tmp, si_pi->sram_end);
3981}
3982
3983static int si_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
3984{
3985 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
3986}
3987
3988static int si_reset_to_default(struct radeon_device *rdev)
3989{
3990 return (si_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
3991 0 : -EINVAL;
3992}
3993
3994static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
3995{
3996 struct si_power_info *si_pi = si_get_pi(rdev);
3997 u32 tmp;
3998 int ret;
3999
4000 ret = si_read_smc_sram_dword(rdev, si_pi->arb_table_start,
4001 &tmp, si_pi->sram_end);
4002 if (ret)
4003 return ret;
4004
4005 tmp = (tmp >> 24) & 0xff;
4006
4007 if (tmp == MC_CG_ARB_FREQ_F0)
4008 return 0;
4009
4010 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
4011}
4012
4013static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
4014 u32 engine_clock)
4015{
4016 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4017 u32 dram_rows;
4018 u32 dram_refresh_rate;
4019 u32 mc_arb_rfsh_rate;
4020 u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
4021
4022 if (pi->mem_gddr5)
4023 dram_rows = 1 << (tmp + 10);
4024 else
4025 dram_rows = DDR3_DRAM_ROWS;
4026
4027 dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
4028 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
4029
4030 return mc_arb_rfsh_rate;
4031}
4032
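/*
 * A worked example of the refresh-rate math above, assuming the NOOFROWS
 * field reads back 2 (GDDR5: 1 << (2 + 10) = 4096 rows) and the low two
 * MC_SEQ_MISC0 bits read back 2 (refresh rate 1 << (2 + 3) = 32), with the
 * engine clock in the driver's usual 10 kHz units:
 */
static unsigned int refresh_rate_example(unsigned int engine_clock,
					 unsigned int rows,
					 unsigned int refresh)
{
	return (engine_clock * 10 * refresh / rows - 32) / 64;
}
/* refresh_rate_example(80000, 4096, 32) == 97 for an 800 MHz sclk, which
 * still fits the u8 the caller casts the result into. */
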
4033static int si_populate_memory_timing_parameters(struct radeon_device *rdev,
4034 struct rv7xx_pl *pl,
4035 SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
4036{
4037 u32 dram_timing;
4038 u32 dram_timing2;
4039 u32 burst_time;
4040
4041 arb_regs->mc_arb_rfsh_rate =
4042 (u8)si_calculate_memory_refresh_rate(rdev, pl->sclk);
4043
4044 radeon_atom_set_engine_dram_timings(rdev,
4045 pl->sclk,
4046 pl->mclk);
4047
4048 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
4049 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
4050 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
4051
4052 arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
4053 arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
4054 arb_regs->mc_arb_burst_time = (u8)burst_time;
4055
4056 return 0;
4057}
4058
4059static int si_do_program_memory_timing_parameters(struct radeon_device *rdev,
4060 struct radeon_ps *radeon_state,
4061 unsigned int first_arb_set)
4062{
4063 struct si_power_info *si_pi = si_get_pi(rdev);
4064 struct ni_ps *state = ni_get_ps(radeon_state);
4065 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
4066 int i, ret = 0;
4067
4068 for (i = 0; i < state->performance_level_count; i++) {
4069 ret = si_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
4070 if (ret)
4071 break;
4072 ret = si_copy_bytes_to_smc(rdev,
4073 si_pi->arb_table_start +
4074 offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
4075 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
4076 (u8 *)&arb_regs,
4077 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
4078 si_pi->sram_end);
4079 if (ret)
4080 break;
4081 }
4082
4083 return ret;
4084}
4085
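/*
 * Each performance level's ARB register set occupies a fixed slot in SMC
 * SRAM: arb_table_start + offsetof(..., data) + set_size * slot, with the
 * driver states starting at SISLANDS_DRIVER_STATE_ARB_INDEX. A standalone
 * sketch of that address computation over a hypothetical layout:
 */
#include <stddef.h>	/* offsetof; standalone sketch only */

struct arb_set { unsigned int timing, timing2; unsigned char rfsh, burst; };
struct arb_table { unsigned int arb_current; struct arb_set data[16]; };

static unsigned long arb_slot_addr(unsigned long table_start,
				   unsigned int first_set, unsigned int i)
{
	return table_start + offsetof(struct arb_table, data) +
	       sizeof(struct arb_set) * (first_set + i);
}
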
4086static int si_program_memory_timing_parameters(struct radeon_device *rdev,
4087 struct radeon_ps *radeon_new_state)
4088{
4089 return si_do_program_memory_timing_parameters(rdev, radeon_new_state,
4090 SISLANDS_DRIVER_STATE_ARB_INDEX);
4091}
4092
4093static int si_populate_initial_mvdd_value(struct radeon_device *rdev,
4094 struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4095{
4096 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4097 struct si_power_info *si_pi = si_get_pi(rdev);
4098
4099 if (pi->mvdd_control)
4100 return si_populate_voltage_value(rdev, &si_pi->mvdd_voltage_table,
4101 si_pi->mvdd_bootup_value, voltage);
4102
4103 return 0;
4104}
4105
4106static int si_populate_smc_initial_state(struct radeon_device *rdev,
4107 struct radeon_ps *radeon_initial_state,
4108 SISLANDS_SMC_STATETABLE *table)
4109{
4110 struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
4111 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4112 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4113 struct si_power_info *si_pi = si_get_pi(rdev);
4114 u32 reg;
4115 int ret;
4116
4117 table->initialState.levels[0].mclk.vDLL_CNTL =
4118 cpu_to_be32(si_pi->clock_registers.dll_cntl);
4119 table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
4120 cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
4121 table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
4122 cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
4123 table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
4124 cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
4125 table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
4126 cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
4127 table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
4128 cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
4129 table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
4130 cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
4131 table->initialState.levels[0].mclk.vMPLL_SS =
4132 cpu_to_be32(si_pi->clock_registers.mpll_ss1);
4133 table->initialState.levels[0].mclk.vMPLL_SS2 =
4134 cpu_to_be32(si_pi->clock_registers.mpll_ss2);
4135
4136 table->initialState.levels[0].mclk.mclk_value =
4137 cpu_to_be32(initial_state->performance_levels[0].mclk);
4138
4139 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
4140 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
4141 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
4142 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
4143 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
4144 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
4145 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
4146 cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
4147 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
4148 cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
4149 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
4150 cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
4151
4152 table->initialState.levels[0].sclk.sclk_value =
4153 cpu_to_be32(initial_state->performance_levels[0].sclk);
4154
4155 table->initialState.levels[0].arbRefreshState =
4156 SISLANDS_INITIAL_STATE_ARB_INDEX;
4157
4158 table->initialState.levels[0].ACIndex = 0;
4159
4160 ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
4161 initial_state->performance_levels[0].vddc,
4162 &table->initialState.levels[0].vddc);
4163
4164 if (!ret) {
4165 u16 std_vddc;
4166
4167 ret = si_get_std_voltage_value(rdev,
4168 &table->initialState.levels[0].vddc,
4169 &std_vddc);
4170 if (!ret)
4171 si_populate_std_voltage_value(rdev, std_vddc,
4172 table->initialState.levels[0].vddc.index,
4173 &table->initialState.levels[0].std_vddc);
4174 }
4175
4176 if (eg_pi->vddci_control)
4177 si_populate_voltage_value(rdev,
4178 &eg_pi->vddci_voltage_table,
4179 initial_state->performance_levels[0].vddci,
4180 &table->initialState.levels[0].vddci);
4181
4182 if (si_pi->vddc_phase_shed_control)
4183 si_populate_phase_shedding_value(rdev,
4184 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
4185 initial_state->performance_levels[0].vddc,
4186 initial_state->performance_levels[0].sclk,
4187 initial_state->performance_levels[0].mclk,
4188 &table->initialState.levels[0].vddc);
4189
4190 si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
4191
4192 reg = CG_R(0xffff) | CG_L(0);
4193 table->initialState.levels[0].aT = cpu_to_be32(reg);
4194
4195 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
4196
4197 table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
4198
4199 if (pi->mem_gddr5) {
4200 table->initialState.levels[0].strobeMode =
4201 si_get_strobe_mode_settings(rdev,
4202 initial_state->performance_levels[0].mclk);
4203
4204 if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
4205 table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
4206 else
4207 table->initialState.levels[0].mcFlags = 0;
4208 }
4209
4210 table->initialState.levelCount = 1;
4211
4212 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
4213
4214 table->initialState.levels[0].dpm2.MaxPS = 0;
4215 table->initialState.levels[0].dpm2.NearTDPDec = 0;
4216 table->initialState.levels[0].dpm2.AboveSafeInc = 0;
4217 table->initialState.levels[0].dpm2.BelowSafeInc = 0;
4218 table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
4219
4220 reg = MIN_POWER_MASK | MAX_POWER_MASK;
4221 table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
4222
4223 reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
4224 table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
4225
4226 return 0;
4227}
4228
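/*
 * The SMC consumes the state table big-endian, which is why every 32-bit
 * register snapshot above goes through cpu_to_be32() before upload. A
 * standalone equivalent of that swap for a little-endian host (on a
 * big-endian host cpu_to_be32() is the identity and this helper would
 * not be):
 */
static unsigned int swap_be32(unsigned int x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8) | ((x & 0xff000000u) >> 24);
}
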
4229static int si_populate_smc_acpi_state(struct radeon_device *rdev,
4230 SISLANDS_SMC_STATETABLE *table)
4231{
4232 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4233 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4234 struct si_power_info *si_pi = si_get_pi(rdev);
4235 u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
4236 u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
4237 u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
4238 u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
4239 u32 dll_cntl = si_pi->clock_registers.dll_cntl;
4240 u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
4241 u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
4242 u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
4243 u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
4244 u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
4245 u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
4246 u32 reg;
4247 int ret;
4248
4249 table->ACPIState = table->initialState;
4250
4251 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
4252
4253 if (pi->acpi_vddc) {
4254 ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
4255 pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
4256 if (!ret) {
4257 u16 std_vddc;
4258
4259 ret = si_get_std_voltage_value(rdev,
4260 &table->ACPIState.levels[0].vddc, &std_vddc);
4261 if (!ret)
4262 si_populate_std_voltage_value(rdev, std_vddc,
4263 table->ACPIState.levels[0].vddc.index,
4264 &table->ACPIState.levels[0].std_vddc);
4265 }
4266 table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
4267
4268 if (si_pi->vddc_phase_shed_control) {
4269 si_populate_phase_shedding_value(rdev,
4270 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
4271 pi->acpi_vddc,
4272 0,
4273 0,
4274 &table->ACPIState.levels[0].vddc);
4275 }
4276 } else {
4277 ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
4278 pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
4279 if (!ret) {
4280 u16 std_vddc;
4281
4282 ret = si_get_std_voltage_value(rdev,
4283 &table->ACPIState.levels[0].vddc, &std_vddc);
4284
4285 if (!ret)
4286 si_populate_std_voltage_value(rdev, std_vddc,
4287 table->ACPIState.levels[0].vddc.index,
4288 &table->ACPIState.levels[0].std_vddc);
4289 }
4290 table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
4291 si_pi->sys_pcie_mask,
4292 si_pi->boot_pcie_gen,
4293 RADEON_PCIE_GEN1);
4294
4295 if (si_pi->vddc_phase_shed_control)
4296 si_populate_phase_shedding_value(rdev,
4297 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
4298 pi->min_vddc_in_table,
4299 0,
4300 0,
4301 &table->ACPIState.levels[0].vddc);
4302 }
4303
4304 if (pi->acpi_vddc) {
4305 if (eg_pi->acpi_vddci)
4306 si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
4307 eg_pi->acpi_vddci,
4308 &table->ACPIState.levels[0].vddci);
4309 }
4310
4311 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
4312 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
4313
4314 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
4315
4316 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
4317 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
4318
4319 table->ACPIState.levels[0].mclk.vDLL_CNTL =
4320 cpu_to_be32(dll_cntl);
4321 table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
4322 cpu_to_be32(mclk_pwrmgt_cntl);
4323 table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
4324 cpu_to_be32(mpll_ad_func_cntl);
4325 table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
4326 cpu_to_be32(mpll_dq_func_cntl);
4327 table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
4328 cpu_to_be32(mpll_func_cntl);
4329 table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
4330 cpu_to_be32(mpll_func_cntl_1);
4331 table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
4332 cpu_to_be32(mpll_func_cntl_2);
4333 table->ACPIState.levels[0].mclk.vMPLL_SS =
4334 cpu_to_be32(si_pi->clock_registers.mpll_ss1);
4335 table->ACPIState.levels[0].mclk.vMPLL_SS2 =
4336 cpu_to_be32(si_pi->clock_registers.mpll_ss2);
4337
4338 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
4339 cpu_to_be32(spll_func_cntl);
4340 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
4341 cpu_to_be32(spll_func_cntl_2);
4342 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
4343 cpu_to_be32(spll_func_cntl_3);
4344 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
4345 cpu_to_be32(spll_func_cntl_4);
4346
4347 table->ACPIState.levels[0].mclk.mclk_value = 0;
4348 table->ACPIState.levels[0].sclk.sclk_value = 0;
4349
4350 si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
4351
4352 if (eg_pi->dynamic_ac_timing)
4353 table->ACPIState.levels[0].ACIndex = 0;
4354
4355 table->ACPIState.levels[0].dpm2.MaxPS = 0;
4356 table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
4357 table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
4358 table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
4359 table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
4360
4361 reg = MIN_POWER_MASK | MAX_POWER_MASK;
4362 table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
4363
4364 reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
4365 table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
4366
4367 return 0;
4368}
4369
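/*
 * The ACPI state is the initial state with its clocks re-pointed: the
 * memory DLLs are reset and gated, and the SCLK mux is switched, all via
 * read-modify-write on the cached register values. A generic sketch of
 * that field update; SCLK_MUX_SEL_SHIFT and the 3-bit width below are
 * hypothetical, the real macros come from the register headers:
 */
static unsigned int set_field(unsigned int reg, unsigned int shift,
			      unsigned int width, unsigned int val)
{
	unsigned int mask = ((1u << width) - 1) << shift;

	return (reg & ~mask) | ((val << shift) & mask);
}
/* e.g. set_field(spll_func_cntl_2, SCLK_MUX_SEL_SHIFT, 3, 4) would mirror
 * the ~SCLK_MUX_SEL_MASK / SCLK_MUX_SEL(4) pair used above. */
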
4370static int si_populate_ulv_state(struct radeon_device *rdev,
4371 SISLANDS_SMC_SWSTATE *state)
4372{
4373 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4374 struct si_power_info *si_pi = si_get_pi(rdev);
4375 struct si_ulv_param *ulv = &si_pi->ulv;
4376 u32 sclk_in_sr = 1350; /* ??? */
4377 int ret;
4378
4379 ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
4380 &state->levels[0]);
4381 if (!ret) {
4382 if (eg_pi->sclk_deep_sleep) {
4383 if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
4384 state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
4385 else
4386 state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
4387 }
4388 if (ulv->one_pcie_lane_in_ulv)
4389 state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
4390 state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
4391 state->levels[0].ACIndex = 1;
4392 state->levels[0].std_vddc = state->levels[0].vddc;
4393 state->levelCount = 1;
4394
4395 state->flags |= PPSMC_SWSTATE_FLAG_DC;
4396 }
4397
4398 return ret;
4399}
4400
4401static int si_program_ulv_memory_timing_parameters(struct radeon_device *rdev)
4402{
4403 struct si_power_info *si_pi = si_get_pi(rdev);
4404 struct si_ulv_param *ulv = &si_pi->ulv;
4405 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
4406 int ret;
4407
4408 ret = si_populate_memory_timing_parameters(rdev, &ulv->pl,
4409 &arb_regs);
4410 if (ret)
4411 return ret;
4412
4413 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
4414 ulv->volt_change_delay);
4415
4416 ret = si_copy_bytes_to_smc(rdev,
4417 si_pi->arb_table_start +
4418 offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
4419 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
4420 (u8 *)&arb_regs,
4421 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
4422 si_pi->sram_end);
4423
4424 return ret;
4425}
4426
4427static void si_get_mvdd_configuration(struct radeon_device *rdev)
4428{
4429 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4430
4431 pi->mvdd_split_frequency = 30000;
4432}
4433
4434static int si_init_smc_table(struct radeon_device *rdev)
4435{
4436 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4437 struct si_power_info *si_pi = si_get_pi(rdev);
4438 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
4439 const struct si_ulv_param *ulv = &si_pi->ulv;
4440 SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
4441 int ret;
4442 u32 lane_width;
4443 u32 vr_hot_gpio;
4444
4445 si_populate_smc_voltage_tables(rdev, table);
4446
4447 switch (rdev->pm.int_thermal_type) {
4448 case THERMAL_TYPE_SI:
4449 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
4450 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
4451 break;
4452 case THERMAL_TYPE_NONE:
4453 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
4454 break;
4455 default:
4456 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
4457 break;
4458 }
4459
4460 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
4461 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
4462
4463 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
4464 if ((rdev->pdev->device != 0x6818) && (rdev->pdev->device != 0x6819))
4465 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
4466 }
4467
4468 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
4469 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
4470
4471 if (pi->mem_gddr5)
4472 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
4473
4474 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
4475 table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
4476
4477 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
4478 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
4479 vr_hot_gpio = rdev->pm.dpm.backbias_response_time;
4480 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
4481 vr_hot_gpio);
4482 }
4483
4484 ret = si_populate_smc_initial_state(rdev, radeon_boot_state, table);
4485 if (ret)
4486 return ret;
4487
4488 ret = si_populate_smc_acpi_state(rdev, table);
4489 if (ret)
4490 return ret;
4491
4492 table->driverState = table->initialState;
4493
4494 ret = si_do_program_memory_timing_parameters(rdev, radeon_boot_state,
4495 SISLANDS_INITIAL_STATE_ARB_INDEX);
4496 if (ret)
4497 return ret;
4498
4499 if (ulv->supported && ulv->pl.vddc) {
4500 ret = si_populate_ulv_state(rdev, &table->ULVState);
4501 if (ret)
4502 return ret;
4503
4504 ret = si_program_ulv_memory_timing_parameters(rdev);
4505 if (ret)
4506 return ret;
4507
4508 WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
4509 WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
4510
4511 lane_width = radeon_get_pcie_lanes(rdev);
4512 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
4513 } else {
4514 table->ULVState = table->initialState;
4515 }
4516
4517 return si_copy_bytes_to_smc(rdev, si_pi->state_table_start,
4518 (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
4519 si_pi->sram_end);
4520}
4521
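/*
 * Much of si_init_smc_table() is a one-to-one translation of ATOM platform
 * caps into PPSMC system flags. The simple cases could equally be
 * table-driven; a sketch with hypothetical bit values:
 */
struct cap_flag_map { unsigned int cap; unsigned int flag; };

static unsigned int map_platform_caps(unsigned int caps,
				      const struct cap_flag_map *map,
				      unsigned int n)
{
	unsigned int i, flags = 0;

	for (i = 0; i < n; i++)
		if (caps & map[i].cap)
			flags |= map[i].flag;
	return flags;
}
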
4522static int si_calculate_sclk_params(struct radeon_device *rdev,
4523 u32 engine_clock,
4524 SISLANDS_SMC_SCLK_VALUE *sclk)
4525{
4526 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4527 struct si_power_info *si_pi = si_get_pi(rdev);
4528 struct atom_clock_dividers dividers;
4529 u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
4530 u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
4531 u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
4532 u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
4533 u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
4534 u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
4535 u64 tmp;
4536 u32 reference_clock = rdev->clock.spll.reference_freq;
4537 u32 reference_divider;
4538 u32 fbdiv;
4539 int ret;
4540
4541 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
4542 engine_clock, false, &dividers);
4543 if (ret)
4544 return ret;
4545
4546 reference_divider = 1 + dividers.ref_div;
4547
4548 tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
4549 do_div(tmp, reference_clock);
4550 fbdiv = (u32) tmp;
4551
4552 spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
4553 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
4554 spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
4555
4556 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
4557 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
4558
4559 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
4560 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
4561 spll_func_cntl_3 |= SPLL_DITHEN;
4562
4563 if (pi->sclk_ss) {
4564 struct radeon_atom_ss ss;
4565 u32 vco_freq = engine_clock * dividers.post_div;
4566
4567 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
4568 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
4569 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
4570 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
4571
4572 cg_spll_spread_spectrum &= ~CLK_S_MASK;
4573 cg_spll_spread_spectrum |= CLK_S(clk_s);
4574 cg_spll_spread_spectrum |= SSEN;
4575
4576 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
4577 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
4578 }
4579 }
4580
4581 sclk->sclk_value = engine_clock;
4582 sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
4583 sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
4584 sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
4585 sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
4586 sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
4587 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
4588
4589 return 0;
4590}
4591
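/*
 * fbdiv above carries 14 fractional bits (the * 16384). A worked example,
 * assuming a 27 MHz SPLL reference (2700 in 10 kHz units), ref_div = 0 and
 * post_div = 4 for an 800 MHz (80000) engine clock:
 *
 *   fbdiv = 80000 * (1 + 0) * 4 * 16384 / 2700 = 1941807
 *
 * 1941807 / 16384 is roughly 118.52, so the VCO runs at about
 * 27 MHz * 118.52 = 3.2 GHz, which the post divider brings back down to
 * 800 MHz. The u64 intermediate and do_div() keep the * 16384 step from
 * overflowing 32 bits.
 */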
4592static int si_populate_sclk_value(struct radeon_device *rdev,
4593 u32 engine_clock,
4594 SISLANDS_SMC_SCLK_VALUE *sclk)
4595{
4596 SISLANDS_SMC_SCLK_VALUE sclk_tmp;
4597 int ret;
4598
4599 ret = si_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
4600 if (!ret) {
4601 sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
4602 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
4603 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
4604 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
4605 sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
4606 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
4607 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
4608 }
4609
4610 return ret;
4611}
4612
4613static int si_populate_mclk_value(struct radeon_device *rdev,
4614 u32 engine_clock,
4615 u32 memory_clock,
4616 SISLANDS_SMC_MCLK_VALUE *mclk,
4617 bool strobe_mode,
4618 bool dll_state_on)
4619{
4620 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4621 struct si_power_info *si_pi = si_get_pi(rdev);
4622 u32 dll_cntl = si_pi->clock_registers.dll_cntl;
4623 u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
4624 u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
4625 u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
4626 u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
4627 u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
4628 u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
4629 u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
4630 u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
4631 struct atom_mpll_param mpll_param;
4632 int ret;
4633
4634 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
4635 if (ret)
4636 return ret;
4637
4638 mpll_func_cntl &= ~BWCTRL_MASK;
4639 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
4640
4641 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
4642 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
4643 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
4644
4645 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
4646 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
4647
4648 if (pi->mem_gddr5) {
4649 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
4650 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
4651 YCLK_POST_DIV(mpll_param.post_div);
4652 }
4653
4654 if (pi->mclk_ss) {
4655 struct radeon_atom_ss ss;
4656 u32 freq_nom;
4657 u32 tmp;
4658 u32 reference_clock = rdev->clock.mpll.reference_freq;
4659
4660 if (pi->mem_gddr5)
4661 freq_nom = memory_clock * 4;
4662 else
4663 freq_nom = memory_clock * 2;
4664
4665 tmp = freq_nom / reference_clock;
4666 tmp = tmp * tmp;
4667 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
4668 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
4669 u32 clks = reference_clock * 5 / ss.rate;
4670 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
4671
4672 mpll_ss1 &= ~CLKV_MASK;
4673 mpll_ss1 |= CLKV(clkv);
4674
4675 mpll_ss2 &= ~CLKS_MASK;
4676 mpll_ss2 |= CLKS(clks);
4677 }
4678 }
4679
4680 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
4681 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
4682
4683 if (dll_state_on)
4684 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
4685 else
4686 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
4687
4688 mclk->mclk_value = cpu_to_be32(memory_clock);
4689 mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
4690 mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
4691 mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
4692 mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
4693 mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
4694 mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
4695 mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
4696 mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
4697 mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
4698
4699 return 0;
4700}
4701
4702static void si_populate_smc_sp(struct radeon_device *rdev,
4703 struct radeon_ps *radeon_state,
4704 SISLANDS_SMC_SWSTATE *smc_state)
4705{
4706 struct ni_ps *ps = ni_get_ps(radeon_state);
4707 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4708 int i;
4709
4710 for (i = 0; i < ps->performance_level_count - 1; i++)
4711 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
4712
4713 smc_state->levels[ps->performance_level_count - 1].bSP =
4714 cpu_to_be32(pi->psp);
4715}
4716
4717static int si_convert_power_level_to_smc(struct radeon_device *rdev,
4718 struct rv7xx_pl *pl,
4719 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
4720{
4721 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4722 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4723 struct si_power_info *si_pi = si_get_pi(rdev);
4724 int ret;
4725 bool dll_state_on;
4726 u16 std_vddc;
4727 bool gmc_pg = false;
4728
4729 if (eg_pi->pcie_performance_request &&
4730 (si_pi->force_pcie_gen != RADEON_PCIE_GEN_INVALID))
4731 level->gen2PCIE = (u8)si_pi->force_pcie_gen;
4732 else
4733 level->gen2PCIE = (u8)pl->pcie_gen;
4734
4735 ret = si_populate_sclk_value(rdev, pl->sclk, &level->sclk);
4736 if (ret)
4737 return ret;
4738
4739 level->mcFlags = 0;
4740
4741 if (pi->mclk_stutter_mode_threshold &&
4742 (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
4743 !eg_pi->uvd_enabled &&
4744 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
4745 (rdev->pm.dpm.new_active_crtc_count <= 2)) {
4746 level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
4747
4748 if (gmc_pg)
4749 level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
4750 }
4751
4752 if (pi->mem_gddr5) {
4753 if (pl->mclk > pi->mclk_edc_enable_threshold)
4754 level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
4755
4756 if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
4757 level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
4758
4759 level->strobeMode = si_get_strobe_mode_settings(rdev, pl->mclk);
4760
4761 if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
4762 if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
4763 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
4764 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
4765 else
4766 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
4767 } else {
4768 dll_state_on = false;
4769 }
4770 } else {
4771 level->strobeMode = si_get_strobe_mode_settings(rdev,
4772 pl->mclk);
4773
4774 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
4775 }
4776
4777 ret = si_populate_mclk_value(rdev,
4778 pl->sclk,
4779 pl->mclk,
4780 &level->mclk,
4781 (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
4782 if (ret)
4783 return ret;
4784
4785 ret = si_populate_voltage_value(rdev,
4786 &eg_pi->vddc_voltage_table,
4787 pl->vddc, &level->vddc);
4788 if (ret)
4789 return ret;
4790
4791
4792 ret = si_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
4793 if (ret)
4794 return ret;
4795
4796 ret = si_populate_std_voltage_value(rdev, std_vddc,
4797 level->vddc.index, &level->std_vddc);
4798 if (ret)
4799 return ret;
4800
4801 if (eg_pi->vddci_control) {
4802 ret = si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
4803 pl->vddci, &level->vddci);
4804 if (ret)
4805 return ret;
4806 }
4807
4808 if (si_pi->vddc_phase_shed_control) {
4809 ret = si_populate_phase_shedding_value(rdev,
4810 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
4811 pl->vddc,
4812 pl->sclk,
4813 pl->mclk,
4814 &level->vddc);
4815 if (ret)
4816 return ret;
4817 }
4818
4819 level->MaxPoweredUpCU = si_pi->max_cu;
4820
4821 ret = si_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
4822
4823 return ret;
4824}
4825
4826static int si_populate_smc_t(struct radeon_device *rdev,
4827 struct radeon_ps *radeon_state,
4828 SISLANDS_SMC_SWSTATE *smc_state)
4829{
4830 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
4831 struct ni_ps *state = ni_get_ps(radeon_state);
4832 u32 a_t;
4833 u32 t_l, t_h;
4834 u32 high_bsp;
4835 int i, ret;
4836
4837 if (state->performance_level_count >= 9)
4838 return -EINVAL;
4839
4840 if (state->performance_level_count < 2) {
4841 a_t = CG_R(0xffff) | CG_L(0);
4842 smc_state->levels[0].aT = cpu_to_be32(a_t);
4843 return 0;
4844 }
4845
4846 smc_state->levels[0].aT = cpu_to_be32(0);
4847
4848 for (i = 0; i <= state->performance_level_count - 2; i++) {
4849 ret = r600_calculate_at(
4850 (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
4851 100 * R600_AH_DFLT,
4852 state->performance_levels[i + 1].sclk,
4853 state->performance_levels[i].sclk,
4854 &t_l,
4855 &t_h);
4856
4857 if (ret) {
4858 t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
4859 t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
4860 }
4861
4862 a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
4863 a_t |= CG_R(t_l * pi->bsp / 20000);
4864 smc_state->levels[i].aT = cpu_to_be32(a_t);
4865
4866 high_bsp = (i == state->performance_level_count - 2) ?
4867 pi->pbsp : pi->bsp;
4868 a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
4869 smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
4870 }
4871
4872 return 0;
4873}
4874
4875static int si_disable_ulv(struct radeon_device *rdev)
4876{
4877 struct si_power_info *si_pi = si_get_pi(rdev);
4878 struct si_ulv_param *ulv = &si_pi->ulv;
4879
4880 if (ulv->supported)
4881 return (si_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
4882 0 : -EINVAL;
4883
4884 return 0;
4885}
4886
4887static bool si_is_state_ulv_compatible(struct radeon_device *rdev,
4888 struct radeon_ps *radeon_state)
4889{
4890 const struct si_power_info *si_pi = si_get_pi(rdev);
4891 const struct si_ulv_param *ulv = &si_pi->ulv;
4892 const struct ni_ps *state = ni_get_ps(radeon_state);
4893 int i;
4894
4895 if (state->performance_levels[0].mclk != ulv->pl.mclk)
4896 return false;
4897
4898 /* XXX validate against display requirements! */
4899
4900 for (i = 0; i < rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
4901 if (rdev->clock.current_dispclk <=
4902 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
4903 if (ulv->pl.vddc <
4904 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
4905 return false;
4906 }
4907 }
4908
4909 if ((radeon_state->vclk != 0) || (radeon_state->dclk != 0))
4910 return false;
4911
4912 return true;
4913}
4914
4915static int si_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev,
4916 struct radeon_ps *radeon_new_state)
4917{
4918 const struct si_power_info *si_pi = si_get_pi(rdev);
4919 const struct si_ulv_param *ulv = &si_pi->ulv;
4920
4921 if (ulv->supported) {
4922 if (si_is_state_ulv_compatible(rdev, radeon_new_state))
4923 return (si_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
4924 0 : -EINVAL;
4925 }
4926 return 0;
4927}
4928
4929static int si_convert_power_state_to_smc(struct radeon_device *rdev,
4930 struct radeon_ps *radeon_state,
4931 SISLANDS_SMC_SWSTATE *smc_state)
4932{
4933 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4934 struct ni_power_info *ni_pi = ni_get_pi(rdev);
4935 struct si_power_info *si_pi = si_get_pi(rdev);
4936 struct ni_ps *state = ni_get_ps(radeon_state);
4937 int i, ret;
4938 u32 threshold;
4939 u32 sclk_in_sr = 1350; /* ??? */
4940
4941 if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
4942 return -EINVAL;
4943
4944 threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
4945
4946 if (radeon_state->vclk && radeon_state->dclk) {
4947 eg_pi->uvd_enabled = true;
4948 if (eg_pi->smu_uvd_hs)
4949 smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
4950 } else {
4951 eg_pi->uvd_enabled = false;
4952 }
4953
4954 if (state->dc_compatible)
4955 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
4956
4957 smc_state->levelCount = 0;
4958 for (i = 0; i < state->performance_level_count; i++) {
4959 if (eg_pi->sclk_deep_sleep) {
4960 if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
4961 if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
4962 smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
4963 else
4964 smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
4965 }
4966 }
4967
4968 ret = si_convert_power_level_to_smc(rdev, &state->performance_levels[i],
4969 &smc_state->levels[i]);
4970 smc_state->levels[i].arbRefreshState =
4971 (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
4972
4973 if (ret)
4974 return ret;
4975
4976 if (ni_pi->enable_power_containment)
4977 smc_state->levels[i].displayWatermark =
4978 (state->performance_levels[i].sclk < threshold) ?
4979 PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
4980 else
4981 smc_state->levels[i].displayWatermark = (i < 2) ?
4982 PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
4983
4984 if (eg_pi->dynamic_ac_timing)
4985 smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
4986 else
4987 smc_state->levels[i].ACIndex = 0;
4988
4989 smc_state->levelCount++;
4990 }
4991
4992 si_write_smc_soft_register(rdev,
4993 SI_SMC_SOFT_REGISTER_watermark_threshold,
4994 threshold / 512);
4995
4996 si_populate_smc_sp(rdev, radeon_state, smc_state);
4997
4998 ret = si_populate_power_containment_values(rdev, radeon_state, smc_state);
4999 if (ret)
5000 ni_pi->enable_power_containment = false;
5001
5002 ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
5003 if (ret)
5004 ni_pi->enable_sq_ramping = false;
5005
5006 return si_populate_smc_t(rdev, radeon_state, smc_state);
5007}
5008
5009static int si_upload_sw_state(struct radeon_device *rdev,
5010 struct radeon_ps *radeon_new_state)
5011{
5012 struct si_power_info *si_pi = si_get_pi(rdev);
5013 struct ni_ps *new_state = ni_get_ps(radeon_new_state);
5014 int ret;
5015 u32 address = si_pi->state_table_start +
5016 offsetof(SISLANDS_SMC_STATETABLE, driverState);
5017 u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
5018 ((new_state->performance_level_count - 1) *
5019 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
5020 SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
5021
5022 memset(smc_state, 0, state_size);
5023
5024 ret = si_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
5025 if (ret)
5026 return ret;
5027
5028 ret = si_copy_bytes_to_smc(rdev, address, (u8 *)smc_state,
5029 state_size, si_pi->sram_end);
5030
5031 return ret;
5032}
5033
5034static int si_upload_ulv_state(struct radeon_device *rdev)
5035{
5036 struct si_power_info *si_pi = si_get_pi(rdev);
5037 struct si_ulv_param *ulv = &si_pi->ulv;
5038 int ret = 0;
5039
5040 if (ulv->supported && ulv->pl.vddc) {
5041 u32 address = si_pi->state_table_start +
5042 offsetof(SISLANDS_SMC_STATETABLE, ULVState);
5043 SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
5044 u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
5045
5046 memset(smc_state, 0, state_size);
5047
5048 ret = si_populate_ulv_state(rdev, smc_state);
5049 if (!ret)
5050 ret = si_copy_bytes_to_smc(rdev, address, (u8 *)smc_state,
5051 state_size, si_pi->sram_end);
5052 }
5053
5054 return ret;
5055}
5056
5057static int si_upload_smc_data(struct radeon_device *rdev)
5058{
5059 struct radeon_crtc *radeon_crtc = NULL;
5060 int i;
5061
5062 if (rdev->pm.dpm.new_active_crtc_count == 0)
5063 return 0;
5064
5065 for (i = 0; i < rdev->num_crtc; i++) {
5066 if (rdev->pm.dpm.new_active_crtcs & (1 << i)) {
5067 radeon_crtc = rdev->mode_info.crtcs[i];
5068 break;
5069 }
5070 }
5071
5072 if (radeon_crtc == NULL)
5073 return 0;
5074
5075 if (radeon_crtc->line_time <= 0)
5076 return 0;
5077
5078 if (si_write_smc_soft_register(rdev,
5079 SI_SMC_SOFT_REGISTER_crtc_index,
5080 radeon_crtc->crtc_id) != PPSMC_Result_OK)
5081 return 0;
5082
5083 if (si_write_smc_soft_register(rdev,
5084 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
5085 radeon_crtc->wm_high / radeon_crtc->line_time) != PPSMC_Result_OK)
5086 return 0;
5087
5088 if (si_write_smc_soft_register(rdev,
5089 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
5090 radeon_crtc->wm_low / radeon_crtc->line_time) != PPSMC_Result_OK)
5091 return 0;
5092
5093 return 0;
5094}
5095
5096static int si_set_mc_special_registers(struct radeon_device *rdev,
5097 struct si_mc_reg_table *table)
5098{
5099 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
5100 u8 i, j, k;
5101 u32 temp_reg;
5102
5103 for (i = 0, j = table->last; i < table->last; i++) {
5104 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5105 return -EINVAL;
5106 switch (table->mc_reg_address[i].s1 << 2) {
5107 case MC_SEQ_MISC1:
5108 temp_reg = RREG32(MC_PMG_CMD_EMRS);
5109 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
5110 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
5111 for (k = 0; k < table->num_entries; k++)
5112 table->mc_reg_table_entry[k].mc_data[j] =
5113 ((temp_reg & 0xffff0000)) |
5114 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
5115 j++;
5116 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5117 return -EINVAL;
5118
5119 temp_reg = RREG32(MC_PMG_CMD_MRS);
5120 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
5121 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
5122 for (k = 0; k < table->num_entries; k++) {
5123 table->mc_reg_table_entry[k].mc_data[j] =
5124 (temp_reg & 0xffff0000) |
5125 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5126 if (!pi->mem_gddr5)
5127 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5128 }
5129 j++;
5130			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5131 return -EINVAL;
5132
5133 if (!pi->mem_gddr5) {
5134 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
5135 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
5136 for (k = 0; k < table->num_entries; k++)
5137 table->mc_reg_table_entry[k].mc_data[j] =
5138 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5139 j++;
5140				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5141 return -EINVAL;
5142 }
5143 break;
5144 case MC_SEQ_RESERVE_M:
5145 temp_reg = RREG32(MC_PMG_CMD_MRS1);
5146 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
5147 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
5148			for (k = 0; k < table->num_entries; k++)
5149 table->mc_reg_table_entry[k].mc_data[j] =
5150 (temp_reg & 0xffff0000) |
5151 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5152 j++;
5153			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5154 return -EINVAL;
5155 break;
5156 default:
5157 break;
5158 }
5159 }
5160
5161 table->last = j;
5162
5163 return 0;
5164}
5165
5166static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
5167{
5168 bool result = true;
5169
5170 switch (in_reg) {
5171 case MC_SEQ_RAS_TIMING >> 2:
5172 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
5173 break;
5174 case MC_SEQ_CAS_TIMING >> 2:
5175 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
5176 break;
5177 case MC_SEQ_MISC_TIMING >> 2:
5178 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
5179 break;
5180 case MC_SEQ_MISC_TIMING2 >> 2:
5181 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
5182 break;
5183 case MC_SEQ_RD_CTL_D0 >> 2:
5184 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
5185 break;
5186 case MC_SEQ_RD_CTL_D1 >> 2:
5187 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
5188 break;
5189 case MC_SEQ_WR_CTL_D0 >> 2:
5190 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
5191 break;
5192 case MC_SEQ_WR_CTL_D1 >> 2:
5193 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
5194 break;
5195 case MC_PMG_CMD_EMRS >> 2:
5196 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
5197 break;
5198 case MC_PMG_CMD_MRS >> 2:
5199 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
5200 break;
5201 case MC_PMG_CMD_MRS1 >> 2:
5202 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
5203 break;
5204 case MC_SEQ_PMG_TIMING >> 2:
5205 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
5206 break;
5207 case MC_PMG_CMD_MRS2 >> 2:
5208 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
5209 break;
5210 case MC_SEQ_WR_CTL_2 >> 2:
5211 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
5212 break;
5213 default:
5214 result = false;
5215 break;
5216 }
5217
5218 return result;
5219}
5220
5221static void si_set_valid_flag(struct si_mc_reg_table *table)
5222{
5223 u8 i, j;
5224
5225 for (i = 0; i < table->last; i++) {
5226 for (j = 1; j < table->num_entries; j++) {
5227 if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
5228 table->valid_flag |= 1 << i;
5229 break;
5230 }
5231 }
5232 }
5233}
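/*
 * valid_flag ends up as a bitmask of the MC register columns that actually
 * differ somewhere across the table's entries; constant columns are later
 * skipped when the addresses and data are copied out for the SMC. A
 * standalone sketch of the same scan over a hypothetical row-major table
 * (cols <= 8 here):
 */
static unsigned int changed_columns(const unsigned int data[][8],
				    unsigned int rows, unsigned int cols)
{
	unsigned int i, j, mask = 0;

	for (i = 0; i < cols; i++) {
		for (j = 1; j < rows; j++) {
			if (data[j - 1][i] != data[j][i]) {
				mask |= 1u << i;
				break;
			}
		}
	}
	return mask;
}
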
5234
5235static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
5236{
5237 u32 i;
5238 u16 address;
5239
5240 for (i = 0; i < table->last; i++)
5241 table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
5242 address : table->mc_reg_address[i].s1;
5243
5244}
5245
5246static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
5247 struct si_mc_reg_table *si_table)
5248{
5249 u8 i, j;
5250
5251 if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5252 return -EINVAL;
5253 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
5254 return -EINVAL;
5255
5256 for (i = 0; i < table->last; i++)
5257 si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
5258 si_table->last = table->last;
5259
5260 for (i = 0; i < table->num_entries; i++) {
5261 si_table->mc_reg_table_entry[i].mclk_max =
5262 table->mc_reg_table_entry[i].mclk_max;
5263 for (j = 0; j < table->last; j++) {
5264 si_table->mc_reg_table_entry[i].mc_data[j] =
5265 table->mc_reg_table_entry[i].mc_data[j];
5266 }
5267 }
5268 si_table->num_entries = table->num_entries;
5269
5270 return 0;
5271}
5272
5273static int si_initialize_mc_reg_table(struct radeon_device *rdev)
5274{
5275 struct si_power_info *si_pi = si_get_pi(rdev);
5276 struct atom_mc_reg_table *table;
5277 struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
5278 u8 module_index = rv770_get_memory_module_index(rdev);
5279 int ret;
5280
5281 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
5282 if (!table)
5283 return -ENOMEM;
5284
5285 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
5286 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
5287 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
5288 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
5289 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
5290 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
5291 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
5292 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
5293 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
5294 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
5295 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
5296 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
5297 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
5298 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
5299
5300 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
5301 if (ret)
5302 goto init_mc_done;
5303
5304 ret = si_copy_vbios_mc_reg_table(table, si_table);
5305 if (ret)
5306 goto init_mc_done;
5307
5308 si_set_s0_mc_reg_index(si_table);
5309
5310 ret = si_set_mc_special_registers(rdev, si_table);
5311 if (ret)
5312 goto init_mc_done;
5313
5314 si_set_valid_flag(si_table);
5315
5316init_mc_done:
5317 kfree(table);
5318
5319 return ret;
5320
5321}
5322
5323static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
5324 SMC_SIslands_MCRegisters *mc_reg_table)
5325{
5326 struct si_power_info *si_pi = si_get_pi(rdev);
5327 u32 i, j;
5328
5329 for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
5330 if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
5331			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5332 break;
5333 mc_reg_table->address[i].s0 =
5334 cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
5335 mc_reg_table->address[i].s1 =
5336 cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
5337 i++;
5338 }
5339 }
5340 mc_reg_table->last = (u8)i;
5341}
5342
5343static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
5344 SMC_SIslands_MCRegisterSet *data,
5345 u32 num_entries, u32 valid_flag)
5346{
5347 u32 i, j;
5348
5349	for (i = 0, j = 0; j < num_entries; j++) {
5350 if (valid_flag & (1 << j)) {
5351 data->value[i] = cpu_to_be32(entry->mc_data[j]);
5352 i++;
5353 }
5354 }
5355}
5356
5357static void si_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
5358 struct rv7xx_pl *pl,
5359 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
5360{
5361 struct si_power_info *si_pi = si_get_pi(rdev);
5362 u32 i = 0;
5363
5364 for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
5365 if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
5366 break;
5367 }
5368
5369 if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
5370 --i;
5371
5372 si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
5373 mc_reg_table_data, si_pi->mc_reg_table.last,
5374 si_pi->mc_reg_table.valid_flag);
5375}
5376
5377static void si_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
5378 struct radeon_ps *radeon_state,
5379 SMC_SIslands_MCRegisters *mc_reg_table)
5380{
5381 struct ni_ps *state = ni_get_ps(radeon_state);
5382 int i;
5383
5384 for (i = 0; i < state->performance_level_count; i++) {
5385 si_convert_mc_reg_table_entry_to_smc(rdev,
5386 &state->performance_levels[i],
5387 &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
5388 }
5389}
5390
5391static int si_populate_mc_reg_table(struct radeon_device *rdev,
5392 struct radeon_ps *radeon_boot_state)
5393{
5394 struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
5395 struct si_power_info *si_pi = si_get_pi(rdev);
5396 struct si_ulv_param *ulv = &si_pi->ulv;
5397 SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
5398
5399 memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
5400
5401 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_seq_index, 1);
5402
5403 si_populate_mc_reg_addresses(rdev, smc_mc_reg_table);
5404
5405 si_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
5406 &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
5407
5408 si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
5409 &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
5410 si_pi->mc_reg_table.last,
5411 si_pi->mc_reg_table.valid_flag);
5412
5413 if (ulv->supported && ulv->pl.vddc != 0)
5414 si_convert_mc_reg_table_entry_to_smc(rdev, &ulv->pl,
5415 &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
5416 else
5417 si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
5418 &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
5419 si_pi->mc_reg_table.last,
5420 si_pi->mc_reg_table.valid_flag);
5421
5422 si_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, smc_mc_reg_table);
5423
5424 return si_copy_bytes_to_smc(rdev, si_pi->mc_reg_table_start,
5425 (u8 *)smc_mc_reg_table,
5426 sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
5427}
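/*
 * The SMC-side MC register table is slot-addressed: dedicated sets for the
 * initial, ACPI and ULV states, then one set per driver performance level,
 * which is why si_upload_mc_reg_table() below can overwrite just the
 * driver-state slots. A hypothetical mirror of that layout; the real slot
 * indices come from the SISLANDS_MCREGISTERTABLE_* defines:
 */
enum mc_reg_slot {
	MC_SLOT_INITIAL,
	MC_SLOT_ACPI,
	MC_SLOT_ULV,
	MC_SLOT_FIRST_DRIVER_STATE,	/* + performance level index */
};
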
5428
5429static int si_upload_mc_reg_table(struct radeon_device *rdev,
5430 struct radeon_ps *radeon_new_state)
5431{
5432 struct ni_ps *new_state = ni_get_ps(radeon_new_state);
5433 struct si_power_info *si_pi = si_get_pi(rdev);
5434 u32 address = si_pi->mc_reg_table_start +
5435 offsetof(SMC_SIslands_MCRegisters,
5436 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
5437 SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
5438
5439 memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
5440
5441 si_convert_mc_reg_table_to_smc(rdev, radeon_new_state, smc_mc_reg_table);
5442
5443
5444 return si_copy_bytes_to_smc(rdev, address,
5445 (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
5446 sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
5447 si_pi->sram_end);
5448
5449}
5450
5451static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
5452{
5453 if (enable)
5454 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
5455 else
5456 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
5457}
5458
5459static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
5460 struct radeon_ps *radeon_state)
5461{
5462 struct ni_ps *state = ni_get_ps(radeon_state);
5463 int i;
5464 u16 pcie_speed, max_speed = 0;
5465
5466 for (i = 0; i < state->performance_level_count; i++) {
5467 pcie_speed = state->performance_levels[i].pcie_gen;
5468 if (max_speed < pcie_speed)
5469 max_speed = pcie_speed;
5470 }
5471 return max_speed;
5472}
5473
5474static u16 si_get_current_pcie_speed(struct radeon_device *rdev)
5475{
5476 u32 speed_cntl;
5477
5478 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
5479 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
5480
5481 return (u16)speed_cntl;
5482}
5483
5484static void si_request_link_speed_change_before_state_change(struct radeon_device *rdev,
5485 struct radeon_ps *radeon_new_state,
5486 struct radeon_ps *radeon_current_state)
5487{
5488 struct si_power_info *si_pi = si_get_pi(rdev);
5489 enum radeon_pcie_gen target_link_speed = si_get_maximum_link_speed(rdev, radeon_new_state);
5490 enum radeon_pcie_gen current_link_speed;
5491
5492 if (si_pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
5493 current_link_speed = si_get_maximum_link_speed(rdev, radeon_current_state);
5494 else
5495 current_link_speed = si_pi->force_pcie_gen;
5496
5497 si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5498 si_pi->pspp_notify_required = false;
5499 if (target_link_speed > current_link_speed) {
5500 switch (target_link_speed) {
5501#if defined(CONFIG_ACPI)
5502 case RADEON_PCIE_GEN3:
5503 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5504 break;
5505 si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
5506 if (current_link_speed == RADEON_PCIE_GEN2)
5507 break;
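			/* fall through: gen3 request failed, try gen2 */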
5508 case RADEON_PCIE_GEN2:
5509 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5510 break;
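			/* fall through to default on failure */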
5511#endif
5512 default:
5513 si_pi->force_pcie_gen = si_get_current_pcie_speed(rdev);
5514 break;
5515 }
5516 } else {
5517 if (target_link_speed < current_link_speed)
5518 si_pi->pspp_notify_required = true;
5519 }
5520}
5521
5522static void si_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
5523 struct radeon_ps *radeon_new_state,
5524 struct radeon_ps *radeon_current_state)
5525{
5526 struct si_power_info *si_pi = si_get_pi(rdev);
5527 enum radeon_pcie_gen target_link_speed = si_get_maximum_link_speed(rdev, radeon_new_state);
5528 u8 request;
5529
5530 if (si_pi->pspp_notify_required) {
5531 if (target_link_speed == RADEON_PCIE_GEN3)
5532 request = PCIE_PERF_REQ_PECI_GEN3;
5533 else if (target_link_speed == RADEON_PCIE_GEN2)
5534 request = PCIE_PERF_REQ_PECI_GEN2;
5535 else
5536 request = PCIE_PERF_REQ_PECI_GEN1;
5537
5538 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5539 (si_get_current_pcie_speed(rdev) > 0))
5540 return;
5541
5542#if defined(CONFIG_ACPI)
5543 radeon_acpi_pcie_performance_request(rdev, request, false);
5544#endif
5545 }
5546}
5547
5548#if 0
5549static int si_ds_request(struct radeon_device *rdev,
5550 bool ds_status_on, u32 count_write)
5551{
5552 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
5553
5554 if (eg_pi->sclk_deep_sleep) {
5555 if (ds_status_on)
5556 return (si_send_msg_to_smc(rdev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
5557 PPSMC_Result_OK) ?
5558 0 : -EINVAL;
5559 else
5560 return (si_send_msg_to_smc(rdev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
5561 PPSMC_Result_OK) ? 0 : -EINVAL;
5562 }
5563 return 0;
5564}
5565#endif
5566
5567static void si_set_max_cu_value(struct radeon_device *rdev)
5568{
5569 struct si_power_info *si_pi = si_get_pi(rdev);
5570
5571 if (rdev->family == CHIP_VERDE) {
5572 switch (rdev->pdev->device) {
5573 case 0x6820:
5574 case 0x6825:
5575 case 0x6821:
5576 case 0x6823:
5577 case 0x6827:
5578 si_pi->max_cu = 10;
5579 break;
5580 case 0x682D:
5581 case 0x6824:
5582 case 0x682F:
5583 case 0x6826:
5584 si_pi->max_cu = 8;
5585 break;
5586 case 0x6828:
5587 case 0x6830:
5588 case 0x6831:
5589 case 0x6838:
5590 case 0x6839:
5591 case 0x683D:
5592 si_pi->max_cu = 10;
5593 break;
5594 case 0x683B:
5595 case 0x683F:
5596 case 0x6829:
5597 si_pi->max_cu = 8;
5598 break;
5599 default:
5600 si_pi->max_cu = 0;
5601 break;
5602 }
5603 } else {
5604 si_pi->max_cu = 0;
5605 }
5606}
5607
5608static int si_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
5609 struct radeon_clock_voltage_dependency_table *table)
5610{
5611 u32 i;
5612 int j;
5613 u16 leakage_voltage;
5614
5615 if (table) {
5616 for (i = 0; i < table->count; i++) {
5617 switch (si_get_leakage_voltage_from_leakage_index(rdev,
5618 table->entries[i].v,
5619 &leakage_voltage)) {
5620 case 0:
5621 table->entries[i].v = leakage_voltage;
5622 break;
5623 case -EAGAIN:
5624 return -EINVAL;
5625 case -EINVAL:
5626 default:
5627 break;
5628 }
5629 }
5630
5631 for (j = (table->count - 2); j >= 0; j--) {
5632 table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
5633 table->entries[j].v : table->entries[j + 1].v;
5634 }
5635 }
5636 return 0;
5637}
5638
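The second loop above walks the table backwards and clamps every entry to its successor, so after leakage substitution the voltages are non-decreasing with clock. A self-contained sketch of that pass:

#include <stdio.h>

/* Backward monotonic clamp, as in the loop above: afterwards,
 * v[0] <= v[1] <= ... <= v[count - 1]. */
static void clamp_non_decreasing(unsigned short *v, int count)
{
	int j;

	for (j = count - 2; j >= 0; j--)
		if (v[j] > v[j + 1])
			v[j] = v[j + 1];
}

int main(void)
{
	unsigned short v[] = { 900, 1100, 1000, 1050 };	/* mV */
	int i;

	clamp_non_decreasing(v, 4);
	for (i = 0; i < 4; i++)
		printf("%u ", v[i]);	/* 900 1000 1000 1050 */
	printf("\n");
	return 0;
}
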
5639static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
5640{
5641 int ret = 0;
5642
5643 ret = si_patch_single_dependency_table_based_on_leakage(rdev,
5644 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5645 ret = si_patch_single_dependency_table_based_on_leakage(rdev,
5646 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5647 ret = si_patch_single_dependency_table_based_on_leakage(rdev,
5648 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5649 return ret;
5650}
5651
5652static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
5653 struct radeon_ps *radeon_new_state,
5654 struct radeon_ps *radeon_current_state)
5655{
5656 u32 lane_width;
5657 u32 new_lane_width =
5658 (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
5659 u32 current_lane_width =
5660 (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
5661
5662 if (new_lane_width != current_lane_width) {
5663 radeon_set_pcie_lanes(rdev, new_lane_width);
5664 lane_width = radeon_get_pcie_lanes(rdev);
5665 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
5666 }
5667}
5668
5669void si_dpm_setup_asic(struct radeon_device *rdev)
5670{
5671 rv770_get_memory_type(rdev);
5672 si_read_clock_registers(rdev);
5673 si_enable_acpi_power_management(rdev);
5674}
5675
5676static int si_set_thermal_temperature_range(struct radeon_device *rdev,
5677 int min_temp, int max_temp)
5678{
5679 int low_temp = 0 * 1000;
5680 int high_temp = 255 * 1000;
5681
5682 if (low_temp < min_temp)
5683 low_temp = min_temp;
5684 if (high_temp > max_temp)
5685 high_temp = max_temp;
5686 if (high_temp < low_temp) {
5687 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
5688 return -EINVAL;
5689 }
5690
5691 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
5692 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
5693 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
5694
5695 rdev->pm.dpm.thermal.min_temp = low_temp;
5696 rdev->pm.dpm.thermal.max_temp = high_temp;
5697
5698 return 0;
5699}
5700
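The driver keeps temperatures in millidegrees while the DIG_THERM_INTL/INTH fields take whole degrees, hence the clamp to the 0..255 range an 8-bit field can hold followed by the divide by 1000. A user-space sketch of the same arithmetic:

#include <stdio.h>

/* Millidegree bookkeeping, whole-degree register fields: the same
 * clamp-then-divide done above for DIG_THERM_INTL/INTH. */
static int thermal_range_to_fields(int min_temp, int max_temp,
				   unsigned int *low_deg, unsigned int *high_deg)
{
	int low_temp = 0 * 1000;	/* hardware floor, millidegrees */
	int high_temp = 255 * 1000;	/* 8-bit field ceiling */

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp)
		return -1;

	*low_deg = low_temp / 1000;
	*high_deg = high_temp / 1000;
	return 0;
}

int main(void)
{
	unsigned int lo, hi;

	if (!thermal_range_to_fields(90000, 120000, &lo, &hi))
		printf("INTL=%u INTH=%u (degrees)\n", lo, hi);	/* 90, 120 */
	return 0;
}
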
5701int si_dpm_enable(struct radeon_device *rdev)
5702{
5703 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
5704 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
5705 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5706 int ret;
5707
5708 if (si_is_smc_running(rdev))
5709 return -EINVAL;
5710 if (pi->voltage_control)
5711 si_enable_voltage_control(rdev, true);
5712 if (pi->mvdd_control)
5713 si_get_mvdd_configuration(rdev);
5714 if (pi->voltage_control) {
5715 ret = si_construct_voltage_tables(rdev);
5716 if (ret) {
5717 DRM_ERROR("si_construct_voltage_tables failed\n");
5718 return ret;
5719 }
5720 }
5721 if (eg_pi->dynamic_ac_timing) {
5722 ret = si_initialize_mc_reg_table(rdev);
5723 if (ret)
5724 eg_pi->dynamic_ac_timing = false;
5725 }
5726 if (pi->dynamic_ss)
5727 si_enable_spread_spectrum(rdev, true);
5728 if (pi->thermal_protection)
5729 si_enable_thermal_protection(rdev, true);
5730 si_setup_bsp(rdev);
5731 si_program_git(rdev);
5732 si_program_tp(rdev);
5733 si_program_tpp(rdev);
5734 si_program_sstp(rdev);
5735 si_enable_display_gap(rdev);
5736 si_program_vc(rdev);
5737 ret = si_upload_firmware(rdev);
5738 if (ret) {
5739 DRM_ERROR("si_upload_firmware failed\n");
5740 return ret;
5741 }
5742 ret = si_process_firmware_header(rdev);
5743 if (ret) {
5744 DRM_ERROR("si_process_firmware_header failed\n");
5745 return ret;
5746 }
5747 ret = si_initial_switch_from_arb_f0_to_f1(rdev);
5748 if (ret) {
5749 DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
5750 return ret;
5751 }
5752 ret = si_init_smc_table(rdev);
5753 if (ret) {
5754 DRM_ERROR("si_init_smc_table failed\n");
5755 return ret;
5756 }
5757 ret = si_init_smc_spll_table(rdev);
5758 if (ret) {
5759 DRM_ERROR("si_init_smc_spll_table failed\n");
5760 return ret;
5761 }
5762 ret = si_init_arb_table_index(rdev);
5763 if (ret) {
5764 DRM_ERROR("si_init_arb_table_index failed\n");
5765 return ret;
5766 }
5767 if (eg_pi->dynamic_ac_timing) {
5768 ret = si_populate_mc_reg_table(rdev, boot_ps);
5769 if (ret) {
5770 DRM_ERROR("si_populate_mc_reg_table failed\n");
5771 return ret;
5772 }
5773 }
5774 ret = si_initialize_smc_cac_tables(rdev);
5775 if (ret) {
5776 DRM_ERROR("si_initialize_smc_cac_tables failed\n");
5777 return ret;
5778 }
5779 ret = si_initialize_hardware_cac_manager(rdev);
5780 if (ret) {
5781 DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
5782 return ret;
5783 }
5784 ret = si_initialize_smc_dte_tables(rdev);
5785 if (ret) {
5786 DRM_ERROR("si_initialize_smc_dte_tables failed\n");
5787 return ret;
5788 }
5789 ret = si_populate_smc_tdp_limits(rdev, boot_ps);
5790 if (ret) {
5791 DRM_ERROR("si_populate_smc_tdp_limits failed\n");
5792 return ret;
5793 }
5794 ret = si_populate_smc_tdp_limits_2(rdev, boot_ps);
5795 if (ret) {
5796 DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
5797 return ret;
5798 }
5799 si_program_response_times(rdev);
5800 si_program_ds_registers(rdev);
5801 si_dpm_start_smc(rdev);
5802 ret = si_notify_smc_display_change(rdev, false);
5803 if (ret) {
5804 DRM_ERROR("si_notify_smc_display_change failed\n");
5805 return ret;
5806 }
5807 si_enable_sclk_control(rdev, true);
5808 si_start_dpm(rdev);
5809
5810 if (rdev->irq.installed &&
5811 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
5812 PPSMC_Result result;
5813
5814 ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5815 if (ret)
5816 return ret;
5817 rdev->irq.dpm_thermal = true;
5818 radeon_irq_set(rdev);
5819 result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
5820
5821 if (result != PPSMC_Result_OK)
5822 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
5823 }
5824
5825 si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5826
5827 ni_update_current_ps(rdev, boot_ps);
5828
5829 return 0;
5830}
5831
5832void si_dpm_disable(struct radeon_device *rdev)
5833{
5834 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
5835 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5836
5837 if (!si_is_smc_running(rdev))
5838 return;
5839 si_disable_ulv(rdev);
5840 si_clear_vc(rdev);
5841 if (pi->thermal_protection)
5842 si_enable_thermal_protection(rdev, false);
5843 si_enable_power_containment(rdev, boot_ps, false);
5844 si_enable_smc_cac(rdev, boot_ps, false);
5845 si_enable_spread_spectrum(rdev, false);
5846 si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5847 si_stop_dpm(rdev);
5848 si_reset_to_default(rdev);
5849 si_dpm_stop_smc(rdev);
5850 si_force_switch_to_arb_f0(rdev);
5851
5852 ni_update_current_ps(rdev, boot_ps);
5853}
5854
5855int si_dpm_pre_set_power_state(struct radeon_device *rdev)
5856{
5857 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
5858 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5859 struct radeon_ps *new_ps = &requested_ps;
5860
5861 ni_update_requested_ps(rdev, new_ps);
5862
5863 si_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);
5864
5865 return 0;
5866}
5867
5868static int si_power_control_set_level(struct radeon_device *rdev)
5869{
5870 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
5871 int ret;
5872
5873 ret = si_restrict_performance_levels_before_switch(rdev);
5874 if (ret)
5875 return ret;
5876 ret = si_halt_smc(rdev);
5877 if (ret)
5878 return ret;
5879 ret = si_populate_smc_tdp_limits(rdev, new_ps);
5880 if (ret)
5881 return ret;
5882 ret = si_populate_smc_tdp_limits_2(rdev, new_ps);
5883 if (ret)
5884 return ret;
5885 ret = si_resume_smc(rdev);
5886 if (ret)
5887 return ret;
5888 ret = si_set_sw_state(rdev);
5889 if (ret)
5890 return ret;
5891 return 0;
5892}
5893
5894int si_dpm_set_power_state(struct radeon_device *rdev)
5895{
5896 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
5897 struct radeon_ps *new_ps = &eg_pi->requested_rps;
5898 struct radeon_ps *old_ps = &eg_pi->current_rps;
5899 int ret;
5900
5901 ret = si_disable_ulv(rdev);
5902 if (ret) {
5903 DRM_ERROR("si_disable_ulv failed\n");
5904 return ret;
5905 }
5906 ret = si_restrict_performance_levels_before_switch(rdev);
5907 if (ret) {
5908 DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
5909 return ret;
5910 }
5911 if (eg_pi->pcie_performance_request)
5912 si_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
5913 ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
5914 ret = si_enable_power_containment(rdev, new_ps, false);
5915 if (ret) {
5916 DRM_ERROR("si_enable_power_containment failed\n");
5917 return ret;
5918 }
5919 ret = si_enable_smc_cac(rdev, new_ps, false);
5920 if (ret) {
5921 DRM_ERROR("si_enable_smc_cac failed\n");
5922 return ret;
5923 }
5924 ret = si_halt_smc(rdev);
5925 if (ret) {
5926 DRM_ERROR("si_halt_smc failed\n");
5927 return ret;
5928 }
5929 ret = si_upload_sw_state(rdev, new_ps);
5930 if (ret) {
5931 DRM_ERROR("si_upload_sw_state failed\n");
5932 return ret;
5933 }
5934 ret = si_upload_smc_data(rdev);
5935 if (ret) {
5936 DRM_ERROR("si_upload_smc_data failed\n");
5937 return ret;
5938 }
5939 ret = si_upload_ulv_state(rdev);
5940 if (ret) {
5941 DRM_ERROR("si_upload_ulv_state failed\n");
5942 return ret;
5943 }
5944 if (eg_pi->dynamic_ac_timing) {
5945 ret = si_upload_mc_reg_table(rdev, new_ps);
5946 if (ret) {
5947 DRM_ERROR("si_upload_mc_reg_table failed\n");
5948 return ret;
5949 }
5950 }
5951 ret = si_program_memory_timing_parameters(rdev, new_ps);
5952 if (ret) {
5953 DRM_ERROR("si_program_memory_timing_parameters failed\n");
5954 return ret;
5955 }
5956 si_set_pcie_lane_width_in_smc(rdev, new_ps, old_ps);
5957
5958 ret = si_resume_smc(rdev);
5959 if (ret) {
5960 DRM_ERROR("si_resume_smc failed\n");
5961 return ret;
5962 }
5963 ret = si_set_sw_state(rdev);
5964 if (ret) {
5965 DRM_ERROR("si_set_sw_state failed\n");
5966 return ret;
5967 }
5968 ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
5969 if (eg_pi->pcie_performance_request)
5970 si_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5971 ret = si_set_power_state_conditionally_enable_ulv(rdev, new_ps);
5972 if (ret) {
5973 DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
5974 return ret;
5975 }
5976 ret = si_enable_smc_cac(rdev, new_ps, true);
5977 if (ret) {
5978 DRM_ERROR("si_enable_smc_cac failed\n");
5979 return ret;
5980 }
5981 ret = si_enable_power_containment(rdev, new_ps, true);
5982 if (ret) {
5983 DRM_ERROR("si_enable_power_containment failed\n");
5984 return ret;
5985 }
5986
5987 ret = si_power_control_set_level(rdev);
5988 if (ret) {
5989 DRM_ERROR("si_power_control_set_level failed\n");
5990 return ret;
5991 }
5992
5993#if 0
5994 /* XXX */
5995 ret = si_unrestrict_performance_levels_after_switch(rdev);
5996 if (ret) {
5997 DRM_ERROR("si_unrestrict_performance_levels_after_switch failed\n");
5998 return ret;
5999 }
6000#endif
6001
6002 return 0;
6003}
6004
6005void si_dpm_post_set_power_state(struct radeon_device *rdev)
6006{
6007 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
6008 struct radeon_ps *new_ps = &eg_pi->requested_rps;
6009
6010 ni_update_current_ps(rdev, new_ps);
6011}
6012
6013
6014void si_dpm_reset_asic(struct radeon_device *rdev)
6015{
6016 si_restrict_performance_levels_before_switch(rdev);
6017 si_disable_ulv(rdev);
6018 si_set_boot_state(rdev);
6019}
6020
6021void si_dpm_display_configuration_changed(struct radeon_device *rdev)
6022{
6023 si_program_display_gap(rdev);
6024}
6025
6026union power_info {
6027 struct _ATOM_POWERPLAY_INFO info;
6028 struct _ATOM_POWERPLAY_INFO_V2 info_2;
6029 struct _ATOM_POWERPLAY_INFO_V3 info_3;
6030 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
6031 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
6032 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
6033};
6034
6035union pplib_clock_info {
6036 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
6037 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
6038 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
6039 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
6040 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
6041};
6042
6043union pplib_power_state {
6044 struct _ATOM_PPLIB_STATE v1;
6045 struct _ATOM_PPLIB_STATE_V2 v2;
6046};
6047
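The unions above overlay the several table layouts an ATOM BIOS may ship, so the parser can check a revision field through one member and then read through the matching view. A small sketch of the pattern, using hypothetical rec_v1/rec_v2 layouts rather than the real ATOM structs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical two-revision record sharing a leading rev field, read
 * through a union the way the pplib tables are above. */
struct rec_v1 { uint8_t rev; uint8_t clk; };
struct rec_v2 { uint8_t rev; uint8_t clk; uint8_t volt; };

union rec {
	struct rec_v1 v1;
	struct rec_v2 v2;
};

int main(void)
{
	uint8_t blob[3] = { 2, 80, 9 };	/* a rev-2 record as raw bytes */
	union rec r;

	memcpy(&r, blob, sizeof(blob));
	if (r.v1.rev >= 2)	/* rev sits at the same offset in both views */
		printf("clk=%u volt=%u\n", r.v2.clk, r.v2.volt);
	else
		printf("clk=%u\n", r.v1.clk);
	return 0;
}
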
6048static void si_parse_pplib_non_clock_info(struct radeon_device *rdev,
6049 struct radeon_ps *rps,
6050 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
6051 u8 table_rev)
6052{
6053 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
6054 rps->class = le16_to_cpu(non_clock_info->usClassification);
6055 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
6056
6057 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
6058 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
6059 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
6060 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
6061 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
6062 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
6063 } else {
6064 rps->vclk = 0;
6065 rps->dclk = 0;
6066 }
6067
6068 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
6069 rdev->pm.dpm.boot_ps = rps;
6070 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
6071 rdev->pm.dpm.uvd_ps = rps;
6072}
6073
6074static void si_parse_pplib_clock_info(struct radeon_device *rdev,
6075 struct radeon_ps *rps, int index,
6076 union pplib_clock_info *clock_info)
6077{
6078 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
6079 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
6080 struct si_power_info *si_pi = si_get_pi(rdev);
6081 struct ni_ps *ps = ni_get_ps(rps);
6082 u16 leakage_voltage;
6083 struct rv7xx_pl *pl = &ps->performance_levels[index];
6084 int ret;
6085
6086 ps->performance_level_count = index + 1;
6087
6088 pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
6089 pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
6090 pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
6091 pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
6092
6093 pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
6094 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
6095 pl->flags = le32_to_cpu(clock_info->si.ulFlags);
6096 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
6097 si_pi->sys_pcie_mask,
6098 si_pi->boot_pcie_gen,
6099 clock_info->si.ucPCIEGen);
6100
6101 /* patch up vddc if necessary */
6102 ret = si_get_leakage_voltage_from_leakage_index(rdev, pl->vddc,
6103 &leakage_voltage);
6104 if (ret == 0)
6105 pl->vddc = leakage_voltage;
6106
6107 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
6108 pi->acpi_vddc = pl->vddc;
6109 eg_pi->acpi_vddci = pl->vddci;
6110 si_pi->acpi_pcie_gen = pl->pcie_gen;
6111 }
6112
6113 if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
6114 index == 0) {
6115 /* XXX disable for A0 tahiti */
6116 si_pi->ulv.supported = true;
6117 si_pi->ulv.pl = *pl;
6118 si_pi->ulv.one_pcie_lane_in_ulv = false;
6119 si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
6120 si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
6121 si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
6122 }
6123
6124 if (pi->min_vddc_in_table > pl->vddc)
6125 pi->min_vddc_in_table = pl->vddc;
6126
6127 if (pi->max_vddc_in_table < pl->vddc)
6128 pi->max_vddc_in_table = pl->vddc;
6129
6130 /* patch up boot state */
6131 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
6132 u16 vddc, vddci, mvdd;
6133 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
6134 pl->mclk = rdev->clock.default_mclk;
6135 pl->sclk = rdev->clock.default_sclk;
6136 pl->vddc = vddc;
6137 pl->vddci = vddci;
6138 si_pi->mvdd_bootup_value = mvdd;
6139 }
6140
6141 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
6142 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
6143 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
6144 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
6145 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
6146 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
6147 }
6148}
6149
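ATOM splits each 24-bit clock into a little-endian u16 low half plus a u8 high byte, which is why the parser ORs ucEngineClockHigh shifted by 16 on top of the swapped low word. A worked sketch (le16 conversion omitted, assuming a little-endian host):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 24-bit clock from ATOM's lo-u16 + hi-u8 split, as done
 * for usEngineClockLow/ucEngineClockHigh above. */
static uint32_t atom_clock(uint16_t lo, uint8_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}

int main(void)
{
	/* 925 MHz in 10 kHz units = 92500 = 0x016954 */
	printf("sclk = %u\n", (unsigned int)atom_clock(0x6954, 0x01));
	return 0;
}
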
6150static int si_parse_power_table(struct radeon_device *rdev)
6151{
6152 struct radeon_mode_info *mode_info = &rdev->mode_info;
6153 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
6154 union pplib_power_state *power_state;
6155 int i, j, k, non_clock_array_index, clock_array_index;
6156 union pplib_clock_info *clock_info;
6157 struct _StateArray *state_array;
6158 struct _ClockInfoArray *clock_info_array;
6159 struct _NonClockInfoArray *non_clock_info_array;
6160 union power_info *power_info;
6161 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
6162 u16 data_offset;
6163 u8 frev, crev;
6164 u8 *power_state_offset;
6165 struct ni_ps *ps;
6166
6167 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
6168 &frev, &crev, &data_offset))
6169 return -EINVAL;
6170 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
6171
6172 state_array = (struct _StateArray *)
6173 (mode_info->atom_context->bios + data_offset +
6174 le16_to_cpu(power_info->pplib.usStateArrayOffset));
6175 clock_info_array = (struct _ClockInfoArray *)
6176 (mode_info->atom_context->bios + data_offset +
6177 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
6178 non_clock_info_array = (struct _NonClockInfoArray *)
6179 (mode_info->atom_context->bios + data_offset +
6180 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
6181
6182 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
6183 state_array->ucNumEntries, GFP_KERNEL);
6184 if (!rdev->pm.dpm.ps)
6185 return -ENOMEM;
6186 power_state_offset = (u8 *)state_array->states;
6187 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
6188 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
6189 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
6190 for (i = 0; i < state_array->ucNumEntries; i++) {
6191 power_state = (union pplib_power_state *)power_state_offset;
6192 non_clock_array_index = power_state->v2.nonClockInfoIndex;
6193 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
6194 &non_clock_info_array->nonClockInfo[non_clock_array_index];
6195 if (!rdev->pm.power_state[i].clock_info)
6196 return -EINVAL;
6197 ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
6198 if (ps == NULL) {
6199 kfree(rdev->pm.dpm.ps);
6200 return -ENOMEM;
6201 }
6202 rdev->pm.dpm.ps[i].ps_priv = ps;
6203 si_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
6204 non_clock_info,
6205 non_clock_info_array->ucEntrySize);
6206 k = 0;
6207 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
6208 clock_array_index = power_state->v2.clockInfoIndex[j];
6209 if (clock_array_index >= clock_info_array->ucNumEntries)
6210 continue;
6211 if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
6212 break;
6213 clock_info = (union pplib_clock_info *)
6214 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
6215 si_parse_pplib_clock_info(rdev,
6216 &rdev->pm.dpm.ps[i], k,
6217 clock_info);
6218 k++;
6219 }
6220 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
6221 }
6222 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
6223 return 0;
6224}
6225
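The state records in the pplib table are variable-length: a two-byte v2 header followed by one clock-info index per DPM level, which is why the walk above advances by 2 + ucNumDPMLevels instead of a fixed stride. A sketch of the same walk over a simplified stand-in layout (not the real ATOM struct):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Simplified stand-in for _ATOM_PPLIB_STATE_V2 records: byte 0 is
	 * the level count, byte 1 the non-clock index, then one clock-info
	 * index per level. Two states here: 3 levels, then 1 level. */
	uint8_t table[] = { 3, 7, 0, 1, 2, 1, 7, 4 };
	uint8_t *p = table;
	int i, j;

	for (i = 0; i < 2; i++) {
		uint8_t num_levels = p[0];

		printf("state %d:", i);
		for (j = 0; j < num_levels; j++)
			printf(" clk[%u]", p[2 + j]);
		printf("\n");
		p += 2 + num_levels;	/* same stride rule as above */
	}
	return 0;
}
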
6226int si_dpm_init(struct radeon_device *rdev)
6227{
6228 struct rv7xx_power_info *pi;
6229 struct evergreen_power_info *eg_pi;
6230 struct ni_power_info *ni_pi;
6231 struct si_power_info *si_pi;
6232 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
6233 u16 data_offset, size;
6234 u8 frev, crev;
6235 struct atom_clock_dividers dividers;
6236 int ret;
6237 u32 mask;
6238
6239 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
6240 if (si_pi == NULL)
6241 return -ENOMEM;
6242 rdev->pm.dpm.priv = si_pi;
6243 ni_pi = &si_pi->ni;
6244 eg_pi = &ni_pi->eg;
6245 pi = &eg_pi->rv7xx;
6246
6247 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
6248 if (ret)
6249 si_pi->sys_pcie_mask = 0;
6250 else
6251 si_pi->sys_pcie_mask = mask;
6252 si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
6253 si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
6254
6255 si_set_max_cu_value(rdev);
6256
6257 rv770_get_max_vddc(rdev);
6258 si_get_leakage_vddc(rdev);
6259 si_patch_dependency_tables_based_on_leakage(rdev);
6260
6261 pi->acpi_vddc = 0;
6262 eg_pi->acpi_vddci = 0;
6263 pi->min_vddc_in_table = 0;
6264 pi->max_vddc_in_table = 0;
6265
6266 ret = si_parse_power_table(rdev);
6267 if (ret)
6268 return ret;
6269 ret = r600_parse_extended_power_table(rdev);
6270 if (ret)
6271 return ret;
6272
6273 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
6274 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
6275 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
6276 r600_free_extended_power_table(rdev);
6277 return -ENOMEM;
6278 }
6279 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
6280 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
6281 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
6282 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
6283 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
6284 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
6285 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
6286 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
6287 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
6288
6289 if (rdev->pm.dpm.voltage_response_time == 0)
6290 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
6291 if (rdev->pm.dpm.backbias_response_time == 0)
6292 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
6293
6294 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
6295 0, false, &dividers);
6296 if (ret)
6297 pi->ref_div = dividers.ref_div + 1;
6298 else
6299 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
6300
6301 eg_pi->smu_uvd_hs = false;
6302
6303 pi->mclk_strobe_mode_threshold = 40000;
6304 if (si_is_special_1gb_platform(rdev))
6305 pi->mclk_stutter_mode_threshold = 0;
6306 else
6307 pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
6308 pi->mclk_edc_enable_threshold = 40000;
6309 eg_pi->mclk_edc_wr_enable_threshold = 40000;
6310
6311 ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
6312
6313 pi->voltage_control =
6314 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
6315
6316 pi->mvdd_control =
6317 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
6318
6319 eg_pi->vddci_control =
6320 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
6321
6322 si_pi->vddc_phase_shed_control =
6323 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
6324
6325 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
6326 &frev, &crev, &data_offset)) {
6327 pi->sclk_ss = true;
6328 pi->mclk_ss = true;
6329 pi->dynamic_ss = true;
6330 } else {
6331 pi->sclk_ss = false;
6332 pi->mclk_ss = false;
6333 pi->dynamic_ss = true;
6334 }
6335
6336 pi->asi = RV770_ASI_DFLT;
6337 pi->pasi = CYPRESS_HASI_DFLT;
6338 pi->vrc = SISLANDS_VRC_DFLT;
6339
6340 pi->gfx_clock_gating = true;
6341
6342 eg_pi->sclk_deep_sleep = true;
6343 si_pi->sclk_deep_sleep_above_low = false;
6344
6345 if (pi->gfx_clock_gating &&
6346 (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
6347 pi->thermal_protection = true;
6348 else
6349 pi->thermal_protection = false;
6350
6351 eg_pi->dynamic_ac_timing = true;
6352
6353 eg_pi->light_sleep = true;
6354#if defined(CONFIG_ACPI)
6355 eg_pi->pcie_performance_request =
6356 radeon_acpi_is_pcie_performance_request_supported(rdev);
6357#else
6358 eg_pi->pcie_performance_request = false;
6359#endif
6360
6361 si_pi->sram_end = SMC_RAM_END;
6362
6363 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
6364 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
6365 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
6366 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
6367 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
6368 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
6369 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
6370
6371 si_initialize_powertune_defaults(rdev);
6372
6373 return 0;
6374}
6375
6376void si_dpm_fini(struct radeon_device *rdev)
6377{
6378 int i;
6379
6380 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
6381 kfree(rdev->pm.dpm.ps[i].ps_priv);
6382 }
6383 kfree(rdev->pm.dpm.ps);
6384 kfree(rdev->pm.dpm.priv);
6385 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
6386 r600_free_extended_power_table(rdev);
6387}
6388
6389void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
6390 struct seq_file *m)
6391{
6392 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
6393 struct ni_ps *ps = ni_get_ps(rps);
6394 struct rv7xx_pl *pl;
6395 u32 current_index =
6396 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
6397 CURRENT_STATE_INDEX_SHIFT;
6398
6399 if (current_index >= ps->performance_level_count) {
6400 seq_printf(m, "invalid dpm profile %d\n", current_index);
6401 } else {
6402 pl = &ps->performance_levels[current_index];
6403 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6404 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
6405 current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
6406 }
6407}
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
new file mode 100644
index 000000000000..4ce5032cdf49
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -0,0 +1,227 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __SI_DPM_H__
24#define __SI_DPM_H__
25
26#include "ni_dpm.h"
27#include "sislands_smc.h"
28
29enum si_cac_config_reg_type
30{
31 SISLANDS_CACCONFIG_MMR = 0,
32 SISLANDS_CACCONFIG_CGIND,
33 SISLANDS_CACCONFIG_MAX
34};
35
36struct si_cac_config_reg
37{
38 u32 offset;
39 u32 mask;
40 u32 shift;
41 u32 value;
42 enum si_cac_config_reg_type type;
43};
44
45struct si_powertune_data
46{
47 u32 cac_window;
48 u32 l2_lta_window_size_default;
49 u8 lts_truncate_default;
50 u8 shift_n_default;
51 u8 operating_temp;
52 struct ni_leakage_coeffients leakage_coefficients;
53 u32 fixed_kt;
54 u32 lkge_lut_v0_percent;
55 u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
56 bool enable_powertune_by_default;
57};
58
59struct si_dyn_powertune_data
60{
61 u32 cac_leakage;
62 s32 leakage_minimum_temperature;
63 u32 wintime;
64 u32 l2_lta_window_size;
65 u8 lts_truncate;
66 u8 shift_n;
67 u8 dc_pwr_value;
68 bool disable_uvd_powertune;
69};
70
71struct si_dte_data
72{
73 u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
74 u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
75 u32 k;
76 u32 t0;
77 u32 max_t;
78 u8 window_size;
79 u8 temp_select;
80 u8 dte_mode;
81 u8 tdep_count;
82 u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
83 u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
84 u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
85 u32 t_threshold;
86 bool enable_dte_by_default;
87};
88
89struct si_clock_registers {
90 u32 cg_spll_func_cntl;
91 u32 cg_spll_func_cntl_2;
92 u32 cg_spll_func_cntl_3;
93 u32 cg_spll_func_cntl_4;
94 u32 cg_spll_spread_spectrum;
95 u32 cg_spll_spread_spectrum_2;
96 u32 dll_cntl;
97 u32 mclk_pwrmgt_cntl;
98 u32 mpll_ad_func_cntl;
99 u32 mpll_dq_func_cntl;
100 u32 mpll_func_cntl;
101 u32 mpll_func_cntl_1;
102 u32 mpll_func_cntl_2;
103 u32 mpll_ss1;
104 u32 mpll_ss2;
105};
106
107struct si_mc_reg_entry {
108 u32 mclk_max;
109 u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
110};
111
112struct si_mc_reg_table {
113 u8 last;
114 u8 num_entries;
115 u16 valid_flag;
116 struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
117 SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
118};
119
120#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
121#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
122#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
123#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
124
125struct si_leakage_voltage_entry
126{
127 u16 voltage;
128 u16 leakage_index;
129};
130
131#define SISLANDS_LEAKAGE_INDEX0 0xff01
132#define SISLANDS_MAX_LEAKAGE_COUNT 4
133
134struct si_leakage_voltage
135{
136 u16 count;
137 struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
138};
139
140#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
141
142struct si_ulv_param {
143 bool supported;
144 u32 cg_ulv_control;
145 u32 cg_ulv_parameter;
146 u32 volt_change_delay;
147 struct rv7xx_pl pl;
148 bool one_pcie_lane_in_ulv;
149};
150
151struct si_power_info {
152 /* must be first! */
153 struct ni_power_info ni;
154 struct si_clock_registers clock_registers;
155 struct si_mc_reg_table mc_reg_table;
156 struct atom_voltage_table mvdd_voltage_table;
157 struct atom_voltage_table vddc_phase_shed_table;
158 struct si_leakage_voltage leakage_voltage;
159 u16 mvdd_bootup_value;
160 struct si_ulv_param ulv;
161 u32 max_cu;
162 /* pcie gen */
163 enum radeon_pcie_gen force_pcie_gen;
164 enum radeon_pcie_gen boot_pcie_gen;
165 enum radeon_pcie_gen acpi_pcie_gen;
166 u32 sys_pcie_mask;
167 /* flags */
168 bool enable_dte;
169 bool enable_ppm;
170 bool vddc_phase_shed_control;
171 bool pspp_notify_required;
172 bool sclk_deep_sleep_above_low;
173 /* smc offsets */
174 u32 sram_end;
175 u32 state_table_start;
176 u32 soft_regs_start;
177 u32 mc_reg_table_start;
178 u32 arb_table_start;
179 u32 cac_table_start;
180 u32 dte_table_start;
181 u32 spll_table_start;
182 u32 papm_cfg_table_start;
183 /* CAC stuff */
184 const struct si_cac_config_reg *cac_weights;
185 const struct si_cac_config_reg *lcac_config;
186 const struct si_cac_config_reg *cac_override;
187 const struct si_powertune_data *powertune_data;
188 struct si_dyn_powertune_data dyn_powertune_data;
189 /* DTE stuff */
190 struct si_dte_data dte_data;
191 /* scratch structs */
192 SMC_SIslands_MCRegisters smc_mc_reg_table;
193 SISLANDS_SMC_STATETABLE smc_statetable;
194 PP_SIslands_PAPMParameters papm_parm;
195};
196
197#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
198#define SISLANDS_ACPI_STATE_ARB_INDEX 1
199#define SISLANDS_ULV_STATE_ARB_INDEX 2
200#define SISLANDS_DRIVER_STATE_ARB_INDEX 3
201
202#define SISLANDS_DPM2_MAX_PULSE_SKIP 256
203
204#define SISLANDS_DPM2_NEAR_TDP_DEC 10
205#define SISLANDS_DPM2_ABOVE_SAFE_INC 5
206#define SISLANDS_DPM2_BELOW_SAFE_INC 20
207
208#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
209
210#define SISLANDS_DPM2_MAXPS_PERCENT_H 99
211#define SISLANDS_DPM2_MAXPS_PERCENT_M 99
212
213#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
214#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
215#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
216#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
217#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
218
219#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10
220
221#define SISLANDS_VRC_DFLT 0xC000B3
222#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687
223#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035
224#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550
225
226
227#endif
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
new file mode 100644
index 000000000000..5f524c0a541e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "sid.h"
29#include "ppsmc.h"
30#include "radeon_ucode.h"
31
32int si_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit)
34{
35 if (smc_address & 3)
36 return -EINVAL;
37 if ((smc_address + 3) > limit)
38 return -EINVAL;
39
40 WREG32(SMC_IND_INDEX_0, smc_address);
41 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
42
43 return 0;
44}
45
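SMC SRAM is addressed a dword at a time, so the helper above rejects addresses that are not 4-byte aligned or whose final byte would land past the limit. The same checks in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Same checks as si_set_smc_sram_address: dword alignment, and the
 * last byte of the dword must not run past the limit. */
static int valid_sram_address(uint32_t addr, uint32_t limit)
{
	if (addr & 3)
		return 0;	/* not dword aligned */
	if (addr + 3 > limit)
		return 0;	/* dword crosses the end of SRAM */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       valid_sram_address(0x100, 0x20000),	/* 1: ok */
	       valid_sram_address(0x102, 0x20000),	/* 0: misaligned */
	       valid_sram_address(0x1fffc, 0x1fffe));	/* 0: past limit */
	return 0;
}
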
46int si_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit)
49{
50 int ret;
51 u32 data, original_data, addr, extra_shift;
52
53 if (smc_start_address & 3)
54 return -EINVAL;
55 if ((smc_start_address + byte_count) > limit)
56 return -EINVAL;
57
58 addr = smc_start_address;
59
60 while (byte_count >= 4) {
61 /* SMC address space is BE */
62 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
63
64 ret = si_set_smc_sram_address(rdev, addr, limit);
65 if (ret)
66 return ret;
67
68 WREG32(SMC_IND_DATA_0, data);
69
70 src += 4;
71 byte_count -= 4;
72 addr += 4;
73 }
74
75 /* RMW for the final bytes */
76 if (byte_count > 0) {
77 data = 0;
78
79 ret = si_set_smc_sram_address(rdev, addr, limit);
80 if (ret)
81 return ret;
82
83 original_data = RREG32(SMC_IND_DATA_0);
84
85 extra_shift = 8 * (4 - byte_count);
86
87 while (byte_count > 0) {
88 /* SMC address space is BE */
89 data = (data << 8) + *src++;
90 byte_count--;
91 }
92
93 data <<= extra_shift;
94
95 data |= (original_data & ~((~0UL) << extra_shift));
96
97 ret = si_set_smc_sram_address(rdev, addr, limit);
98 if (ret)
99 return ret;
100
101 WREG32(SMC_IND_DATA_0, data);
102 }
103 return 0;
104}
105
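SMC address space is big-endian, so whole dwords are packed MSB-first, and a 1-3 byte tail is merged into the existing dword with a read-modify-write so the bytes beyond the copy survive. A user-space sketch of the tail arithmetic above:

#include <stdint.h>
#include <stdio.h>

/* Merge 'count' (1..3) trailing bytes into a big-endian dword with
 * the same shift/mask arithmetic as the RMW tail above. */
static uint32_t merge_tail(uint32_t original, const uint8_t *src,
			   unsigned int count)
{
	uint32_t data = 0;
	unsigned int extra_shift = 8 * (4 - count);

	while (count--)
		data = (data << 8) + *src++;	/* pack MSB-first */
	data <<= extra_shift;			/* move into the top bytes */
	/* keep the untouched low bytes of the original dword */
	return data | (original & ~((~0UL) << extra_shift));
}

int main(void)
{
	uint8_t tail[2] = { 0xAA, 0xBB };

	printf("0x%08X\n", (unsigned int)merge_tail(0x11223344u, tail, 2));
	/* prints 0xAABB3344: the low two bytes of the original survive */
	return 0;
}
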
106void si_start_smc(struct radeon_device *rdev)
107{
108 u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
109
110 tmp &= ~RST_REG;
111
112 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
113}
114
115void si_reset_smc(struct radeon_device *rdev)
116{
117 u32 tmp;
118
119 RREG32(CB_CGTT_SCLK_CTRL);
120 RREG32(CB_CGTT_SCLK_CTRL);
121 RREG32(CB_CGTT_SCLK_CTRL);
122 RREG32(CB_CGTT_SCLK_CTRL);
123
124 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
125 tmp |= RST_REG;
126 WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
127}
128
129int si_program_jump_on_start(struct radeon_device *rdev)
130{
131 static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
132
133 return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
134}
135
136void si_stop_smc_clock(struct radeon_device *rdev)
137{
138 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
139
140 tmp |= CK_DISABLE;
141
142 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
143}
144
145void si_start_smc_clock(struct radeon_device *rdev)
146{
147 u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
148
149 tmp &= ~CK_DISABLE;
150
151 WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
152}
153
154bool si_is_smc_running(struct radeon_device *rdev)
155{
156 u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
157 u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
158
159 if (!(rst & RST_REG) && !(clk & CK_DISABLE))
160 return true;
161
162 return false;
163}
164
165PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
166{
167 u32 tmp;
168 int i;
169
170 if (!si_is_smc_running(rdev))
171 return PPSMC_Result_Failed;
172
173 WREG32(SMC_MESSAGE_0, msg);
174
175 for (i = 0; i < rdev->usec_timeout; i++) {
176 tmp = RREG32(SMC_RESP_0);
177 if (tmp != 0)
178 break;
179 udelay(1);
180 }
181 tmp = RREG32(SMC_RESP_0);
182
183 return (PPSMC_Result)tmp;
184}
185
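Message delivery is confirmed by polling SMC_RESP_0 with 1 us sleeps for up to rdev->usec_timeout iterations, with one final read after the loop so a reply landing on the last pass is not lost. A generic sketch of the idiom, with read_resp()/delay_1us() as hypothetical stand-ins for RREG32(SMC_RESP_0) and udelay(1):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_resp;

/* Hypothetical stand-ins for RREG32(SMC_RESP_0) and udelay(1). */
static uint32_t read_resp(void) { return fake_resp; }
static void delay_1us(void) { fake_resp = 1; /* pretend the SMC replied */ }

static uint32_t wait_for_resp(int usec_timeout)
{
	int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_resp() != 0)
			break;
		delay_1us();
	}
	/* one last read covers both the break and the timeout path */
	return read_resp();
}

int main(void)
{
	printf("resp = %u\n", wait_for_resp(1000));	/* 1 */
	return 0;
}
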
186PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
187{
188 u32 tmp;
189 int i;
190
191 if (!si_is_smc_running(rdev))
192 return PPSMC_Result_OK;
193
194 for (i = 0; i < rdev->usec_timeout; i++) {
195 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
196 if ((tmp & CKEN) == 0)
197 break;
198 udelay(1);
199 }
200
201 return PPSMC_Result_OK;
202}
203
204int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
205{
206 u32 ucode_start_address;
207 u32 ucode_size;
208 const u8 *src;
209 u32 data;
210
211 if (!rdev->smc_fw)
212 return -EINVAL;
213
214 switch (rdev->family) {
215 case CHIP_TAHITI:
216 ucode_start_address = TAHITI_SMC_UCODE_START;
217 ucode_size = TAHITI_SMC_UCODE_SIZE;
218 break;
219 case CHIP_PITCAIRN:
220 ucode_start_address = PITCAIRN_SMC_UCODE_START;
221 ucode_size = PITCAIRN_SMC_UCODE_SIZE;
222 break;
223 case CHIP_VERDE:
224 ucode_start_address = VERDE_SMC_UCODE_START;
225 ucode_size = VERDE_SMC_UCODE_SIZE;
226 break;
227 case CHIP_OLAND:
228 ucode_start_address = OLAND_SMC_UCODE_START;
229 ucode_size = OLAND_SMC_UCODE_SIZE;
230 break;
231 case CHIP_HAINAN:
232 ucode_start_address = HAINAN_SMC_UCODE_START;
233 ucode_size = HAINAN_SMC_UCODE_SIZE;
234 break;
235 default:
236 DRM_ERROR("unknown asic in smc ucode loader\n");
237 BUG();
238 }
239
240 if (ucode_size & 3)
241 return -EINVAL;
242
243 src = (const u8 *)rdev->smc_fw->data;
244 WREG32(SMC_IND_INDEX_0, ucode_start_address);
245 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
246 while (ucode_size >= 4) {
247 /* SMC address space is BE */
248 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
249
250 WREG32(SMC_IND_DATA_0, data);
251
252 src += 4;
253 ucode_size -= 4;
254 }
255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
256
257 return 0;
258}
259
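The ucode loader writes SMC_IND_INDEX_0 once, enables AUTO_INCREMENT_IND_0, and then streams big-endian dwords into SMC_IND_DATA_0, letting the hardware advance the index. A sketch modelling that auto-increment pair with a plain array:

#include <stdint.h>
#include <stdio.h>

/* Model of an auto-incrementing index/data pair: each write to the
 * data register lands at sram[index++], like SMC_IND_INDEX_0 and
 * SMC_IND_DATA_0 with AUTO_INCREMENT_IND_0 set. */
static uint32_t sram[8];
static uint32_t index_reg;

static void write_data(uint32_t v)
{
	sram[index_reg++] = v;
}

int main(void)
{
	const uint8_t fw[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	const uint8_t *src = fw;
	uint32_t size = sizeof(fw);

	index_reg = 0;	/* one index write, then stream the payload */
	while (size >= 4) {
		/* SMC space is big-endian: pack MSB-first */
		write_data(((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
			   ((uint32_t)src[2] << 8) | src[3]);
		src += 4;
		size -= 4;
	}
	printf("0x%08X 0x%08X\n", (unsigned int)sram[0], (unsigned int)sram[1]);
	/* prints 0x01020304 0x05060708 */
	return 0;
}
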
260int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
261 u32 *value, u32 limit)
262{
263 int ret;
264
265 ret = si_set_smc_sram_address(rdev, smc_address, limit);
266 if (ret)
267 return ret;
268
269 *value = RREG32(SMC_IND_DATA_0);
270 return 0;
271}
272
273int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
274 u32 value, u32 limit)
275{
276 int ret;
277
278 ret = si_set_smc_sram_address(rdev, smc_address, limit);
279 if (ret)
280 return ret;
281
282 WREG32(SMC_IND_DATA_0, value);
283 return 0;
284}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 8f2d7d4f9b28..12a20eb77d0c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -30,6 +30,94 @@
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
32
33#define SI_MAX_SH_GPRS 256
34#define SI_MAX_TEMP_GPRS 16
35#define SI_MAX_SH_THREADS 256
36#define SI_MAX_SH_STACK_ENTRIES 4096
37#define SI_MAX_FRC_EOV_CNT 16384
38#define SI_MAX_BACKENDS 8
39#define SI_MAX_BACKENDS_MASK 0xFF
40#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
41#define SI_MAX_SIMDS 12
42#define SI_MAX_SIMDS_MASK 0x0FFF
43#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
44#define SI_MAX_PIPES 8
45#define SI_MAX_PIPES_MASK 0xFF
46#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
47#define SI_MAX_LDS_NUM 0xFFFF
48#define SI_MAX_TCC 16
49#define SI_MAX_TCC_MASK 0xFFFF
50
51/* SMC IND accessor regs */
52#define SMC_IND_INDEX_0 0x200
53#define SMC_IND_DATA_0 0x204
54
55#define SMC_IND_ACCESS_CNTL 0x228
56# define AUTO_INCREMENT_IND_0 (1 << 0)
57#define SMC_MESSAGE_0 0x22c
58#define SMC_RESP_0 0x230
59
60/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
61#define SMC_CG_IND_START 0xc0030000
62#define SMC_CG_IND_END 0xc0040000
63
64#define CG_CGTT_LOCAL_0 0x400
65#define CG_CGTT_LOCAL_1 0x401
66
67/* SMC IND registers */
68#define SMC_SYSCON_RESET_CNTL 0x80000000
69# define RST_REG (1 << 0)
70#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
71# define CK_DISABLE (1 << 0)
72# define CKEN (1 << 24)
73
74#define VGA_HDP_CONTROL 0x328
75#define VGA_MEMORY_DISABLE (1 << 4)
76
77#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
78#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
79#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
80#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
81#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
82#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
83#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
84
85#define CG_SPLL_FUNC_CNTL 0x600
86#define SPLL_RESET (1 << 0)
87#define SPLL_SLEEP (1 << 1)
88#define SPLL_BYPASS_EN (1 << 3)
89#define SPLL_REF_DIV(x) ((x) << 4)
90#define SPLL_REF_DIV_MASK (0x3f << 4)
91#define SPLL_PDIV_A(x) ((x) << 20)
92#define SPLL_PDIV_A_MASK (0x7f << 20)
93#define SPLL_PDIV_A_SHIFT 20
94#define CG_SPLL_FUNC_CNTL_2 0x604
95#define SCLK_MUX_SEL(x) ((x) << 0)
96#define SCLK_MUX_SEL_MASK (0x1ff << 0)
97#define CG_SPLL_FUNC_CNTL_3 0x608
98#define SPLL_FB_DIV(x) ((x) << 0)
99#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
100#define SPLL_FB_DIV_SHIFT 0
101#define SPLL_DITHEN (1 << 28)
102#define CG_SPLL_FUNC_CNTL_4 0x60c
103
104#define SPLL_CNTL_MODE 0x618
105# define SPLL_REFCLK_SEL(x) ((x) << 8)
106# define SPLL_REFCLK_SEL_MASK 0xFF00
107
108#define CG_SPLL_SPREAD_SPECTRUM 0x620
109#define SSEN (1 << 0)
110#define CLK_S(x) ((x) << 4)
111#define CLK_S_MASK (0xfff << 4)
112#define CLK_S_SHIFT 4
113#define CG_SPLL_SPREAD_SPECTRUM_2 0x624
114#define CLK_V(x) ((x) << 0)
115#define CLK_V_MASK (0x3ffffff << 0)
116#define CLK_V_SHIFT 0
117
118#define CG_SPLL_AUTOSCALE_CNTL 0x62c
119# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
120
121/* discrete uvd clocks */
122#define CG_UPLL_FUNC_CNTL 0x634
123# define UPLL_RESET_MASK 0x00000001
@@ -59,6 +147,45 @@
147#define CG_UPLL_SPREAD_SPECTRUM 0x650
148# define SSEN_MASK 0x00000001
149
150#define MPLL_BYPASSCLK_SEL 0x65c
151# define MPLL_CLKOUT_SEL(x) ((x) << 8)
152# define MPLL_CLKOUT_SEL_MASK 0xFF00
153
154#define CG_CLKPIN_CNTL 0x660
155# define XTALIN_DIVIDE (1 << 1)
156# define BCLK_AS_XCLK (1 << 2)
157#define CG_CLKPIN_CNTL_2 0x664
158# define FORCE_BIF_REFCLK_EN (1 << 3)
159# define MUX_TCLK_TO_XCLK (1 << 8)
160
161#define THM_CLK_CNTL 0x66c
162# define CMON_CLK_SEL(x) ((x) << 0)
163# define CMON_CLK_SEL_MASK 0xFF
164# define TMON_CLK_SEL(x) ((x) << 8)
165# define TMON_CLK_SEL_MASK 0xFF00
166#define MISC_CLK_CNTL 0x670
167# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
168# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
169# define ZCLK_SEL(x) ((x) << 8)
170# define ZCLK_SEL_MASK 0xFF00
171
172#define CG_THERMAL_CTRL 0x700
173#define DPM_EVENT_SRC(x) ((x) << 0)
174#define DPM_EVENT_SRC_MASK (7 << 0)
175#define DIG_THERM_DPM(x) ((x) << 14)
176#define DIG_THERM_DPM_MASK 0x003FC000
177#define DIG_THERM_DPM_SHIFT 14
178
179#define CG_THERMAL_INT 0x708
180#define DIG_THERM_INTH(x) ((x) << 8)
181#define DIG_THERM_INTH_MASK 0x0000FF00
182#define DIG_THERM_INTH_SHIFT 8
183#define DIG_THERM_INTL(x) ((x) << 16)
184#define DIG_THERM_INTL_MASK 0x00FF0000
185#define DIG_THERM_INTL_SHIFT 16
186#define THERM_INT_MASK_HIGH (1 << 24)
187#define THERM_INT_MASK_LOW (1 << 25)
188
189#define CG_MULT_THERMAL_STATUS 0x714
190#define ASIC_MAX_TEMP(x) ((x) << 0)
191#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -67,31 +194,89 @@
194#define CTF_TEMP_MASK 0x0003fe00
195#define CTF_TEMP_SHIFT 9
196
-70#define SI_MAX_SH_GPRS 256
-71#define SI_MAX_TEMP_GPRS 16
-72#define SI_MAX_SH_THREADS 256
-73#define SI_MAX_SH_STACK_ENTRIES 4096
-74#define SI_MAX_FRC_EOV_CNT 16384
-75#define SI_MAX_BACKENDS 8
-76#define SI_MAX_BACKENDS_MASK 0xFF
-77#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-78#define SI_MAX_SIMDS 12
-79#define SI_MAX_SIMDS_MASK 0x0FFF
-80#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-81#define SI_MAX_PIPES 8
-82#define SI_MAX_PIPES_MASK 0xFF
-83#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-84#define SI_MAX_LDS_NUM 0xFFFF
-85#define SI_MAX_TCC 16
-86#define SI_MAX_TCC_MASK 0xFFFF
-87
-88#define VGA_HDP_CONTROL 0x328
-89#define VGA_MEMORY_DISABLE (1 << 4)
-90
-91#define CG_CLKPIN_CNTL 0x660
-92# define XTALIN_DIVIDE (1 << 1)
-93#define CG_CLKPIN_CNTL_2 0x664
-94# define MUX_TCLK_TO_XCLK (1 << 8)
197#define GENERAL_PWRMGT 0x780
198# define GLOBAL_PWRMGT_EN (1 << 0)
199# define STATIC_PM_EN (1 << 1)
200# define THERMAL_PROTECTION_DIS (1 << 2)
201# define THERMAL_PROTECTION_TYPE (1 << 3)
202# define SW_SMIO_INDEX(x) ((x) << 6)
203# define SW_SMIO_INDEX_MASK (1 << 6)
204# define SW_SMIO_INDEX_SHIFT 6
205# define VOLT_PWRMGT_EN (1 << 10)
206# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
207#define CG_TPC 0x784
208#define SCLK_PWRMGT_CNTL 0x788
209# define SCLK_PWRMGT_OFF (1 << 0)
210# define SCLK_LOW_D1 (1 << 1)
211# define FIR_RESET (1 << 4)
212# define FIR_FORCE_TREND_SEL (1 << 5)
213# define FIR_TREND_MODE (1 << 6)
214# define DYN_GFX_CLK_OFF_EN (1 << 7)
215# define GFX_CLK_FORCE_ON (1 << 8)
216# define GFX_CLK_REQUEST_OFF (1 << 9)
217# define GFX_CLK_FORCE_OFF (1 << 10)
218# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
219# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
220# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
221# define DYN_LIGHT_SLEEP_EN (1 << 14)
222
223#define TARGET_AND_CURRENT_PROFILE_INDEX 0x798
224# define CURRENT_STATE_INDEX_MASK (0xf << 4)
225# define CURRENT_STATE_INDEX_SHIFT 4
226
227#define CG_FTV 0x7bc
228
229#define CG_FFCT_0 0x7c0
230# define UTC_0(x) ((x) << 0)
231# define UTC_0_MASK (0x3ff << 0)
232# define DTC_0(x) ((x) << 10)
233# define DTC_0_MASK (0x3ff << 10)
234
235#define CG_BSP 0x7fc
236# define BSP(x) ((x) << 0)
237# define BSP_MASK (0xffff << 0)
238# define BSU(x) ((x) << 16)
239# define BSU_MASK (0xf << 16)
240#define CG_AT 0x800
241# define CG_R(x) ((x) << 0)
242# define CG_R_MASK (0xffff << 0)
243# define CG_L(x) ((x) << 16)
244# define CG_L_MASK (0xffff << 16)
245
246#define CG_GIT 0x804
247# define CG_GICST(x) ((x) << 0)
248# define CG_GICST_MASK (0xffff << 0)
249# define CG_GIPOT(x) ((x) << 16)
250# define CG_GIPOT_MASK (0xffff << 16)
251
252#define CG_SSP 0x80c
253# define SST(x) ((x) << 0)
254# define SST_MASK (0xffff << 0)
255# define SSTU(x) ((x) << 16)
256# define SSTU_MASK (0xf << 16)
257
258#define CG_DISPLAY_GAP_CNTL 0x828
259# define DISP1_GAP(x) ((x) << 0)
260# define DISP1_GAP_MASK (3 << 0)
261# define DISP2_GAP(x) ((x) << 2)
262# define DISP2_GAP_MASK (3 << 2)
263# define VBI_TIMER_COUNT(x) ((x) << 4)
264# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
265# define VBI_TIMER_UNIT(x) ((x) << 20)
266# define VBI_TIMER_UNIT_MASK (7 << 20)
267# define DISP1_GAP_MCHG(x) ((x) << 24)
268# define DISP1_GAP_MCHG_MASK (3 << 24)
269# define DISP2_GAP_MCHG(x) ((x) << 26)
270# define DISP2_GAP_MCHG_MASK (3 << 26)
271
272#define CG_ULV_CONTROL 0x878
273#define CG_ULV_PARAMETER 0x87c
274
275#define SMC_SCRATCH0 0x884
276
277#define CG_CAC_CTRL 0x8b8
278# define CAC_WINDOW(x) ((x) << 0)
279# define CAC_WINDOW_MASK 0x00ffffff
95 280
281#define DMIF_ADDR_CONFIG 0xBD4
282
@@ -203,6 +388,10 @@
388#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
389#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
390
391#define VM_L2_CG 0x15c0
392#define MC_CG_ENABLE (1 << 18)
393#define MC_LS_ENABLE (1 << 19)
394
395#define MC_SHARED_CHMAP 0x2004
396#define NOOFCHAN_SHIFT 12
397#define NOOFCHAN_MASK 0x0000f000
@@ -228,6 +417,17 @@
417
418#define MC_SHARED_BLACKOUT_CNTL 0x20ac
419
420#define MC_HUB_MISC_HUB_CG 0x20b8
421#define MC_HUB_MISC_VM_CG 0x20bc
422
423#define MC_HUB_MISC_SIP_CG 0x20c0
424
425#define MC_XPB_CLK_GAT 0x2478
426
427#define MC_CITF_MISC_RD_CG 0x2648
428#define MC_CITF_MISC_WR_CG 0x264c
429#define MC_CITF_MISC_VM_CG 0x2650
430
431#define MC_ARB_RAMCFG 0x2760
432#define NOOFBANK_SHIFT 0
433#define NOOFBANK_MASK 0x00000003
@@ -243,6 +443,23 @@
443#define NOOFGROUPS_SHIFT 12
444#define NOOFGROUPS_MASK 0x00001000
445
446#define MC_ARB_DRAM_TIMING 0x2774
447#define MC_ARB_DRAM_TIMING2 0x2778
448
449#define MC_ARB_BURST_TIME 0x2808
450#define STATE0(x) ((x) << 0)
451#define STATE0_MASK (0x1f << 0)
452#define STATE0_SHIFT 0
453#define STATE1(x) ((x) << 5)
454#define STATE1_MASK (0x1f << 5)
455#define STATE1_SHIFT 5
456#define STATE2(x) ((x) << 10)
457#define STATE2_MASK (0x1f << 10)
458#define STATE2_SHIFT 10
459#define STATE3(x) ((x) << 15)
460#define STATE3_MASK (0x1f << 15)
461#define STATE3_SHIFT 15
462
463#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
464#define TRAIN_DONE_D0 (1 << 30)
465#define TRAIN_DONE_D1 (1 << 31)
@@ -250,13 +467,105 @@
467#define MC_SEQ_SUP_CNTL 0x28c8
468#define RUN_MASK (1 << 0)
469#define MC_SEQ_SUP_PGM 0x28cc
470#define MC_PMG_AUTO_CMD 0x28d0
471
472#define MC_IO_PAD_CNTL_D0 0x29d0
473#define MEM_FALL_OUT_CMD (1 << 8)
474
475#define MC_SEQ_RAS_TIMING 0x28a0
476#define MC_SEQ_CAS_TIMING 0x28a4
477#define MC_SEQ_MISC_TIMING 0x28a8
478#define MC_SEQ_MISC_TIMING2 0x28ac
479#define MC_SEQ_PMG_TIMING 0x28b0
480#define MC_SEQ_RD_CTL_D0 0x28b4
481#define MC_SEQ_RD_CTL_D1 0x28b8
482#define MC_SEQ_WR_CTL_D0 0x28bc
483#define MC_SEQ_WR_CTL_D1 0x28c0
484
485#define MC_SEQ_MISC0 0x2a00
486#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
487#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
488#define MC_SEQ_MISC0_VEN_ID_VALUE 3
489#define MC_SEQ_MISC0_REV_ID_SHIFT 12
490#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
491#define MC_SEQ_MISC0_REV_ID_VALUE 1
492#define MC_SEQ_MISC0_GDDR5_SHIFT 28
493#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
494#define MC_SEQ_MISC0_GDDR5_VALUE 5
495#define MC_SEQ_MISC1 0x2a04
496#define MC_SEQ_RESERVE_M 0x2a08
497#define MC_PMG_CMD_EMRS 0x2a0c
498
499#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
500#define MC_SEQ_IO_DEBUG_DATA 0x2a48
501
502#define MC_SEQ_MISC5 0x2a54
503#define MC_SEQ_MISC6 0x2a58
504
505#define MC_SEQ_MISC7 0x2a64
506
507#define MC_SEQ_RAS_TIMING_LP 0x2a6c
508#define MC_SEQ_CAS_TIMING_LP 0x2a70
509#define MC_SEQ_MISC_TIMING_LP 0x2a74
510#define MC_SEQ_MISC_TIMING2_LP 0x2a78
511#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
512#define MC_SEQ_WR_CTL_D1_LP 0x2a80
513#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
514#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
515
516#define MC_PMG_CMD_MRS 0x2aac
517
518#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
519#define MC_SEQ_RD_CTL_D1_LP 0x2b20
520
521#define MC_PMG_CMD_MRS1 0x2b44
522#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
523#define MC_SEQ_PMG_TIMING_LP 0x2b4c
524
525#define MC_SEQ_WR_CTL_2 0x2b54
526#define MC_SEQ_WR_CTL_2_LP 0x2b58
527#define MC_PMG_CMD_MRS2 0x2b5c
528#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
529
530#define MCLK_PWRMGT_CNTL 0x2ba0
531# define DLL_SPEED(x) ((x) << 0)
532# define DLL_SPEED_MASK (0x1f << 0)
533# define DLL_READY (1 << 6)
534# define MC_INT_CNTL (1 << 7)
535# define MRDCK0_PDNB (1 << 8)
536# define MRDCK1_PDNB (1 << 9)
537# define MRDCK0_RESET (1 << 16)
538# define MRDCK1_RESET (1 << 17)
539# define DLL_READY_READ (1 << 24)
540#define DLL_CNTL 0x2ba4
541# define MRDCK0_BYPASS (1 << 24)
542# define MRDCK1_BYPASS (1 << 25)
543
544#define MPLL_FUNC_CNTL 0x2bb4
545#define BWCTRL(x) ((x) << 20)
546#define BWCTRL_MASK (0xff << 20)
547#define MPLL_FUNC_CNTL_1 0x2bb8
548#define VCO_MODE(x) ((x) << 0)
549#define VCO_MODE_MASK (3 << 0)
550#define CLKFRAC(x) ((x) << 4)
551#define CLKFRAC_MASK (0xfff << 4)
552#define CLKF(x) ((x) << 16)
553#define CLKF_MASK (0xfff << 16)
554#define MPLL_FUNC_CNTL_2 0x2bbc
555#define MPLL_AD_FUNC_CNTL 0x2bc0
556#define YCLK_POST_DIV(x) ((x) << 0)
557#define YCLK_POST_DIV_MASK (7 << 0)
558#define MPLL_DQ_FUNC_CNTL 0x2bc4
559#define YCLK_SEL(x) ((x) << 4)
560#define YCLK_SEL_MASK (1 << 4)
561
562#define MPLL_SS1 0x2bcc
563#define CLKV(x) ((x) << 0)
564#define CLKV_MASK (0x3ffffff << 0)
565#define MPLL_SS2 0x2bd0
566#define CLKS(x) ((x) << 0)
567#define CLKS_MASK (0xfff << 0)
568
569#define HDP_HOST_PATH_CNTL 0x2C00
570#define HDP_NONSURFACE_BASE 0x2C04
571#define HDP_NONSURFACE_INFO 0x2C08
@@ -266,6 +575,8 @@
575#define HDP_MISC_CNTL 0x2F4C
576#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
577
578#define ATC_MISC_CG 0x3350
579
269#define IH_RB_CNTL 0x3e00 580#define IH_RB_CNTL 0x3e00
270# define IH_RB_ENABLE (1 << 0) 581# define IH_RB_ENABLE (1 << 0)
271# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ 582# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
@@ -424,6 +735,9 @@
424# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 735# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
425# define DC_HPDx_EN (1 << 28) 736# define DC_HPDx_EN (1 << 28)
426 737
738#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
739# define STUTTER_ENABLE (1 << 0)
740
427/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ 741/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
428#define CRTC_STATUS_FRAME_COUNT 0x6e98 742#define CRTC_STATUS_FRAME_COUNT 0x6e98
429 743
@@ -599,6 +913,24 @@
599 913
600#define SQC_CACHES 0x8C08 914#define SQC_CACHES 0x8C08
601 915
+#define SQ_POWER_THROTTLE 0x8e58
+#define MIN_POWER(x) ((x) << 0)
+#define MIN_POWER_MASK (0x3fff << 0)
+#define MIN_POWER_SHIFT 0
+#define MAX_POWER(x) ((x) << 16)
+#define MAX_POWER_MASK (0x3fff << 16)
+#define MAX_POWER_SHIFT 16
+#define SQ_POWER_THROTTLE2 0x8e5c
+#define MAX_POWER_DELTA(x) ((x) << 0)
+#define MAX_POWER_DELTA_MASK (0x3fff << 0)
+#define MAX_POWER_DELTA_SHIFT 0
+#define STI_SIZE(x) ((x) << 16)
+#define STI_SIZE_MASK (0x3ff << 16)
+#define STI_SIZE_SHIFT 16
+#define LTI_RATIO(x) ((x) << 27)
+#define LTI_RATIO_MASK (0xf << 27)
+#define LTI_RATIO_SHIFT 27
+
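[Editor's sketch (not part of this patch): MIN_POWER and MAX_POWER are 14-bit fields at bits 13:0 and 29:16 of SQ_POWER_THROTTLE. A hedged compose, where min and max are assumed precomputed power limits:]

	u32 sq_power_throttle = RREG32(SQ_POWER_THROTTLE);

	sq_power_throttle &= ~(MIN_POWER_MASK | MAX_POWER_MASK);
	sq_power_throttle |= MIN_POWER(min) | MAX_POWER(max);
	WREG32(SQ_POWER_THROTTLE, sq_power_throttle);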
 #define SX_DEBUG_1 0x9060
 
 #define SPI_STATIC_THREAD_MGMT_1 0x90E0
@@ -616,6 +948,11 @@
 #define CGTS_USER_TCC_DISABLE 0x914C
 #define TCC_DISABLE_MASK 0xFFFF0000
 #define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x9150
+#define OVERRIDE (1 << 21)
+#define LS_OVERRIDE (1 << 22)
+
+#define SPI_LB_CU_MASK 0x9354
 
 #define TA_CNTL_AUX 0x9508
 
@@ -705,6 +1042,8 @@
 #define CB_PERFCOUNTER3_SELECT0 0x9a38
 #define CB_PERFCOUNTER3_SELECT1 0x9a3c
 
+#define CB_CGTT_SCLK_CTRL 0x9a60
+
 #define GC_USER_RB_BACKEND_DISABLE 0x9B7C
 #define BACKEND_DISABLE_MASK 0x00FF0000
 #define BACKEND_DISABLE_SHIFT 16
@@ -762,6 +1101,9 @@
 # define CP_RINGID1_INT_STAT (1 << 30)
 # define CP_RINGID0_INT_STAT (1 << 31)
 
+#define CP_MEM_SLP_CNTL 0xC1E4
+# define CP_MEM_LS_EN (1 << 0)
+
 #define CP_DEBUG 0xC1FC
 
 #define RLC_CNTL 0xC300
@@ -769,6 +1111,7 @@
 #define RLC_RL_BASE 0xC304
 #define RLC_RL_SIZE 0xC308
 #define RLC_LB_CNTL 0xC30C
+# define LOAD_BALANCE_ENABLE (1 << 0)
 #define RLC_SAVE_AND_RESTORE_BASE 0xC310
 #define RLC_LB_CNTR_MAX 0xC314
 #define RLC_LB_CNTR_INIT 0xC318
@@ -783,6 +1126,56 @@
 #define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
 #define RLC_MC_CNTL 0xC344
 #define RLC_UCODE_CNTL 0xC348
+#define RLC_STAT 0xC34C
+# define RLC_BUSY_STATUS (1 << 0)
+# define GFX_POWER_STATUS (1 << 1)
+# define GFX_CLOCK_STATUS (1 << 2)
+# define GFX_LS_STATUS (1 << 3)
+
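[Editor's sketch (not part of this patch): RLC_BUSY_STATUS above is the natural idle poll for this block. A bounded busy-wait in the driver's usual style, assuming RREG32()/udelay() and the rdev->usec_timeout bound:]

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!(RREG32(RLC_STAT) & RLC_BUSY_STATUS))
			break;
		udelay(1);
	}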
+#define RLC_PG_CNTL 0xC35C
+# define GFX_PG_ENABLE (1 << 0)
+# define GFX_PG_SRC (1 << 1)
+
+#define RLC_CGTT_MGCG_OVERRIDE 0xC400
+#define RLC_CGCG_CGLS_CTRL 0xC404
+# define CGCG_EN (1 << 0)
+# define CGLS_EN (1 << 1)
+
+#define RLC_TTOP_D 0xC414
+# define RLC_PUD(x) ((x) << 0)
+# define RLC_PUD_MASK (0xff << 0)
+# define RLC_PDD(x) ((x) << 8)
+# define RLC_PDD_MASK (0xff << 8)
+# define RLC_TTPD(x) ((x) << 16)
+# define RLC_TTPD_MASK (0xff << 16)
+# define RLC_MSD(x) ((x) << 24)
+# define RLC_MSD_MASK (0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK 0xC41C
+
+#define RLC_PG_AO_CU_MASK 0xC42C
+#define RLC_MAX_PG_CU 0xC430
+# define MAX_PU_CU(x) ((x) << 0)
+# define MAX_PU_CU_MASK (0xff << 0)
+#define RLC_AUTO_PG_CTRL 0xC434
+# define AUTO_PG_EN (1 << 0)
+# define GRBM_REG_SGIT(x) ((x) << 3)
+# define GRBM_REG_SGIT_MASK (0xffff << 3)
+# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
+# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0 0xC454
+#define RLC_SERDES_WR_MASTER_MASK_1 0xC458
+#define RLC_SERDES_WR_CTRL 0xC45C
+
+#define RLC_SERDES_MASTER_BUSY_0 0xC464
+#define RLC_SERDES_MASTER_BUSY_1 0xC468
+
+#define RLC_GCPM_GENERAL_3 0xC478
+
+#define DB_RENDER_CONTROL 0x28000
+
+#define DB_DEPTH_INFO 0x2803c
 
 #define PA_SC_RASTER_CONFIG 0x28350
 # define RASTER_CONFIG_RB_MAP_0 0
@@ -829,6 +1222,146 @@
 # define THREAD_TRACE_FLUSH (54 << 0)
 # define THREAD_TRACE_FINISH (55 << 0)
 
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL 0x10
+# define LS2_EXIT_TIME(x) ((x) << 17)
+# define LS2_EXIT_TIME_MASK (0x7 << 17)
+# define LS2_EXIT_TIME_SHIFT 17
+#define PB0_PIF_PAIRING 0x11
+# define MULTI_PIF (1 << 25)
+#define PB0_PIF_PWRDOWN_0 0x12
+# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
+# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_0_SHIFT 24
+#define PB0_PIF_PWRDOWN_1 0x13
+# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
+# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_1_SHIFT 24
+
+#define PB0_PIF_PWRDOWN_2 0x17
+# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
+# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_2_SHIFT 24
+#define PB0_PIF_PWRDOWN_3 0x18
+# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
+# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_3_SHIFT 24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL 0x10
+#define PB1_PIF_PAIRING 0x11
+#define PB1_PIF_PWRDOWN_0 0x12
+#define PB1_PIF_PWRDOWN_1 0x13
+
+#define PB1_PIF_PWRDOWN_2 0x17
+#define PB1_PIF_PWRDOWN_3 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2 0x1c /* PCIE */
+# define SLV_MEM_LS_EN (1 << 16)
+# define MST_MEM_LS_EN (1 << 18)
+# define REPLAY_MEM_LS_EN (1 << 19)
+#define PCIE_LC_STATUS1 0x28 /* PCIE */
+# define LC_REVERSE_RCVR (1 << 0)
+# define LC_REVERSE_XMIT (1 << 1)
+# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
+# define LC_OPERATING_LINK_WIDTH_SHIFT 2
+# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
+# define LC_DETECTED_LINK_WIDTH_SHIFT 5
+
+#define PCIE_P_CNTL 0x40 /* PCIE */
+# define P_IGNORE_EDB_ERR (1 << 6)
+
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL 0xa0
+# define LC_L0S_INACTIVITY(x) ((x) << 8)
+# define LC_L0S_INACTIVITY_MASK (0xf << 8)
+# define LC_L0S_INACTIVITY_SHIFT 8
+# define LC_L1_INACTIVITY(x) ((x) << 12)
+# define LC_L1_INACTIVITY_MASK (0xf << 12)
+# define LC_L1_INACTIVITY_SHIFT 12
+# define LC_PMI_TO_L1_DIS (1 << 16)
+# define LC_ASPM_TO_L1_DIS (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
+# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
+# define LC_DYN_LANES_PWR_STATE_SHIFT 21
+#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
+# define LC_XMIT_N_FTS(x) ((x) << 0)
+# define LC_XMIT_N_FTS_MASK (0xff << 0)
+# define LC_XMIT_N_FTS_SHIFT 0
+# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
+# define LC_N_FTS_MASK (0xff << 24)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_GEN3_EN_STRAP (1 << 1)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
+# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
+# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
+# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+# define LC_CURRENT_DATA_RATE_SHIFT 13
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
+# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
+# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
+
+#define PCIE_LC_CNTL2 0xb1
+# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
+# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
+
+#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
+# define LC_GO_TO_RECOVERY (1 << 30)
+#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
+# define LC_REDO_EQ (1 << 5)
+# define LC_SET_QUIESCE (1 << 13)
+
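[Editor's sketch (not part of this patch): per the idx/data comments above, the LC registers sit behind indirect index/data pairs rather than plain MMIO. Assuming the driver's RREG32_PCIE() indirect accessor, the negotiated link width could be read as:]

	u32 lw = (RREG32_PCIE(PCIE_LC_STATUS1) & LC_OPERATING_LINK_WIDTH_MASK) >>
		LC_OPERATING_LINK_WIDTH_SHIFT;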
 /*
  * UVD
  */
@@ -838,6 +1371,21 @@
 #define UVD_RBC_RB_RPTR 0xF690
 #define UVD_RBC_RB_WPTR 0xF694
 
+#define UVD_CGC_CTRL 0xF4B0
+# define DCM (1 << 0)
+# define CG_DT(x) ((x) << 2)
+# define CG_DT_MASK (0xf << 2)
+# define CLK_OD(x) ((x) << 6)
+# define CLK_OD_MASK (0x1f << 6)
+
+/* UVD CTX indirect */
+#define UVD_CGC_MEM_CTRL 0xC0
+#define UVD_CGC_CTRL2 0xC1
+# define DYN_OR_EN (1 << 0)
+# define DYN_RR_EN (1 << 1)
+# define G_DIV_ID(x) ((x) << 2)
+# define G_DIV_ID_MASK (0x7 << 2)
+
 /*
  * PM4
  */
@@ -1082,6 +1630,11 @@
 # define DMA_IDLE (1 << 0)
 #define DMA_TILING_CONFIG 0xd0b8
 
+#define DMA_PG 0xd0d4
+# define PG_CNTL_ENABLE (1 << 0)
+#define DMA_PGFSM_CONFIG 0xd0d8
+#define DMA_PGFSM_WRITE 0xd0dc
+
 #define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
  (((b) & 0x1) << 26) | \
  (((t) & 0x1) << 23) | \
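[Editor's sketch (not part of this patch): DMA_PACKET() packs the opcode into bits 31:28 with flag bits below it; the hunk above ends before the macro's final fields. Assuming the DMA_PACKET_COPY opcode defined elsewhere in this header, a copy-packet header would be built as:]

	/* header for a linear copy of 0x100 dwords */
	u32 header = DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0, 0x100);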
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
new file mode 100644
index 000000000000..5578e9837026
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -0,0 +1,397 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef PP_SISLANDS_SMC_H
24#define PP_SISLANDS_SMC_H
25
26#include "ppsmc.h"
27
28#pragma pack(push, 1)
29
30#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
31
32struct PP_SIslands_Dpm2PerfLevel
33{
34 uint8_t MaxPS;
35 uint8_t TgtAct;
36 uint8_t MaxPS_StepInc;
37 uint8_t MaxPS_StepDec;
38 uint8_t PSSamplingTime;
39 uint8_t NearTDPDec;
40 uint8_t AboveSafeInc;
41 uint8_t BelowSafeInc;
42 uint8_t PSDeltaLimit;
43 uint8_t PSDeltaWin;
44 uint16_t PwrEfficiencyRatio;
45 uint8_t Reserved[4];
46};
47
48typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
49
50struct PP_SIslands_DPM2Status
51{
52 uint32_t dpm2Flags;
53 uint8_t CurrPSkip;
54 uint8_t CurrPSkipPowerShift;
55 uint8_t CurrPSkipTDP;
56 uint8_t CurrPSkipOCP;
57 uint8_t MaxSPLLIndex;
58 uint8_t MinSPLLIndex;
59 uint8_t CurrSPLLIndex;
60 uint8_t InfSweepMode;
61 uint8_t InfSweepDir;
62 uint8_t TDPexceeded;
63 uint8_t reserved;
64 uint8_t SwitchDownThreshold;
65 uint32_t SwitchDownCounter;
66 uint32_t SysScalingFactor;
67};
68
69typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
70
71struct PP_SIslands_DPM2Parameters
72{
73 uint32_t TDPLimit;
74 uint32_t NearTDPLimit;
75 uint32_t SafePowerLimit;
76 uint32_t PowerBoostLimit;
77 uint32_t MinLimitDelta;
78};
79typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
80
81struct PP_SIslands_PAPMStatus
82{
83 uint32_t EstimatedDGPU_T;
84 uint32_t EstimatedDGPU_P;
85 uint32_t EstimatedAPU_T;
86 uint32_t EstimatedAPU_P;
87 uint8_t dGPU_T_Limit_Exceeded;
88 uint8_t reserved[3];
89};
90typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
91
92struct PP_SIslands_PAPMParameters
93{
94 uint32_t NearTDPLimitTherm;
95 uint32_t NearTDPLimitPAPM;
96 uint32_t PlatformPowerLimit;
97 uint32_t dGPU_T_Limit;
98 uint32_t dGPU_T_Warning;
99 uint32_t dGPU_T_Hysteresis;
100};
101typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
102
103struct SISLANDS_SMC_SCLK_VALUE
104{
105 uint32_t vCG_SPLL_FUNC_CNTL;
106 uint32_t vCG_SPLL_FUNC_CNTL_2;
107 uint32_t vCG_SPLL_FUNC_CNTL_3;
108 uint32_t vCG_SPLL_FUNC_CNTL_4;
109 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
110 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
111 uint32_t sclk_value;
112};
113
114typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
115
116struct SISLANDS_SMC_MCLK_VALUE
117{
118 uint32_t vMPLL_FUNC_CNTL;
119 uint32_t vMPLL_FUNC_CNTL_1;
120 uint32_t vMPLL_FUNC_CNTL_2;
121 uint32_t vMPLL_AD_FUNC_CNTL;
122 uint32_t vMPLL_DQ_FUNC_CNTL;
123 uint32_t vMCLK_PWRMGT_CNTL;
124 uint32_t vDLL_CNTL;
125 uint32_t vMPLL_SS;
126 uint32_t vMPLL_SS2;
127 uint32_t mclk_value;
128};
129
130typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
131
132struct SISLANDS_SMC_VOLTAGE_VALUE
133{
134 uint16_t value;
135 uint8_t index;
136 uint8_t phase_settings;
137};
138
139typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
140
141struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
142{
143 uint8_t ACIndex;
144 uint8_t displayWatermark;
145 uint8_t gen2PCIE;
146 uint8_t UVDWatermark;
147 uint8_t VCEWatermark;
148 uint8_t strobeMode;
149 uint8_t mcFlags;
150 uint8_t padding;
151 uint32_t aT;
152 uint32_t bSP;
153 SISLANDS_SMC_SCLK_VALUE sclk;
154 SISLANDS_SMC_MCLK_VALUE mclk;
155 SISLANDS_SMC_VOLTAGE_VALUE vddc;
156 SISLANDS_SMC_VOLTAGE_VALUE mvdd;
157 SISLANDS_SMC_VOLTAGE_VALUE vddci;
158 SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
159 uint8_t hysteresisUp;
160 uint8_t hysteresisDown;
161 uint8_t stateFlags;
162 uint8_t arbRefreshState;
163 uint32_t SQPowerThrottle;
164 uint32_t SQPowerThrottle_2;
165 uint32_t MaxPoweredUpCU;
166 SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
167 SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
168 uint32_t reserved[2];
169 PP_SIslands_Dpm2PerfLevel dpm2;
170};
171
172#define SISLANDS_SMC_STROBE_RATIO 0x0F
173#define SISLANDS_SMC_STROBE_ENABLE 0x10
174
175#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
176#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
177#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
178#define SISLANDS_SMC_MC_STUTTER_EN 0x08
179#define SISLANDS_SMC_MC_PG_EN 0x10
180
181typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
182
183struct SISLANDS_SMC_SWSTATE
184{
185 uint8_t flags;
186 uint8_t levelCount;
187 uint8_t padding2;
188 uint8_t padding3;
189 SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
190};
191
192typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
193
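/* Editor's note (not part of this patch): levels[1] above is the old
 * C89-style variable-length trailing array, so a software state with
 * "count" levels is sized as
 *
 *     sizeof(SISLANDS_SMC_SWSTATE) +
 *         (count - 1) * sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL)
 *
 * before being copied into SMC SRAM.
 */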
194#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
195#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
196#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
197#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
198
199struct SISLANDS_SMC_VOLTAGEMASKTABLE
200{
201 uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
202};
203
204typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
205
206#define SISLANDS_MAX_NO_VREG_STEPS 32
207
208struct SISLANDS_SMC_STATETABLE
209{
210 uint8_t thermalProtectType;
211 uint8_t systemFlags;
212 uint8_t maxVDDCIndexInPPTable;
213 uint8_t extraFlags;
214 uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
215 SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
216 SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
217 PP_SIslands_DPM2Parameters dpm2Params;
218 SISLANDS_SMC_SWSTATE initialState;
219 SISLANDS_SMC_SWSTATE ACPIState;
220 SISLANDS_SMC_SWSTATE ULVState;
221 SISLANDS_SMC_SWSTATE driverState;
222 SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
223};
224
225typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
226
227#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
228#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
229#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
230#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
231#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
232#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
233#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
234#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
235#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
236#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
237#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
238#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
239#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
240#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
241#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
242#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
243#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
244
245#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
246#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
247
248#define SMC_SISLANDS_SCALE_I 7
249#define SMC_SISLANDS_SCALE_R 12
250
251struct PP_SIslands_CacConfig
252{
253 uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
254 uint32_t lkge_lut_V0;
255 uint32_t lkge_lut_Vstep;
256 uint32_t WinTime;
257 uint32_t R_LL;
258 uint32_t calculation_repeats;
259 uint32_t l2numWin_TDP;
260 uint32_t dc_cac;
261 uint8_t lts_truncate_n;
262 uint8_t SHIFT_N;
263 uint8_t log2_PG_LKG_SCALE;
264 uint8_t cac_temp;
265 uint32_t lkge_lut_T0;
266 uint32_t lkge_lut_Tstep;
267};
268
269typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
270
271#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
272#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
273
274struct SMC_SIslands_MCRegisterAddress
275{
276 uint16_t s0;
277 uint16_t s1;
278};
279
280typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
281
282struct SMC_SIslands_MCRegisterSet
283{
284 uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
285};
286
287typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
288
289struct SMC_SIslands_MCRegisters
290{
291 uint8_t last;
292 uint8_t reserved[3];
293 SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
294 SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
295};
296
297typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
298
299struct SMC_SIslands_MCArbDramTimingRegisterSet
300{
301 uint32_t mc_arb_dram_timing;
302 uint32_t mc_arb_dram_timing2;
303 uint8_t mc_arb_rfsh_rate;
304 uint8_t mc_arb_burst_time;
305 uint8_t padding[2];
306};
307
308typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
309
310struct SMC_SIslands_MCArbDramTimingRegisters
311{
312 uint8_t arb_current;
313 uint8_t reserved[3];
314 SMC_SIslands_MCArbDramTimingRegisterSet data[16];
315};
316
317typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
318
319struct SMC_SISLANDS_SPLL_DIV_TABLE
320{
321 uint32_t freq[256];
322 uint32_t ss[256];
323};
324
325#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
326#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
327#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
328#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
329#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
330#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
331#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
332#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
333
334typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
335
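/* Editor's sketch (not part of this patch): each freq[]/ss[] entry packs two
 * fields using the masks above; a hedged decode of a freq table entry "f":
 *
 *     u32 fbdiv = (f & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) >>
 *                 SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT;
 *     u32 pdiv  = (f & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK) >>
 *                 SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT;
 */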
336#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
337
338#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
339
340struct Smc_SIslands_DTE_Configuration
341{
342 uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
343 uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
344 uint32_t K;
345 uint32_t T0;
346 uint32_t MaxT;
347 uint8_t WindowSize;
348 uint8_t Tdep_count;
349 uint8_t temp_select;
350 uint8_t DTE_mode;
351 uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
352 uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
353 uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
354 uint32_t Tthreshold;
355};
356
357typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
358
359#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
360
361#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
362
363#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
364#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
365#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
366#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
367#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
368#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
369#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
370#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
371#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
372#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
373#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48
374
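/* Editor's sketch (not part of this patch): the offsets above index into the
 * firmware header at SISLANDS_SMC_FIRMWARE_HEADER_LOCATION. Using the
 * si_read_smc_sram_dword() helper declared at the end of this file, the
 * state-table address could be fetched as ("limit" being the caller's SRAM
 * bound):
 *
 *     u32 tmp;
 *     int ret = si_read_smc_sram_dword(rdev,
 *                     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
 *                     SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
 *                     &tmp, limit);
 */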
375#pragma pack(pop)
376
377int si_set_smc_sram_address(struct radeon_device *rdev,
378 u32 smc_address, u32 limit);
379int si_copy_bytes_to_smc(struct radeon_device *rdev,
380 u32 smc_start_address,
381 const u8 *src, u32 byte_count, u32 limit);
382void si_start_smc(struct radeon_device *rdev);
383void si_reset_smc(struct radeon_device *rdev);
384int si_program_jump_on_start(struct radeon_device *rdev);
385void si_stop_smc_clock(struct radeon_device *rdev);
386void si_start_smc_clock(struct radeon_device *rdev);
387bool si_is_smc_running(struct radeon_device *rdev);
388PPSMC_Result si_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
389PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev);
390int si_load_smc_ucode(struct radeon_device *rdev, u32 limit);
391int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
392 u32 *value, u32 limit);
393int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
394 u32 value, u32 limit);
395
396#endif
397
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
new file mode 100644
index 000000000000..dc599060a9a4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -0,0 +1,1832 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "sumod.h"
27#include "r600_dpm.h"
28#include "cypress_dpm.h"
29#include "sumo_dpm.h"
30#include <linux/seq_file.h>
31
32#define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5
33#define SUMO_MINIMUM_ENGINE_CLOCK 800
34#define BOOST_DPM_LEVEL 7
35
36static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
37{
38 SUMO_UTC_DFLT_00,
39 SUMO_UTC_DFLT_01,
40 SUMO_UTC_DFLT_02,
41 SUMO_UTC_DFLT_03,
42 SUMO_UTC_DFLT_04,
43 SUMO_UTC_DFLT_05,
44 SUMO_UTC_DFLT_06,
45 SUMO_UTC_DFLT_07,
46 SUMO_UTC_DFLT_08,
47 SUMO_UTC_DFLT_09,
48 SUMO_UTC_DFLT_10,
49 SUMO_UTC_DFLT_11,
50 SUMO_UTC_DFLT_12,
51 SUMO_UTC_DFLT_13,
52 SUMO_UTC_DFLT_14,
53};
54
55static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
56{
57 SUMO_DTC_DFLT_00,
58 SUMO_DTC_DFLT_01,
59 SUMO_DTC_DFLT_02,
60 SUMO_DTC_DFLT_03,
61 SUMO_DTC_DFLT_04,
62 SUMO_DTC_DFLT_05,
63 SUMO_DTC_DFLT_06,
64 SUMO_DTC_DFLT_07,
65 SUMO_DTC_DFLT_08,
66 SUMO_DTC_DFLT_09,
67 SUMO_DTC_DFLT_10,
68 SUMO_DTC_DFLT_11,
69 SUMO_DTC_DFLT_12,
70 SUMO_DTC_DFLT_13,
71 SUMO_DTC_DFLT_14,
72};
73
74struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
75{
76 struct sumo_ps *ps = rps->ps_priv;
77
78 return ps;
79}
80
81struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev)
82{
83 struct sumo_power_info *pi = rdev->pm.dpm.priv;
84
85 return pi;
86}
87
88static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
89{
90 if (enable)
91 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
92 else {
93 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
94 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
95 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
96 RREG32(GB_ADDR_CONFIG);
97 }
98}
99
100#define CGCG_CGTT_LOCAL0_MASK 0xE5BFFFFF
101#define CGCG_CGTT_LOCAL1_MASK 0xEFFF07FF
102
103static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
104{
105 u32 local0;
106 u32 local1;
107
108 local0 = RREG32(CG_CGTT_LOCAL_0);
109 local1 = RREG32(CG_CGTT_LOCAL_1);
110
111 if (enable) {
112 WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
113 WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
114 } else {
115 WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
116 WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
117 }
118}
119
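/* Editor's note (not part of this patch): the writes above use the usual
 * field-merge pattern (new & MASK) | (old & ~MASK), i.e. bits covered by
 * the LOCAL0/LOCAL1 masks are forced to the new value while all other
 * bits keep the register's current contents. */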
120static void sumo_program_git(struct radeon_device *rdev)
121{
122 u32 p, u;
123 u32 xclk = radeon_get_xclk(rdev);
124
125 r600_calculate_u_and_p(SUMO_GICST_DFLT,
126 xclk, 16, &p, &u);
127
128 WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK);
129}
130
131static void sumo_program_grsd(struct radeon_device *rdev)
132{
133 u32 p, u;
134 u32 xclk = radeon_get_xclk(rdev);
135 u32 grs = 256 * 25 / 100;
136
137 r600_calculate_u_and_p(1, xclk, 14, &p, &u);
138
139 WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
140}
141
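/* Editor's note (not part of this patch): in sumo_program_grsd() above,
 * grs = 256 * 25 / 100 = 64, which appears to place the PHC point at 25%
 * of a 256-unit window; p and u come from the driver's standard
 * r600_calculate_u_and_p() period/unit split. */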
142void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
143{
144 sumo_program_git(rdev);
145 sumo_program_grsd(rdev);
146}
147
148static void sumo_gfx_powergating_initialize(struct radeon_device *rdev)
149{
150 u32 rcu_pwr_gating_cntl;
151 u32 p, u;
152 u32 p_c, p_p, d_p;
153 u32 r_t, i_t;
154 u32 xclk = radeon_get_xclk(rdev);
155
156 if (rdev->family == CHIP_PALM) {
157 p_c = 4;
158 d_p = 10;
159 r_t = 10;
160 i_t = 4;
161 p_p = 50 + 1000/200 + 6 * 32;
162 } else {
163 p_c = 16;
164 d_p = 50;
165 r_t = 50;
166 i_t = 50;
167 p_p = 113;
168 }
169
170 WREG32(CG_SCRATCH2, 0x01B60A17);
171
172 r600_calculate_u_and_p(SUMO_GFXPOWERGATINGT_DFLT,
173 xclk, 16, &p, &u);
174
175 WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u),
176 ~(PGP_MASK | PGU_MASK));
177
178 r600_calculate_u_and_p(SUMO_VOLTAGEDROPT_DFLT,
179 xclk, 16, &p, &u);
180
181 WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u),
182 ~(PGP_MASK | PGU_MASK));
183
184 if (rdev->family == CHIP_PALM) {
185 WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x10103210);
186 WREG32_RCU(RCU_PWR_GATING_SEQ1, 0x10101010);
187 } else {
188 WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x76543210);
189 WREG32_RCU(RCU_PWR_GATING_SEQ1, 0xFEDCBA98);
190 }
191
192 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
193 rcu_pwr_gating_cntl &=
194 ~(RSVD_MASK | PCV_MASK | PGS_MASK);
195 rcu_pwr_gating_cntl |= PCV(p_c) | PGS(1) | PWR_GATING_EN;
196 if (rdev->family == CHIP_PALM) {
197 rcu_pwr_gating_cntl &= ~PCP_MASK;
198 rcu_pwr_gating_cntl |= PCP(0x77);
199 }
200 WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);
201
202 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
203 rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
204 rcu_pwr_gating_cntl |= MPPU(p_p) | MPPD(50);
205 WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);
206
207 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
208 rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
209 rcu_pwr_gating_cntl |= DPPU(d_p) | DPPD(50);
210 WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
211
212 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_4);
213 rcu_pwr_gating_cntl &= ~(RT_MASK | IT_MASK);
214 rcu_pwr_gating_cntl |= RT(r_t) | IT(i_t);
215 WREG32_RCU(RCU_PWR_GATING_CNTL_4, rcu_pwr_gating_cntl);
216
217 if (rdev->family == CHIP_PALM)
218 WREG32_RCU(RCU_PWR_GATING_CNTL_5, 0xA02);
219
220 sumo_smu_pg_init(rdev);
221
222 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
223 rcu_pwr_gating_cntl &=
224 ~(RSVD_MASK | PCV_MASK | PGS_MASK);
225 rcu_pwr_gating_cntl |= PCV(p_c) | PGS(4) | PWR_GATING_EN;
226 if (rdev->family == CHIP_PALM) {
227 rcu_pwr_gating_cntl &= ~PCP_MASK;
228 rcu_pwr_gating_cntl |= PCP(0x77);
229 }
230 WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);
231
232 if (rdev->family == CHIP_PALM) {
233 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
234 rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
235 rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
236 WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);
237
238 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
239 rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
240 rcu_pwr_gating_cntl |= DPPU(16) | DPPD(50);
241 WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
242 }
243
244 sumo_smu_pg_init(rdev);
245
246 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
247 rcu_pwr_gating_cntl &=
248 ~(RSVD_MASK | PCV_MASK | PGS_MASK);
249 rcu_pwr_gating_cntl |= PGS(5) | PWR_GATING_EN;
250
251 if (rdev->family == CHIP_PALM) {
252 rcu_pwr_gating_cntl |= PCV(4);
253 rcu_pwr_gating_cntl &= ~PCP_MASK;
254 rcu_pwr_gating_cntl |= PCP(0x77);
255 } else
256 rcu_pwr_gating_cntl |= PCV(11);
257 WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);
258
259 if (rdev->family == CHIP_PALM) {
260 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
261 rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
262 rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
263 WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);
264
265 rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
266 rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
267 rcu_pwr_gating_cntl |= DPPU(22) | DPPD(50);
268 WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
269 }
270
271 sumo_smu_pg_init(rdev);
272}
273
274static void sumo_gfx_powergating_enable(struct radeon_device *rdev, bool enable)
275{
276 if (enable)
277 WREG32_P(CG_PWR_GATING_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
278 else {
279 WREG32_P(CG_PWR_GATING_CNTL, 0, ~DYN_PWR_DOWN_EN);
280 RREG32(GB_ADDR_CONFIG);
281 }
282}
283
284static int sumo_enable_clock_power_gating(struct radeon_device *rdev)
285{
286 struct sumo_power_info *pi = sumo_get_pi(rdev);
287
288 if (pi->enable_gfx_clock_gating)
289 sumo_gfx_clockgating_initialize(rdev);
290 if (pi->enable_gfx_power_gating)
291 sumo_gfx_powergating_initialize(rdev);
292 if (pi->enable_mg_clock_gating)
293 sumo_mg_clockgating_enable(rdev, true);
294 if (pi->enable_gfx_clock_gating)
295 sumo_gfx_clockgating_enable(rdev, true);
296 if (pi->enable_gfx_power_gating)
297 sumo_gfx_powergating_enable(rdev, true);
298
299 return 0;
300}
301
302static void sumo_disable_clock_power_gating(struct radeon_device *rdev)
303{
304 struct sumo_power_info *pi = sumo_get_pi(rdev);
305
306 if (pi->enable_gfx_clock_gating)
307 sumo_gfx_clockgating_enable(rdev, false);
308 if (pi->enable_gfx_power_gating)
309 sumo_gfx_powergating_enable(rdev, false);
310 if (pi->enable_mg_clock_gating)
311 sumo_mg_clockgating_enable(rdev, false);
312}
313
314static void sumo_calculate_bsp(struct radeon_device *rdev,
315 u32 high_clk)
316{
317 struct sumo_power_info *pi = sumo_get_pi(rdev);
318 u32 xclk = radeon_get_xclk(rdev);
319
320 pi->pasi = 65535 * 100 / high_clk;
321 pi->asi = 65535 * 100 / high_clk;
322
323 r600_calculate_u_and_p(pi->asi,
324 xclk, 16, &pi->bsp, &pi->bsu);
325
326 r600_calculate_u_and_p(pi->pasi,
327 xclk, 16, &pi->pbsp, &pi->pbsu);
328
329 pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
330 pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
331}
332
333static void sumo_init_bsp(struct radeon_device *rdev)
334{
335 struct sumo_power_info *pi = sumo_get_pi(rdev);
336
337 WREG32(CG_BSP_0, pi->psp);
338}
339
340
341static void sumo_program_bsp(struct radeon_device *rdev,
342 struct radeon_ps *rps)
343{
344 struct sumo_power_info *pi = sumo_get_pi(rdev);
345 struct sumo_ps *ps = sumo_get_ps(rps);
346 u32 i;
347 u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk;
348
349 if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
350 highest_engine_clock = pi->boost_pl.sclk;
351
352 sumo_calculate_bsp(rdev, highest_engine_clock);
353
354 for (i = 0; i < ps->num_levels - 1; i++)
355 WREG32(CG_BSP_0 + (i * 4), pi->dsp);
356
357 WREG32(CG_BSP_0 + (i * 4), pi->psp);
358
359 if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
360 WREG32(CG_BSP_0 + (BOOST_DPM_LEVEL * 4), pi->psp);
361}
362
363static void sumo_write_at(struct radeon_device *rdev,
364 u32 index, u32 value)
365{
366 if (index == 0)
367 WREG32(CG_AT_0, value);
368 else if (index == 1)
369 WREG32(CG_AT_1, value);
370 else if (index == 2)
371 WREG32(CG_AT_2, value);
372 else if (index == 3)
373 WREG32(CG_AT_3, value);
374 else if (index == 4)
375 WREG32(CG_AT_4, value);
376 else if (index == 5)
377 WREG32(CG_AT_5, value);
378 else if (index == 6)
379 WREG32(CG_AT_6, value);
380 else if (index == 7)
381 WREG32(CG_AT_7, value);
382}
383
384static void sumo_program_at(struct radeon_device *rdev,
385 struct radeon_ps *rps)
386{
387 struct sumo_power_info *pi = sumo_get_pi(rdev);
388 struct sumo_ps *ps = sumo_get_ps(rps);
389 u32 asi;
390 u32 i;
391 u32 m_a;
392 u32 a_t;
393 u32 r[SUMO_MAX_HARDWARE_POWERLEVELS];
394 u32 l[SUMO_MAX_HARDWARE_POWERLEVELS];
395
396 r[0] = SUMO_R_DFLT0;
397 r[1] = SUMO_R_DFLT1;
398 r[2] = SUMO_R_DFLT2;
399 r[3] = SUMO_R_DFLT3;
400 r[4] = SUMO_R_DFLT4;
401
402 l[0] = SUMO_L_DFLT0;
403 l[1] = SUMO_L_DFLT1;
404 l[2] = SUMO_L_DFLT2;
405 l[3] = SUMO_L_DFLT3;
406 l[4] = SUMO_L_DFLT4;
407
408 for (i = 0; i < ps->num_levels; i++) {
409 asi = (i == ps->num_levels - 1) ? pi->pasi : pi->asi;
410
411 m_a = asi * ps->levels[i].sclk / 100;
412
413 a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100);
414
415 sumo_write_at(rdev, i, a_t);
416 }
417
418 if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
419 asi = pi->pasi;
420
421 m_a = asi * pi->boost_pl.sclk / 100;
422
423 a_t = CG_R(m_a * r[ps->num_levels - 1] / 100) |
424 CG_L(m_a * l[ps->num_levels - 1] / 100);
425
426 sumo_write_at(rdev, BOOST_DPM_LEVEL, a_t);
427 }
428}
429
430static void sumo_program_tp(struct radeon_device *rdev)
431{
432 int i;
433 enum r600_td td = R600_TD_DFLT;
434
435 for (i = 0; i < SUMO_PM_NUMBER_OF_TC; i++) {
436 WREG32_P(CG_FFCT_0 + (i * 4), UTC_0(sumo_utc[i]), ~UTC_0_MASK);
437 WREG32_P(CG_FFCT_0 + (i * 4), DTC_0(sumo_dtc[i]), ~DTC_0_MASK);
438 }
439
440 if (td == R600_TD_AUTO)
441 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
442 else
443 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
444
445 if (td == R600_TD_UP)
446 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
447
448 if (td == R600_TD_DOWN)
449 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
450}
451
452void sumo_program_vc(struct radeon_device *rdev, u32 vrc)
453{
454 WREG32(CG_FTV, vrc);
455}
456
457void sumo_clear_vc(struct radeon_device *rdev)
458{
459 WREG32(CG_FTV, 0);
460}
461
462void sumo_program_sstp(struct radeon_device *rdev)
463{
464 u32 p, u;
465 u32 xclk = radeon_get_xclk(rdev);
466
467 r600_calculate_u_and_p(SUMO_SST_DFLT,
468 xclk, 16, &p, &u);
469
470 WREG32(CG_SSP, SSTU(u) | SST(p));
471}
472
473static void sumo_set_divider_value(struct radeon_device *rdev,
474 u32 index, u32 divider)
475{
476 u32 reg_index = index / 4;
477 u32 field_index = index % 4;
478
479 if (field_index == 0)
480 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
481 SCLK_FSTATE_0_DIV(divider), ~SCLK_FSTATE_0_DIV_MASK);
482 else if (field_index == 1)
483 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
484 SCLK_FSTATE_1_DIV(divider), ~SCLK_FSTATE_1_DIV_MASK);
485 else if (field_index == 2)
486 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
487 SCLK_FSTATE_2_DIV(divider), ~SCLK_FSTATE_2_DIV_MASK);
488 else if (field_index == 3)
489 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
490 SCLK_FSTATE_3_DIV(divider), ~SCLK_FSTATE_3_DIV_MASK);
491}
492
493static void sumo_set_ds_dividers(struct radeon_device *rdev,
494 u32 index, u32 divider)
495{
496 struct sumo_power_info *pi = sumo_get_pi(rdev);
497
498 if (pi->enable_sclk_ds) {
499 u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_6);
500
501 dpm_ctrl &= ~(0x7 << (index * 3));
502 dpm_ctrl |= (divider << (index * 3));
503 WREG32(CG_SCLK_DPM_CTRL_6, dpm_ctrl);
504 }
505}
506
507static void sumo_set_ss_dividers(struct radeon_device *rdev,
508 u32 index, u32 divider)
509{
510 struct sumo_power_info *pi = sumo_get_pi(rdev);
511
512 if (pi->enable_sclk_ds) {
513 u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_11);
514
515 dpm_ctrl &= ~(0x7 << (index * 3));
516 dpm_ctrl |= (divider << (index * 3));
517 WREG32(CG_SCLK_DPM_CTRL_11, dpm_ctrl);
518 }
519}
520
521static void sumo_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
522{
523 u32 voltage_cntl = RREG32(CG_DPM_VOLTAGE_CNTL);
524
525 voltage_cntl &= ~(DPM_STATE0_LEVEL_MASK << (index * 2));
526 voltage_cntl |= (vid << (DPM_STATE0_LEVEL_SHIFT + index * 2));
527 WREG32(CG_DPM_VOLTAGE_CNTL, voltage_cntl);
528}
529
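/* Editor's note (not part of this patch): sumo_set_vid() packs one 2-bit
 * voltage index per DPM state, so state N occupies bits at offset
 * DPM_STATE0_LEVEL_SHIFT + N * 2; e.g., assuming that shift is 0, index 3
 * with vid 2 clears and then sets bits 7:6 of CG_DPM_VOLTAGE_CNTL. */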
530static void sumo_set_allos_gnb_slow(struct radeon_device *rdev, u32 index, u32 gnb_slow)
531{
532 struct sumo_power_info *pi = sumo_get_pi(rdev);
533 u32 temp = gnb_slow;
534 u32 cg_sclk_dpm_ctrl_3;
535
536 if (pi->driver_nbps_policy_disable)
537 temp = 1;
538
539 cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
540 cg_sclk_dpm_ctrl_3 &= ~(GNB_SLOW_FSTATE_0_MASK << index);
541 cg_sclk_dpm_ctrl_3 |= (temp << (GNB_SLOW_FSTATE_0_SHIFT + index));
542
543 WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
544}
545
546static void sumo_program_power_level(struct radeon_device *rdev,
547 struct sumo_pl *pl, u32 index)
548{
549 struct sumo_power_info *pi = sumo_get_pi(rdev);
550 int ret;
551 struct atom_clock_dividers dividers;
552 u32 ds_en = RREG32(DEEP_SLEEP_CNTL) & ENABLE_DS;
553
554 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
555 pl->sclk, false, &dividers);
556 if (ret)
557 return;
558
559 sumo_set_divider_value(rdev, index, dividers.post_div);
560
561 sumo_set_vid(rdev, index, pl->vddc_index);
562
563 if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) {
564 if (ds_en)
565 WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
566 } else {
567 sumo_set_ss_dividers(rdev, index, pl->ss_divider_index);
568 sumo_set_ds_dividers(rdev, index, pl->ds_divider_index);
569
570 if (!ds_en)
571 WREG32_P(DEEP_SLEEP_CNTL, ENABLE_DS, ~ENABLE_DS);
572 }
573
574 sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
575
576 if (pi->enable_boost)
577 sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit);
578}
579
580static void sumo_power_level_enable(struct radeon_device *rdev, u32 index, bool enable)
581{
582 u32 reg_index = index / 4;
583 u32 field_index = index % 4;
584
585 if (field_index == 0)
586 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
587 enable ? SCLK_FSTATE_0_VLD : 0, ~SCLK_FSTATE_0_VLD);
588 else if (field_index == 1)
589 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
590 enable ? SCLK_FSTATE_1_VLD : 0, ~SCLK_FSTATE_1_VLD);
591 else if (field_index == 2)
592 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
593 enable ? SCLK_FSTATE_2_VLD : 0, ~SCLK_FSTATE_2_VLD);
594 else if (field_index == 3)
595 WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
596 enable ? SCLK_FSTATE_3_VLD : 0, ~SCLK_FSTATE_3_VLD);
597}
598
599static bool sumo_dpm_enabled(struct radeon_device *rdev)
600{
601 if (RREG32(CG_SCLK_DPM_CTRL_3) & DPM_SCLK_ENABLE)
602 return true;
603 else
604 return false;
605}
606
607static void sumo_start_dpm(struct radeon_device *rdev)
608{
609 WREG32_P(CG_SCLK_DPM_CTRL_3, DPM_SCLK_ENABLE, ~DPM_SCLK_ENABLE);
610}
611
612static void sumo_stop_dpm(struct radeon_device *rdev)
613{
614 WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~DPM_SCLK_ENABLE);
615}
616
617static void sumo_set_forced_mode(struct radeon_device *rdev, bool enable)
618{
619 if (enable)
620 WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE_EN, ~FORCE_SCLK_STATE_EN);
621 else
622 WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_SCLK_STATE_EN);
623}
624
625static void sumo_set_forced_mode_enabled(struct radeon_device *rdev)
626{
627 int i;
628
629 sumo_set_forced_mode(rdev, true);
630 for (i = 0; i < rdev->usec_timeout; i++) {
631 if (RREG32(CG_SCLK_STATUS) & SCLK_OVERCLK_DETECT)
632 break;
633 udelay(1);
634 }
635}
636
637static void sumo_wait_for_level_0(struct radeon_device *rdev)
638{
639 int i;
640
641 for (i = 0; i < rdev->usec_timeout; i++) {
642 if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) == 0)
643 break;
644 udelay(1);
645 }
646 for (i = 0; i < rdev->usec_timeout; i++) {
647 if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) == 0)
648 break;
649 udelay(1);
650 }
651}
652
653static void sumo_set_forced_mode_disabled(struct radeon_device *rdev)
654{
655 sumo_set_forced_mode(rdev, false);
656}
657
658static void sumo_enable_power_level_0(struct radeon_device *rdev)
659{
660 sumo_power_level_enable(rdev, 0, true);
661}
662
663static void sumo_patch_boost_state(struct radeon_device *rdev,
664 struct radeon_ps *rps)
665{
666 struct sumo_power_info *pi = sumo_get_pi(rdev);
667 struct sumo_ps *new_ps = sumo_get_ps(rps);
668
669 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
670 pi->boost_pl = new_ps->levels[new_ps->num_levels - 1];
671 pi->boost_pl.sclk = pi->sys_info.boost_sclk;
672 pi->boost_pl.vddc_index = pi->sys_info.boost_vid_2bit;
673 pi->boost_pl.sclk_dpm_tdp_limit = pi->sys_info.sclk_dpm_tdp_limit_boost;
674 }
675}
676
677static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev,
678 struct radeon_ps *new_rps,
679 struct radeon_ps *old_rps)
680{
681 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
682 struct sumo_ps *old_ps = sumo_get_ps(old_rps);
683 u32 nbps1_old = 0;
684 u32 nbps1_new = 0;
685
686 if (old_ps != NULL)
687 nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
688
689 nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
690
691 if (nbps1_old == 1 && nbps1_new == 0)
692 sumo_smu_notify_alt_vddnb_change(rdev, 0, 0);
693}
694
695static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
696 struct radeon_ps *new_rps,
697 struct radeon_ps *old_rps)
698{
699 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
700 struct sumo_ps *old_ps = sumo_get_ps(old_rps);
701 u32 nbps1_old = 0;
702 u32 nbps1_new = 0;
703
704 if (old_ps != NULL)
705 nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
706
707 nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
708
709 if (nbps1_old == 0 && nbps1_new == 1)
710 sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
711}
712
713static void sumo_enable_boost(struct radeon_device *rdev,
714 struct radeon_ps *rps,
715 bool enable)
716{
717 struct sumo_ps *new_ps = sumo_get_ps(rps);
718
719 if (enable) {
720 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
721 sumo_boost_state_enable(rdev, true);
722 } else
723 sumo_boost_state_enable(rdev, false);
724}
725
726static void sumo_set_forced_level(struct radeon_device *rdev, u32 index)
727{
728 WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE(index), ~FORCE_SCLK_STATE_MASK);
729}
730
731static void sumo_set_forced_level_0(struct radeon_device *rdev)
732{
733 sumo_set_forced_level(rdev, 0);
734}
735
736static void sumo_program_wl(struct radeon_device *rdev,
737 struct radeon_ps *rps)
738{
739 struct sumo_ps *new_ps = sumo_get_ps(rps);
740 u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
741
742 dpm_ctrl4 &= 0xFFFFFF00;
743 dpm_ctrl4 |= (1 << (new_ps->num_levels - 1));
744
745 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
746 dpm_ctrl4 |= (1 << BOOST_DPM_LEVEL);
747
748 WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
749}
750
751static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev,
752 struct radeon_ps *new_rps,
753 struct radeon_ps *old_rps)
754{
755 struct sumo_power_info *pi = sumo_get_pi(rdev);
756 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
757 struct sumo_ps *old_ps = sumo_get_ps(old_rps);
758 u32 i;
759 u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;
760
761 for (i = 0; i < new_ps->num_levels; i++) {
762 sumo_program_power_level(rdev, &new_ps->levels[i], i);
763 sumo_power_level_enable(rdev, i, true);
764 }
765
766 for (i = new_ps->num_levels; i < n_current_state_levels; i++)
767 sumo_power_level_enable(rdev, i, false);
768
769 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
770 sumo_program_power_level(rdev, &pi->boost_pl, BOOST_DPM_LEVEL);
771}
772
773static void sumo_enable_acpi_pm(struct radeon_device *rdev)
774{
775 WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
776}
777
778static void sumo_program_power_level_enter_state(struct radeon_device *rdev)
779{
780 WREG32_P(CG_SCLK_DPM_CTRL_5, SCLK_FSTATE_BOOTUP(0), ~SCLK_FSTATE_BOOTUP_MASK);
781}
782
783static void sumo_program_acpi_power_level(struct radeon_device *rdev)
784{
785 struct sumo_power_info *pi = sumo_get_pi(rdev);
786 struct atom_clock_dividers dividers;
787 int ret;
788
789 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
790 pi->acpi_pl.sclk,
791 false, &dividers);
792 if (ret)
793 return;
794
795 WREG32_P(CG_ACPI_CNTL, SCLK_ACPI_DIV(dividers.post_div), ~SCLK_ACPI_DIV_MASK);
796 WREG32_P(CG_ACPI_VOLTAGE_CNTL, 0, ~ACPI_VOLTAGE_EN);
797}
798
799static void sumo_program_bootup_state(struct radeon_device *rdev)
800{
801 struct sumo_power_info *pi = sumo_get_pi(rdev);
802 u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
803 u32 i;
804
805 sumo_program_power_level(rdev, &pi->boot_pl, 0);
806
807 dpm_ctrl4 &= 0xFFFFFF00;
808 WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
809
810 for (i = 1; i < 8; i++)
811 sumo_power_level_enable(rdev, i, false);
812}
813
814static void sumo_setup_uvd_clocks(struct radeon_device *rdev,
815 struct radeon_ps *new_rps,
816 struct radeon_ps *old_rps)
817{
818 struct sumo_power_info *pi = sumo_get_pi(rdev);
819
820 if (pi->enable_gfx_power_gating) {
821 sumo_gfx_powergating_enable(rdev, false);
822 }
823
824 radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
825
826 if (pi->enable_gfx_power_gating) {
827 if (!pi->disable_gfx_power_gating_in_uvd ||
828 !r600_is_uvd_state(new_rps->class, new_rps->class2))
829 sumo_gfx_powergating_enable(rdev, true);
830 }
831}
832
833static void sumo_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
834 struct radeon_ps *new_rps,
835 struct radeon_ps *old_rps)
836{
837 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
838 struct sumo_ps *current_ps = sumo_get_ps(old_rps);
839
840 if ((new_rps->vclk == old_rps->vclk) &&
841 (new_rps->dclk == old_rps->dclk))
842 return;
843
844 if (new_ps->levels[new_ps->num_levels - 1].sclk >=
845 current_ps->levels[current_ps->num_levels - 1].sclk)
846 return;
847
848 sumo_setup_uvd_clocks(rdev, new_rps, old_rps);
849}
850
851static void sumo_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
852 struct radeon_ps *new_rps,
853 struct radeon_ps *old_rps)
854{
855 struct sumo_ps *new_ps = sumo_get_ps(new_rps);
856 struct sumo_ps *current_ps = sumo_get_ps(old_rps);
857
858 if ((new_rps->vclk == old_rps->vclk) &&
859 (new_rps->dclk == old_rps->dclk))
860 return;
861
862 if (new_ps->levels[new_ps->num_levels - 1].sclk <
863 current_ps->levels[current_ps->num_levels - 1].sclk)
864 return;
865
866 sumo_setup_uvd_clocks(rdev, new_rps, old_rps);
867}
868
869void sumo_take_smu_control(struct radeon_device *rdev, bool enable)
870{
871/* This bit selects who handles display phy powergating.
872 * Clear the bit to let atom handle it.
873 * Set it to let the driver handle it.
874 * For now we just let atom handle it.
875 */
876#if 0
877 u32 v = RREG32(DOUT_SCRATCH3);
878
879 if (enable)
880 v |= 0x4;
881 else
882 v &= 0xFFFFFFFB;
883
884 WREG32(DOUT_SCRATCH3, v);
885#endif
886}
887
888static void sumo_enable_sclk_ds(struct radeon_device *rdev, bool enable)
889{
890 if (enable) {
891 u32 deep_sleep_cntl = RREG32(DEEP_SLEEP_CNTL);
892 u32 deep_sleep_cntl2 = RREG32(DEEP_SLEEP_CNTL2);
893 u32 t = 1;
894
895 deep_sleep_cntl &= ~R_DIS;
896 deep_sleep_cntl &= ~HS_MASK;
897 deep_sleep_cntl |= HS(t > 4095 ? 4095 : t);
898
899 deep_sleep_cntl2 |= LB_UFP_EN;
900 deep_sleep_cntl2 &= INOUT_C_MASK;
901 deep_sleep_cntl2 |= INOUT_C(0xf);
902
903 WREG32(DEEP_SLEEP_CNTL2, deep_sleep_cntl2);
904 WREG32(DEEP_SLEEP_CNTL, deep_sleep_cntl);
905 } else
906 WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
907}
908
909static void sumo_program_bootup_at(struct radeon_device *rdev)
910{
911 WREG32_P(CG_AT_0, CG_R(0xffff), ~CG_R_MASK);
912 WREG32_P(CG_AT_0, CG_L(0), ~CG_L_MASK);
913}
914
915static void sumo_reset_am(struct radeon_device *rdev)
916{
917 WREG32_P(SCLK_PWRMGT_CNTL, FIR_RESET, ~FIR_RESET);
918}
919
920static void sumo_start_am(struct radeon_device *rdev)
921{
922 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_RESET);
923}
924
925static void sumo_program_ttp(struct radeon_device *rdev)
926{
927 u32 xclk = radeon_get_xclk(rdev);
928 u32 p, u;
929 u32 cg_sclk_dpm_ctrl_5 = RREG32(CG_SCLK_DPM_CTRL_5);
930
931 r600_calculate_u_and_p(1000,
932 xclk, 16, &p, &u);
933
934 cg_sclk_dpm_ctrl_5 &= ~(TT_TP_MASK | TT_TU_MASK);
935 cg_sclk_dpm_ctrl_5 |= TT_TP(p) | TT_TU(u);
936
937 WREG32(CG_SCLK_DPM_CTRL_5, cg_sclk_dpm_ctrl_5);
938}
939
940static void sumo_program_ttt(struct radeon_device *rdev)
941{
942 u32 cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
943 struct sumo_power_info *pi = sumo_get_pi(rdev);
944
945 cg_sclk_dpm_ctrl_3 &= ~(GNB_TT_MASK | GNB_THERMTHRO_MASK);
946 cg_sclk_dpm_ctrl_3 |= GNB_TT(pi->thermal_auto_throttling + 49);
947
948 WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
949}
950
951
952static void sumo_enable_voltage_scaling(struct radeon_device *rdev, bool enable)
953{
954 if (enable) {
955 WREG32_P(CG_DPM_VOLTAGE_CNTL, DPM_VOLTAGE_EN, ~DPM_VOLTAGE_EN);
956 WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~CG_VOLTAGE_EN);
957 } else {
958 WREG32_P(CG_CG_VOLTAGE_CNTL, CG_VOLTAGE_EN, ~CG_VOLTAGE_EN);
959 WREG32_P(CG_DPM_VOLTAGE_CNTL, 0, ~DPM_VOLTAGE_EN);
960 }
961}
962
963static void sumo_override_cnb_thermal_events(struct radeon_device *rdev)
964{
965 WREG32_P(CG_SCLK_DPM_CTRL_3, CNB_THERMTHRO_MASK_SCLK,
966 ~CNB_THERMTHRO_MASK_SCLK);
967}
968
969static void sumo_program_dc_hto(struct radeon_device *rdev)
970{
971 u32 cg_sclk_dpm_ctrl_4 = RREG32(CG_SCLK_DPM_CTRL_4);
972 u32 p, u;
973 u32 xclk = radeon_get_xclk(rdev);
974
975 r600_calculate_u_and_p(100000,
976 xclk, 14, &p, &u);
977
978 cg_sclk_dpm_ctrl_4 &= ~(DC_HDC_MASK | DC_HU_MASK);
979 cg_sclk_dpm_ctrl_4 |= DC_HDC(p) | DC_HU(u);
980
981 WREG32(CG_SCLK_DPM_CTRL_4, cg_sclk_dpm_ctrl_4);
982}
983
984static void sumo_force_nbp_state(struct radeon_device *rdev,
985 struct radeon_ps *rps)
986{
987 struct sumo_power_info *pi = sumo_get_pi(rdev);
988 struct sumo_ps *new_ps = sumo_get_ps(rps);
989
990 if (!pi->driver_nbps_policy_disable) {
991 if (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
992 WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_NB_PSTATE_1, ~FORCE_NB_PSTATE_1);
993 else
994 WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_NB_PSTATE_1);
995 }
996}
997
998u32 sumo_get_sleep_divider_from_id(u32 id)
999{
1000 return 1 << id;
1001}
1002
1003u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1004 u32 sclk,
1005 u32 min_sclk_in_sr)
1006{
1007 struct sumo_power_info *pi = sumo_get_pi(rdev);
1008 u32 i;
1009 u32 temp;
1010 u32 min = (min_sclk_in_sr > SUMO_MINIMUM_ENGINE_CLOCK) ?
1011 min_sclk_in_sr : SUMO_MINIMUM_ENGINE_CLOCK;
1012
1013 if (sclk < min)
1014 return 0;
1015
1016 if (!pi->enable_sclk_ds)
1017 return 0;
1018
1019 for (i = SUMO_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1020 temp = sclk / sumo_get_sleep_divider_from_id(i);
1021
1022 if (temp >= min || i == 0)
1023 break;
1024 }
1025 return i;
1026}
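/* Editor's note (not part of this patch): a worked example of the search
 * above, with SUMO_MINIMUM_ENGINE_CLOCK (800) as the floor: for
 * sclk = 30000 the loop starts at divider id 5 (1 << 5 = 32) and
 * 30000 / 32 = 937 >= 800, so id 5 is returned; sclk = 20000 fails at 32
 * (625 < 800) and settles on id 4 (20000 / 16 = 1250). */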
1027
1028static u32 sumo_get_valid_engine_clock(struct radeon_device *rdev,
1029 u32 lower_limit)
1030{
1031 struct sumo_power_info *pi = sumo_get_pi(rdev);
1032 u32 i;
1033
1034 for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
1035 if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
1036 return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
1037 }
1038
1039 return pi->sys_info.sclk_voltage_mapping_table.entries[pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1].sclk_frequency;
1040}
1041
1042static void sumo_patch_thermal_state(struct radeon_device *rdev,
1043 struct sumo_ps *ps,
1044 struct sumo_ps *current_ps)
1045{
1046 struct sumo_power_info *pi = sumo_get_pi(rdev);
1047 u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
1048 u32 current_vddc;
1049 u32 current_sclk;
1050 u32 current_index = 0;
1051
1052 if (current_ps) {
1053 current_vddc = current_ps->levels[current_index].vddc_index;
1054 current_sclk = current_ps->levels[current_index].sclk;
1055 } else {
1056 current_vddc = pi->boot_pl.vddc_index;
1057 current_sclk = pi->boot_pl.sclk;
1058 }
1059
1060 ps->levels[0].vddc_index = current_vddc;
1061
1062 if (ps->levels[0].sclk > current_sclk)
1063 ps->levels[0].sclk = current_sclk;
1064
1065 ps->levels[0].ss_divider_index =
1066 sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);
1067
1068 ps->levels[0].ds_divider_index =
1069 sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, SUMO_MINIMUM_ENGINE_CLOCK);
1070
1071 if (ps->levels[0].ds_divider_index > ps->levels[0].ss_divider_index + 1)
1072 ps->levels[0].ds_divider_index = ps->levels[0].ss_divider_index + 1;
1073
1074 if (ps->levels[0].ss_divider_index == ps->levels[0].ds_divider_index) {
1075 if (ps->levels[0].ss_divider_index > 1)
1076 ps->levels[0].ss_divider_index = ps->levels[0].ss_divider_index - 1;
1077 }
1078
1079 if (ps->levels[0].ss_divider_index == 0)
1080 ps->levels[0].ds_divider_index = 0;
1081
1082 if (ps->levels[0].ds_divider_index == 0)
1083 ps->levels[0].ss_divider_index = 0;
1084}
1085
1086static void sumo_apply_state_adjust_rules(struct radeon_device *rdev,
1087 struct radeon_ps *new_rps,
1088 struct radeon_ps *old_rps)
1089{
1090 struct sumo_ps *ps = sumo_get_ps(new_rps);
1091 struct sumo_ps *current_ps = sumo_get_ps(old_rps);
1092 struct sumo_power_info *pi = sumo_get_pi(rdev);
1093 u32 min_voltage = 0; /* ??? */
1094 u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
1095 u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
1096 u32 i;
1097
1098 if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1099 return sumo_patch_thermal_state(rdev, ps, current_ps);
1100
1101 if (pi->enable_boost) {
1102 if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
1103 ps->flags |= SUMO_POWERSTATE_FLAGS_BOOST_STATE;
1104 }
1105
1106 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) ||
1107 (new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ||
1108 (new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE))
1109 ps->flags |= SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE;
1110
1111 for (i = 0; i < ps->num_levels; i++) {
1112 if (ps->levels[i].vddc_index < min_voltage)
1113 ps->levels[i].vddc_index = min_voltage;
1114
1115 if (ps->levels[i].sclk < min_sclk)
1116 ps->levels[i].sclk =
1117 sumo_get_valid_engine_clock(rdev, min_sclk);
1118
1119 ps->levels[i].ss_divider_index =
1120 sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);
1121
1122 ps->levels[i].ds_divider_index =
1123 sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, SUMO_MINIMUM_ENGINE_CLOCK);
1124
1125 if (ps->levels[i].ds_divider_index > ps->levels[i].ss_divider_index + 1)
1126 ps->levels[i].ds_divider_index = ps->levels[i].ss_divider_index + 1;
1127
1128 if (ps->levels[i].ss_divider_index == ps->levels[i].ds_divider_index) {
1129 if (ps->levels[i].ss_divider_index > 1)
1130 ps->levels[i].ss_divider_index = ps->levels[i].ss_divider_index - 1;
1131 }
1132
1133 if (ps->levels[i].ss_divider_index == 0)
1134 ps->levels[i].ds_divider_index = 0;
1135
1136 if (ps->levels[i].ds_divider_index == 0)
1137 ps->levels[i].ss_divider_index = 0;
1138
1139 if (ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
1140 ps->levels[i].allow_gnb_slow = 1;
1141 else if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ||
1142 (new_rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC))
1143 ps->levels[i].allow_gnb_slow = 0;
1144 else if (i == ps->num_levels - 1)
1145 ps->levels[i].allow_gnb_slow = 0;
1146 else
1147 ps->levels[i].allow_gnb_slow = 1;
1148 }
1149}
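/*
 * The divider fixups above mirror sumo_patch_thermal_state(): the deep
 * sleep divider may sit at most one step above the self refresh divider,
 * the two may not be equal (the ss index is backed off instead), and if
 * either ends up at 0 both are cleared.  allow_gnb_slow is then set per
 * level: forced-NBPS1 states may always slow the GNB, UVD/MVC states
 * never may, and for everything else only the top performance level
 * keeps it disabled.
 */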
1150
1151static void sumo_cleanup_asic(struct radeon_device *rdev)
1152{
1153 sumo_take_smu_control(rdev, false);
1154}
1155
1156static int sumo_set_thermal_temperature_range(struct radeon_device *rdev,
1157 int min_temp, int max_temp)
1158{
1159 int low_temp = 0 * 1000; /* sensor floor, millidegrees C */
1160 int high_temp = 255 * 1000; /* sensor ceiling, millidegrees C */
1161
1162 if (low_temp < min_temp)
1163 low_temp = min_temp;
1164 if (high_temp > max_temp)
1165 high_temp = max_temp;
1166 if (high_temp < low_temp) {
1167 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1168 return -EINVAL;
1169 }
1170
1171 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
1172 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
1173
1174 rdev->pm.dpm.thermal.min_temp = low_temp;
1175 rdev->pm.dpm.thermal.max_temp = high_temp;
1176
1177 return 0;
1178}
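/*
 * The 49 degree bias matches the internal sensor offset used by
 * sumo_get_temp(), which reports raw_temp - 49 degrees C, so the
 * interrupt thresholds are programmed back in raw sensor units.
 */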
1179
1180static void sumo_update_current_ps(struct radeon_device *rdev,
1181 struct radeon_ps *rps)
1182{
1183 struct sumo_ps *new_ps = sumo_get_ps(rps);
1184 struct sumo_power_info *pi = sumo_get_pi(rdev);
1185
1186 pi->current_rps = *rps;
1187 pi->current_ps = *new_ps;
1188 pi->current_rps.ps_priv = &pi->current_ps;
1189}
1190
1191static void sumo_update_requested_ps(struct radeon_device *rdev,
1192 struct radeon_ps *rps)
1193{
1194 struct sumo_ps *new_ps = sumo_get_ps(rps);
1195 struct sumo_power_info *pi = sumo_get_pi(rdev);
1196
1197 pi->requested_rps = *rps;
1198 pi->requested_ps = *new_ps;
1199 pi->requested_rps.ps_priv = &pi->requested_ps;
1200}
1201
1202int sumo_dpm_enable(struct radeon_device *rdev)
1203{
1204 struct sumo_power_info *pi = sumo_get_pi(rdev);
1205 int ret;
1206
1207 if (sumo_dpm_enabled(rdev))
1208 return -EINVAL;
1209
1210 ret = sumo_enable_clock_power_gating(rdev);
1211 if (ret)
1212 return ret;
1213 sumo_program_bootup_state(rdev);
1214 sumo_init_bsp(rdev);
1215 sumo_reset_am(rdev);
1216 sumo_program_tp(rdev);
1217 sumo_program_bootup_at(rdev);
1218 sumo_start_am(rdev);
1219 if (pi->enable_auto_thermal_throttling) {
1220 sumo_program_ttp(rdev);
1221 sumo_program_ttt(rdev);
1222 }
1223 sumo_program_dc_hto(rdev);
1224 sumo_program_power_level_enter_state(rdev);
1225 sumo_enable_voltage_scaling(rdev, true);
1226 sumo_program_sstp(rdev);
1227 sumo_program_vc(rdev, SUMO_VRC_DFLT);
1228 sumo_override_cnb_thermal_events(rdev);
1229 sumo_start_dpm(rdev);
1230 sumo_wait_for_level_0(rdev);
1231 if (pi->enable_sclk_ds)
1232 sumo_enable_sclk_ds(rdev, true);
1233 if (pi->enable_boost)
1234 sumo_enable_boost_timer(rdev);
1235
1236 if (rdev->irq.installed &&
1237 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1238 ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1239 if (ret)
1240 return ret;
1241 rdev->irq.dpm_thermal = true;
1242 radeon_irq_set(rdev);
1243 }
1244
1245 sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1246
1247 return 0;
1248}
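/*
 * Bring-up order matters here: the FIR activity monitor is held in reset
 * while tp/at are programmed, DPM is only started once voltage scaling
 * and the thermal overrides are in place, and the thermal interrupt is
 * hooked up last, after level 0 has settled.
 */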
1249
1250void sumo_dpm_disable(struct radeon_device *rdev)
1251{
1252 struct sumo_power_info *pi = sumo_get_pi(rdev);
1253
1254 if (!sumo_dpm_enabled(rdev))
1255 return;
1256 sumo_disable_clock_power_gating(rdev);
1257 if (pi->enable_sclk_ds)
1258 sumo_enable_sclk_ds(rdev, false);
1259 sumo_clear_vc(rdev);
1260 sumo_wait_for_level_0(rdev);
1261 sumo_stop_dpm(rdev);
1262 sumo_enable_voltage_scaling(rdev, false);
1263
1264 if (rdev->irq.installed &&
1265 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1266 rdev->irq.dpm_thermal = false;
1267 radeon_irq_set(rdev);
1268 }
1269
1270 sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1271}
1272
1273int sumo_dpm_pre_set_power_state(struct radeon_device *rdev)
1274{
1275 struct sumo_power_info *pi = sumo_get_pi(rdev);
1276 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1277 struct radeon_ps *new_ps = &requested_ps;
1278
1279 sumo_update_requested_ps(rdev, new_ps);
1280
1281 if (pi->enable_dynamic_patch_ps)
1282 sumo_apply_state_adjust_rules(rdev,
1283 &pi->requested_rps,
1284 &pi->current_rps);
1285
1286 return 0;
1287}
1288
1289int sumo_dpm_set_power_state(struct radeon_device *rdev)
1290{
1291 struct sumo_power_info *pi = sumo_get_pi(rdev);
1292 struct radeon_ps *new_ps = &pi->requested_rps;
1293 struct radeon_ps *old_ps = &pi->current_rps;
1294
1295 if (pi->enable_dpm)
1296 sumo_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1297 if (pi->enable_boost) {
1298 sumo_enable_boost(rdev, new_ps, false);
1299 sumo_patch_boost_state(rdev, new_ps);
1300 }
1301 if (pi->enable_dpm) {
1302 sumo_pre_notify_alt_vddnb_change(rdev, new_ps, old_ps);
1303 sumo_enable_power_level_0(rdev);
1304 sumo_set_forced_level_0(rdev);
1305 sumo_set_forced_mode_enabled(rdev);
1306 sumo_wait_for_level_0(rdev);
1307 sumo_program_power_levels_0_to_n(rdev, new_ps, old_ps);
1308 sumo_program_wl(rdev, new_ps);
1309 sumo_program_bsp(rdev, new_ps);
1310 sumo_program_at(rdev, new_ps);
1311 sumo_force_nbp_state(rdev, new_ps);
1312 sumo_set_forced_mode_disabled(rdev);
1313 sumo_set_forced_mode_enabled(rdev);
1314 sumo_set_forced_mode_disabled(rdev);
1315 sumo_post_notify_alt_vddnb_change(rdev, new_ps, old_ps);
1316 }
1317 if (pi->enable_boost)
1318 sumo_enable_boost(rdev, new_ps, true);
1319 if (pi->enable_dpm)
1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1321
1322 return 0;
1323}
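/*
 * Note the forced-mode enable/disable toggling around the level
 * reprogramming: the hardware is pinned to level 0 while the level table
 * is rewritten, and the extra disable/enable/disable pulse presumably
 * makes the DPM controller re-latch the new configuration before it is
 * released.
 */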
1324
1325void sumo_dpm_post_set_power_state(struct radeon_device *rdev)
1326{
1327 struct sumo_power_info *pi = sumo_get_pi(rdev);
1328 struct radeon_ps *new_ps = &pi->requested_rps;
1329
1330 sumo_update_current_ps(rdev, new_ps);
1331}
1332
1333void sumo_dpm_reset_asic(struct radeon_device *rdev)
1334{
1335 sumo_program_bootup_state(rdev);
1336 sumo_enable_power_level_0(rdev);
1337 sumo_set_forced_level_0(rdev);
1338 sumo_set_forced_mode_enabled(rdev);
1339 sumo_wait_for_level_0(rdev);
1340 sumo_set_forced_mode_disabled(rdev);
1341 sumo_set_forced_mode_enabled(rdev);
1342 sumo_set_forced_mode_disabled(rdev);
1343}
1344
1345void sumo_dpm_setup_asic(struct radeon_device *rdev)
1346{
1347 struct sumo_power_info *pi = sumo_get_pi(rdev);
1348
1349 sumo_initialize_m3_arb(rdev);
1350 pi->fw_version = sumo_get_running_fw_version(rdev);
1351 DRM_INFO("Found smc ucode version: 0x%08x\n", pi->fw_version);
1352 sumo_program_acpi_power_level(rdev);
1353 sumo_enable_acpi_pm(rdev);
1354 sumo_take_smu_control(rdev, true);
1355}
1356
1357void sumo_dpm_display_configuration_changed(struct radeon_device *rdev)
1358{
1359
1360}
1361
1362union power_info {
1363 struct _ATOM_POWERPLAY_INFO info;
1364 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1365 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1366 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
1367 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
1368 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
1369};
1370
1371union pplib_clock_info {
1372 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
1373 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
1374 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
1375 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
1376};
1377
1378union pplib_power_state {
1379 struct _ATOM_PPLIB_STATE v1;
1380 struct _ATOM_PPLIB_STATE_V2 v2;
1381};
1382
1383static void sumo_patch_boot_state(struct radeon_device *rdev,
1384 struct sumo_ps *ps)
1385{
1386 struct sumo_power_info *pi = sumo_get_pi(rdev);
1387
1388 ps->num_levels = 1;
1389 ps->flags = 0;
1390 ps->levels[0] = pi->boot_pl;
1391}
1392
1393static void sumo_parse_pplib_non_clock_info(struct radeon_device *rdev,
1394 struct radeon_ps *rps,
1395 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
1396 u8 table_rev)
1397{
1398 struct sumo_ps *ps = sumo_get_ps(rps);
1399
1400 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1401 rps->class = le16_to_cpu(non_clock_info->usClassification);
1402 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
1403
1404 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
1405 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
1406 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
1407 } else {
1408 rps->vclk = 0;
1409 rps->dclk = 0;
1410 }
1411
1412 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1413 rdev->pm.dpm.boot_ps = rps;
1414 sumo_patch_boot_state(rdev, ps);
1415 }
1416 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1417 rdev->pm.dpm.uvd_ps = rps;
1418}
1419
1420static void sumo_parse_pplib_clock_info(struct radeon_device *rdev,
1421 struct radeon_ps *rps, int index,
1422 union pplib_clock_info *clock_info)
1423{
1424 struct sumo_power_info *pi = sumo_get_pi(rdev);
1425 struct sumo_ps *ps = sumo_get_ps(rps);
1426 struct sumo_pl *pl = &ps->levels[index];
1427 u32 sclk;
1428
1429 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
1430 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
1431 pl->sclk = sclk;
1432 pl->vddc_index = clock_info->sumo.vddcIndex;
1433 pl->sclk_dpm_tdp_limit = clock_info->sumo.tdpLimit;
1434
1435 ps->num_levels = index + 1;
1436
1437 if (pi->enable_sclk_ds) {
1438 pl->ds_divider_index = 5;
1439 pl->ss_divider_index = 4;
1440 }
1441}
1442
1443static int sumo_parse_power_table(struct radeon_device *rdev)
1444{
1445 struct radeon_mode_info *mode_info = &rdev->mode_info;
1446 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1447 union pplib_power_state *power_state;
1448 int i, j, k, non_clock_array_index, clock_array_index;
1449 union pplib_clock_info *clock_info;
1450 struct _StateArray *state_array;
1451 struct _ClockInfoArray *clock_info_array;
1452 struct _NonClockInfoArray *non_clock_info_array;
1453 union power_info *power_info;
1454 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1455 u16 data_offset;
1456 u8 frev, crev;
1457 u8 *power_state_offset;
1458 struct sumo_ps *ps;
1459
1460 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1461 &frev, &crev, &data_offset))
1462 return -EINVAL;
1463 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1464
1465 state_array = (struct _StateArray *)
1466 (mode_info->atom_context->bios + data_offset +
1467 le16_to_cpu(power_info->pplib.usStateArrayOffset));
1468 clock_info_array = (struct _ClockInfoArray *)
1469 (mode_info->atom_context->bios + data_offset +
1470 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
1471 non_clock_info_array = (struct _NonClockInfoArray *)
1472 (mode_info->atom_context->bios + data_offset +
1473 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
1474
1475 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
1476 state_array->ucNumEntries, GFP_KERNEL);
1477 if (!rdev->pm.dpm.ps)
1478 return -ENOMEM;
1479 power_state_offset = (u8 *)state_array->states;
1480 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1481 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1482 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1483 for (i = 0; i < state_array->ucNumEntries; i++) {
1484 power_state = (union pplib_power_state *)power_state_offset;
1485 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1486 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1487 &non_clock_info_array->nonClockInfo[non_clock_array_index];
1488 if (!rdev->pm.power_state[i].clock_info)
1489 return -EINVAL;
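/*
 * XXX: returning here leaks rdev->pm.dpm.ps (and any ps_priv entries
 * allocated in earlier iterations); they should be freed as in the
 * -ENOMEM path below.
 */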
1490 ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
1491 if (ps == NULL) {
1492 kfree(rdev->pm.dpm.ps);
1493 return -ENOMEM;
1494 }
1495 rdev->pm.dpm.ps[i].ps_priv = ps;
1496 k = 0;
1497 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1498 clock_array_index = power_state->v2.clockInfoIndex[j];
1499 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1500 break;
1501 clock_info = (union pplib_clock_info *)
1502 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
1503 sumo_parse_pplib_clock_info(rdev,
1504 &rdev->pm.dpm.ps[i], k,
1505 clock_info);
1506 k++;
1507 }
1508 sumo_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1509 non_clock_info,
1510 non_clock_info_array->ucEntrySize);
1511 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
1512 }
1513 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
1514 return 0;
1515}
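/*
 * Layout note: each _ATOM_PPLIB_STATE_V2 entry is a two byte header
 * (ucNumDPMLevels, nonClockInfoIndex) followed by one clock info index
 * byte per DPM level, hence the "2 + ucNumDPMLevels" stride used to walk
 * the packed state array.
 */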
1516
1517u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
1518 struct sumo_vid_mapping_table *vid_mapping_table,
1519 u32 vid_2bit)
1520{
1521 u32 i;
1522
1523 for (i = 0; i < vid_mapping_table->num_entries; i++) {
1524 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
1525 return vid_mapping_table->entries[i].vid_7bit;
1526 }
1527
1528 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
1529}
1530
1531static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
1532 u32 vid_2bit)
1533{
1534 struct sumo_power_info *pi = sumo_get_pi(rdev);
1535 u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
1536
1537 if (vid_7bit > 0x7C)
1538 return 0;
1539
1540 return (15500 - vid_7bit * 125 + 5) / 10;
1541}
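/*
 * SVI style 7 bit VID decode, assuming the usual 1.55 V base and 12.5 mV
 * step: voltage = 15500 - vid * 125 in 0.1 mV units, rounded to whole mV
 * by the "+ 5) / 10".  VIDs above 0x7C would be negative and are treated
 * as "off" (0).
 */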
1542
1543static void sumo_construct_display_voltage_mapping_table(struct radeon_device *rdev,
1544 struct sumo_disp_clock_voltage_mapping_table *disp_clk_voltage_mapping_table,
1545 ATOM_CLK_VOLT_CAPABILITY *table)
1546{
1547 u32 i;
1548
1549 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
1550 if (table[i].ulMaximumSupportedCLK == 0)
1551 break;
1552
1553 disp_clk_voltage_mapping_table->display_clock_frequency[i] =
1554 table[i].ulMaximumSupportedCLK;
1555 }
1556
1557 disp_clk_voltage_mapping_table->num_max_voltage_levels = i;
1558
1559 if (disp_clk_voltage_mapping_table->num_max_voltage_levels == 0) {
1560 disp_clk_voltage_mapping_table->display_clock_frequency[0] = 80000;
1561 disp_clk_voltage_mapping_table->num_max_voltage_levels = 1;
1562 }
1563}
1564
1565void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
1566 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
1567 ATOM_AVAILABLE_SCLK_LIST *table)
1568{
1569 u32 i;
1570 u32 n = 0;
1571 u32 prev_sclk = 0;
1572
1573 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
1574 if (table[i].ulSupportedSCLK > prev_sclk) {
1575 sclk_voltage_mapping_table->entries[n].sclk_frequency =
1576 table[i].ulSupportedSCLK;
1577 sclk_voltage_mapping_table->entries[n].vid_2bit =
1578 table[i].usVoltageIndex;
1579 prev_sclk = table[i].ulSupportedSCLK;
1580 n++;
1581 }
1582 }
1583
1584 sclk_voltage_mapping_table->num_max_dpm_entries = n;
1585}
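/*
 * Only strictly increasing SCLK entries are kept, which drops zero/empty
 * rows and duplicates, assuming the BIOS supplies sAvail_SCLK sorted in
 * ascending order.
 */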
1586
1587void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
1588 struct sumo_vid_mapping_table *vid_mapping_table,
1589 ATOM_AVAILABLE_SCLK_LIST *table)
1590{
1591 u32 i, j;
1592
1593 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
1594 if (table[i].ulSupportedSCLK != 0) {
1595 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
1596 table[i].usVoltageID;
1597 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
1598 table[i].usVoltageIndex;
1599 }
1600 }
1601
1602 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
1603 if (vid_mapping_table->entries[i].vid_7bit == 0) {
1604 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
1605 if (vid_mapping_table->entries[j].vid_7bit != 0) {
1606 vid_mapping_table->entries[i] =
1607 vid_mapping_table->entries[j];
1608 vid_mapping_table->entries[j].vid_7bit = 0;
1609 break;
1610 }
1611 }
1612
1613 if (j == SUMO_MAX_NUMBER_VOLTAGES)
1614 break;
1615 }
1616 }
1617
1618 vid_mapping_table->num_entries = i;
1619}
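/*
 * The second pass compacts the table: holes (vid_7bit == 0) are filled
 * by pulling forward the next populated entry, so num_entries ends up
 * counting a contiguous prefix of valid mappings.
 */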
1620
1621union igp_info {
1622 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
1623 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
1624 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
1625 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
1626};
1627
1628static int sumo_parse_sys_info_table(struct radeon_device *rdev)
1629{
1630 struct sumo_power_info *pi = sumo_get_pi(rdev);
1631 struct radeon_mode_info *mode_info = &rdev->mode_info;
1632 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
1633 union igp_info *igp_info;
1634 u8 frev, crev;
1635 u16 data_offset;
1636 int i;
1637
1638 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1639 &frev, &crev, &data_offset)) {
1640 igp_info = (union igp_info *)(mode_info->atom_context->bios +
1641 data_offset);
1642
1643 if (crev != 6) {
1644 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
1645 return -EINVAL;
1646 }
1647 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_6.ulBootUpEngineClock);
1648 pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_6.ulMinEngineClock);
1649 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_6.ulBootUpUMAClock);
1650 pi->sys_info.bootup_nb_voltage_index =
1651 le16_to_cpu(igp_info->info_6.usBootUpNBVoltage);
1652 if (igp_info->info_6.ucHtcTmpLmt == 0)
1653 pi->sys_info.htc_tmp_lmt = 203;
1654 else
1655 pi->sys_info.htc_tmp_lmt = igp_info->info_6.ucHtcTmpLmt;
1656 if (igp_info->info_6.ucHtcHystLmt == 0)
1657 pi->sys_info.htc_hyst_lmt = 5;
1658 else
1659 pi->sys_info.htc_hyst_lmt = igp_info->info_6.ucHtcHystLmt;
1660 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
1661 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
1662 }
1663 for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++) {
1664 pi->sys_info.csr_m3_arb_cntl_default[i] =
1665 le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_DEFAULT[i]);
1666 pi->sys_info.csr_m3_arb_cntl_uvd[i] =
1667 le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_UVD[i]);
1668 pi->sys_info.csr_m3_arb_cntl_fs3d[i] =
1669 le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_FS3D[i]);
1670 }
1671 pi->sys_info.sclk_dpm_boost_margin =
1672 le32_to_cpu(igp_info->info_6.SclkDpmBoostMargin);
1673 pi->sys_info.sclk_dpm_throttle_margin =
1674 le32_to_cpu(igp_info->info_6.SclkDpmThrottleMargin);
1675 pi->sys_info.sclk_dpm_tdp_limit_pg =
1676 le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitPG);
1677 pi->sys_info.gnb_tdp_limit = le16_to_cpu(igp_info->info_6.GnbTdpLimit);
1678 pi->sys_info.sclk_dpm_tdp_limit_boost =
1679 le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitBoost);
1680 pi->sys_info.boost_sclk = le32_to_cpu(igp_info->info_6.ulBoostEngineCLock);
1681 pi->sys_info.boost_vid_2bit = igp_info->info_6.ulBoostVid_2bit;
1682 if (igp_info->info_6.EnableBoost)
1683 pi->sys_info.enable_boost = true;
1684 else
1685 pi->sys_info.enable_boost = false;
1686 sumo_construct_display_voltage_mapping_table(rdev,
1687 &pi->sys_info.disp_clk_voltage_mapping_table,
1688 igp_info->info_6.sDISPCLK_Voltage);
1689 sumo_construct_sclk_voltage_mapping_table(rdev,
1690 &pi->sys_info.sclk_voltage_mapping_table,
1691 igp_info->info_6.sAvail_SCLK);
1692 sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
1693 igp_info->info_6.sAvail_SCLK);
1694
1695 }
1696 return 0;
1697}
1698
1699static void sumo_construct_boot_and_acpi_state(struct radeon_device *rdev)
1700{
1701 struct sumo_power_info *pi = sumo_get_pi(rdev);
1702
1703 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1704 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1705 pi->boot_pl.ds_divider_index = 0;
1706 pi->boot_pl.ss_divider_index = 0;
1707 pi->boot_pl.allow_gnb_slow = 1;
1708 pi->acpi_pl = pi->boot_pl;
1709 pi->current_ps.num_levels = 1;
1710 pi->current_ps.levels[0] = pi->boot_pl;
1711}
1712
1713int sumo_dpm_init(struct radeon_device *rdev)
1714{
1715 struct sumo_power_info *pi;
1716 u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
1717 int ret;
1718
1719 pi = kzalloc(sizeof(struct sumo_power_info), GFP_KERNEL);
1720 if (pi == NULL)
1721 return -ENOMEM;
1722 rdev->pm.dpm.priv = pi;
1723
1724 pi->driver_nbps_policy_disable = false;
1725 if ((rdev->family == CHIP_PALM) && (hw_rev < 3))
1726 pi->disable_gfx_power_gating_in_uvd = true;
1727 else
1728 pi->disable_gfx_power_gating_in_uvd = false;
1729 pi->enable_alt_vddnb = true;
1730 pi->enable_sclk_ds = true;
1731 pi->enable_dynamic_m3_arbiter = false;
1732 pi->enable_dynamic_patch_ps = true;
1733 pi->enable_gfx_power_gating = true;
1734 pi->enable_gfx_clock_gating = true;
1735 pi->enable_mg_clock_gating = true;
1736 pi->enable_auto_thermal_throttling = true;
1737
1738 ret = sumo_parse_sys_info_table(rdev);
1739 if (ret)
1740 return ret;
1741
1742 sumo_construct_boot_and_acpi_state(rdev);
1743
1744 ret = sumo_parse_power_table(rdev);
1745 if (ret)
1746 return ret;
1747
1748 pi->pasi = CYPRESS_HASI_DFLT;
1749 pi->asi = RV770_ASI_DFLT;
1750 pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
1751 pi->enable_boost = pi->sys_info.enable_boost;
1752 pi->enable_dpm = true;
1753
1754 return 0;
1755}
1756
1757void sumo_dpm_print_power_state(struct radeon_device *rdev,
1758 struct radeon_ps *rps)
1759{
1760 int i;
1761 struct sumo_ps *ps = sumo_get_ps(rps);
1762
1763 r600_dpm_print_class_info(rps->class, rps->class2);
1764 r600_dpm_print_cap_info(rps->caps);
1765 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
1766 for (i = 0; i < ps->num_levels; i++) {
1767 struct sumo_pl *pl = &ps->levels[i];
1768 printk("\t\tpower level %d sclk: %u vddc: %u\n",
1769 i, pl->sclk,
1770 sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
1771 }
1772 r600_dpm_print_ps_status(rdev, rps);
1773}
1774
1775void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
1776 struct seq_file *m)
1777{
1778 struct sumo_power_info *pi = sumo_get_pi(rdev);
1779 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1780 struct sumo_ps *ps = sumo_get_ps(rps);
1781 struct sumo_pl *pl;
1782 u32 current_index =
1783 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
1784 CURR_INDEX_SHIFT;
1785
1786 if (current_index == BOOST_DPM_LEVEL) {
1787 pl = &pi->boost_pl;
1788 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
1789 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
1790 current_index, pl->sclk,
1791 sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
1792 } else if (current_index >= ps->num_levels) {
1793 seq_printf(m, "invalid dpm profile %d\n", current_index);
1794 } else {
1795 pl = &ps->levels[current_index];
1796 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
1797 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
1798 current_index, pl->sclk,
1799 sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
1800 }
1801}
1802
1803void sumo_dpm_fini(struct radeon_device *rdev)
1804{
1805 int i;
1806
1807 sumo_cleanup_asic(rdev); /* ??? */
1808
1809 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1810 kfree(rdev->pm.dpm.ps[i].ps_priv);
1811 }
1812 kfree(rdev->pm.dpm.ps);
1813 kfree(rdev->pm.dpm.priv);
1814}
1815
1816u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low)
1817{
1818 struct sumo_power_info *pi = sumo_get_pi(rdev);
1819 struct sumo_ps *requested_state = sumo_get_ps(&pi->requested_rps);
1820
1821 if (low)
1822 return requested_state->levels[0].sclk;
1823 else
1824 return requested_state->levels[requested_state->num_levels - 1].sclk;
1825}
1826
1827u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low)
1828{
1829 struct sumo_power_info *pi = sumo_get_pi(rdev);
1830
1831 return pi->sys_info.bootup_uma_clk;
1832}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
new file mode 100644
index 000000000000..07dda299c784
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __SUMO_DPM_H__
24#define __SUMO_DPM_H__
25
26#include "atom.h"
27
28#define SUMO_MAX_HARDWARE_POWERLEVELS 5
29#define SUMO_PM_NUMBER_OF_TC 15
30
31struct sumo_pl {
32 u32 sclk;
33 u32 vddc_index;
34 u32 ds_divider_index;
35 u32 ss_divider_index;
36 u32 allow_gnb_slow;
37 u32 sclk_dpm_tdp_limit;
38};
39
40/* used for the flags field */
41#define SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE (1 << 0)
42#define SUMO_POWERSTATE_FLAGS_BOOST_STATE (1 << 1)
43
44struct sumo_ps {
45 struct sumo_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
46 u32 num_levels;
47 /* flags */
48 u32 flags;
49};
50
51#define NUMBER_OF_M3ARB_PARAM_SETS 10
52#define SUMO_MAX_NUMBER_VOLTAGES 4
53
54struct sumo_disp_clock_voltage_mapping_table {
55 u32 num_max_voltage_levels;
56 u32 display_clock_frequency[SUMO_MAX_NUMBER_VOLTAGES];
57};
58
59struct sumo_vid_mapping_entry {
60 u16 vid_2bit;
61 u16 vid_7bit;
62};
63
64struct sumo_vid_mapping_table {
65 u32 num_entries;
66 struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
67};
68
69struct sumo_sclk_voltage_mapping_entry {
70 u32 sclk_frequency;
71 u16 vid_2bit;
72 u16 rsv;
73};
74
75struct sumo_sclk_voltage_mapping_table {
76 u32 num_max_dpm_entries;
77 struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
78};
79
80struct sumo_sys_info {
81 u32 bootup_sclk;
82 u32 min_sclk;
83 u32 bootup_uma_clk;
84 u16 bootup_nb_voltage_index;
85 u8 htc_tmp_lmt;
86 u8 htc_hyst_lmt;
87 struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
88 struct sumo_disp_clock_voltage_mapping_table disp_clk_voltage_mapping_table;
89 struct sumo_vid_mapping_table vid_mapping_table;
90 u32 csr_m3_arb_cntl_default[NUMBER_OF_M3ARB_PARAM_SETS];
91 u32 csr_m3_arb_cntl_uvd[NUMBER_OF_M3ARB_PARAM_SETS];
92 u32 csr_m3_arb_cntl_fs3d[NUMBER_OF_M3ARB_PARAM_SETS];
93 u32 sclk_dpm_boost_margin;
94 u32 sclk_dpm_throttle_margin;
95 u32 sclk_dpm_tdp_limit_pg;
96 u32 gnb_tdp_limit;
97 u32 sclk_dpm_tdp_limit_boost;
98 u32 boost_sclk;
99 u32 boost_vid_2bit;
100 bool enable_boost;
101};
102
103struct sumo_power_info {
104 u32 asi;
105 u32 pasi;
106 u32 bsp;
107 u32 bsu;
108 u32 pbsp;
109 u32 pbsu;
110 u32 dsp;
111 u32 psp;
112 u32 thermal_auto_throttling;
113 u32 uvd_m3_arbiter;
114 u32 fw_version;
115 struct sumo_sys_info sys_info;
116 struct sumo_pl acpi_pl;
117 struct sumo_pl boot_pl;
118 struct sumo_pl boost_pl;
119 bool disable_gfx_power_gating_in_uvd;
120 bool driver_nbps_policy_disable;
121 bool enable_alt_vddnb;
122 bool enable_dynamic_m3_arbiter;
123 bool enable_gfx_clock_gating;
124 bool enable_gfx_power_gating;
125 bool enable_mg_clock_gating;
126 bool enable_sclk_ds;
127 bool enable_auto_thermal_throttling;
128 bool enable_dynamic_patch_ps;
129 bool enable_dpm;
130 bool enable_boost;
131 struct radeon_ps current_rps;
132 struct sumo_ps current_ps;
133 struct radeon_ps requested_rps;
134 struct sumo_ps requested_ps;
135};
136
137#define SUMO_UTC_DFLT_00 0x48
138#define SUMO_UTC_DFLT_01 0x44
139#define SUMO_UTC_DFLT_02 0x44
140#define SUMO_UTC_DFLT_03 0x44
141#define SUMO_UTC_DFLT_04 0x44
142#define SUMO_UTC_DFLT_05 0x44
143#define SUMO_UTC_DFLT_06 0x44
144#define SUMO_UTC_DFLT_07 0x44
145#define SUMO_UTC_DFLT_08 0x44
146#define SUMO_UTC_DFLT_09 0x44
147#define SUMO_UTC_DFLT_10 0x44
148#define SUMO_UTC_DFLT_11 0x44
149#define SUMO_UTC_DFLT_12 0x44
150#define SUMO_UTC_DFLT_13 0x44
151#define SUMO_UTC_DFLT_14 0x44
152
153#define SUMO_DTC_DFLT_00 0x48
154#define SUMO_DTC_DFLT_01 0x44
155#define SUMO_DTC_DFLT_02 0x44
156#define SUMO_DTC_DFLT_03 0x44
157#define SUMO_DTC_DFLT_04 0x44
158#define SUMO_DTC_DFLT_05 0x44
159#define SUMO_DTC_DFLT_06 0x44
160#define SUMO_DTC_DFLT_07 0x44
161#define SUMO_DTC_DFLT_08 0x44
162#define SUMO_DTC_DFLT_09 0x44
163#define SUMO_DTC_DFLT_10 0x44
164#define SUMO_DTC_DFLT_11 0x44
165#define SUMO_DTC_DFLT_12 0x44
166#define SUMO_DTC_DFLT_13 0x44
167#define SUMO_DTC_DFLT_14 0x44
168
169#define SUMO_AH_DFLT 5
170
171#define SUMO_R_DFLT0 70
172#define SUMO_R_DFLT1 70
173#define SUMO_R_DFLT2 70
174#define SUMO_R_DFLT3 70
175#define SUMO_R_DFLT4 100
176
177#define SUMO_L_DFLT0 0
178#define SUMO_L_DFLT1 20
179#define SUMO_L_DFLT2 20
180#define SUMO_L_DFLT3 20
181#define SUMO_L_DFLT4 20
182#define SUMO_VRC_DFLT 0x30033
183#define SUMO_MGCGTTLOCAL0_DFLT 0
184#define SUMO_MGCGTTLOCAL1_DFLT 0
185#define SUMO_GICST_DFLT 19
186#define SUMO_SST_DFLT 8
187#define SUMO_VOLTAGEDROPT_DFLT 1
188#define SUMO_GFXPOWERGATINGT_DFLT 100
189
190/* sumo_dpm.c */
191void sumo_gfx_clockgating_initialize(struct radeon_device *rdev);
192void sumo_program_vc(struct radeon_device *rdev, u32 vrc);
193void sumo_clear_vc(struct radeon_device *rdev);
194void sumo_program_sstp(struct radeon_device *rdev);
195void sumo_take_smu_control(struct radeon_device *rdev, bool enable);
196void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
197 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
198 ATOM_AVAILABLE_SCLK_LIST *table);
199void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
200 struct sumo_vid_mapping_table *vid_mapping_table,
201 ATOM_AVAILABLE_SCLK_LIST *table);
202u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
203 struct sumo_vid_mapping_table *vid_mapping_table,
204 u32 vid_2bit);
205u32 sumo_get_sleep_divider_from_id(u32 id);
206u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
207 u32 sclk,
208 u32 min_sclk_in_sr);
209
210/* sumo_smc.c */
211void sumo_initialize_m3_arb(struct radeon_device *rdev);
212void sumo_smu_pg_init(struct radeon_device *rdev);
213void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit);
214void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
215 bool powersaving, bool force_nbps1);
216void sumo_boost_state_enable(struct radeon_device *rdev, bool enable);
217void sumo_enable_boost_timer(struct radeon_device *rdev);
218u32 sumo_get_running_fw_version(struct radeon_device *rdev);
219
220#endif
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
new file mode 100644
index 000000000000..18abba5b5810
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -0,0 +1,222 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "sumod.h"
27#include "sumo_dpm.h"
28#include "ppsmc.h"
29
30#define SUMO_SMU_SERVICE_ROUTINE_PG_INIT 1
31#define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27
32#define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20
33
34struct sumo_ps *sumo_get_ps(struct radeon_ps *rps);
35struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
36
37static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
38{
39 u32 gfx_int_req;
40 int i;
41
42 for (i = 0; i < rdev->usec_timeout; i++) {
43 if (RREG32(GFX_INT_STATUS) & INT_DONE)
44 break;
45 udelay(1);
46 }
47
48 gfx_int_req = SERV_INDEX(id) | INT_REQ;
49 WREG32(GFX_INT_REQ, gfx_int_req);
50
51 for (i = 0; i < rdev->usec_timeout; i++) {
52 if (RREG32(GFX_INT_REQ) & INT_REQ)
53 break;
54 udelay(1);
55 }
56
57 for (i = 0; i < rdev->usec_timeout; i++) {
58 if (RREG32(GFX_INT_STATUS) & INT_ACK)
59 break;
60 udelay(1);
61 }
62
63 for (i = 0; i < rdev->usec_timeout; i++) {
64 if (RREG32(GFX_INT_STATUS) & INT_DONE)
65 break;
66 udelay(1);
67 }
68
69 gfx_int_req &= ~INT_REQ;
70 WREG32(GFX_INT_REQ, gfx_int_req);
71}
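/*
 * Doorbell style handshake: wait for any previous request to finish
 * (INT_DONE), post the service routine index with INT_REQ set, wait for
 * the request to latch, get acknowledged (INT_ACK) and complete
 * (INT_DONE), then drop INT_REQ.  Every wait is bounded by usec_timeout
 * and timeouts are silently ignored, so this is best effort.
 */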
72
73void sumo_initialize_m3_arb(struct radeon_device *rdev)
74{
75 struct sumo_power_info *pi = sumo_get_pi(rdev);
76 u32 i;
77
78 if (!pi->enable_dynamic_m3_arbiter)
79 return;
80
81 for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++)
82 WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
83 pi->sys_info.csr_m3_arb_cntl_default[i]);
84
85 for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 2; i++)
86 WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
87 pi->sys_info.csr_m3_arb_cntl_uvd[i % NUMBER_OF_M3ARB_PARAM_SETS]);
88
89 for (; i < NUMBER_OF_M3ARB_PARAM_SETS * 3; i++)
90 WREG32_RCU(MCU_M3ARB_PARAMS + (i * 4),
91 pi->sys_info.csr_m3_arb_cntl_fs3d[i % NUMBER_OF_M3ARB_PARAM_SETS]);
92}
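/*
 * The arbiter parameters are written as three consecutive banks of
 * NUMBER_OF_M3ARB_PARAM_SETS dwords (default, UVD, FS3D); 'i' keeps
 * counting across the three loops, so each bank lands in the next block
 * of MCU_M3ARB_PARAMS registers.
 */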
93
94static bool sumo_is_alt_vddnb_supported(struct radeon_device *rdev)
95{
96 struct sumo_power_info *pi = sumo_get_pi(rdev);
97 bool return_code = false;
98
99 if (!pi->enable_alt_vddnb)
100 return return_code;
101
102 if ((rdev->family == CHIP_SUMO) || (rdev->family == CHIP_SUMO2)) {
103 if (pi->fw_version >= 0x00010C00)
104 return_code = true;
105 }
106
107 return return_code;
108}
109
110void sumo_smu_notify_alt_vddnb_change(struct radeon_device *rdev,
111 bool powersaving, bool force_nbps1)
112{
113 u32 param = 0;
114
115 if (!sumo_is_alt_vddnb_supported(rdev))
116 return;
117
118 if (powersaving)
119 param |= 1;
120
121 if (force_nbps1)
122 param |= 2;
123
124 WREG32_RCU(RCU_ALTVDDNB_NOTIFY, param);
125
126 sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY);
127}
128
129void sumo_smu_pg_init(struct radeon_device *rdev)
130{
131 sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_PG_INIT);
132}
133
134static u32 sumo_power_of_4(u32 unit)
135{
136 u32 ret = 1;
137 u32 i;
138
139 for (i = 0; i < unit; i++)
140 ret *= 4;
141
142 return ret;
143}
144
145void sumo_enable_boost_timer(struct radeon_device *rdev)
146{
147 struct sumo_power_info *pi = sumo_get_pi(rdev);
148 u32 period, unit, timer_value;
149 u32 xclk = radeon_get_xclk(rdev);
150
151 unit = (RREG32_RCU(RCU_LCLK_SCALING_CNTL) & LCLK_SCALING_TIMER_PRESCALER_MASK)
152 >> LCLK_SCALING_TIMER_PRESCALER_SHIFT;
153
154 period = 100 * (xclk / 100 / sumo_power_of_4(unit));
155
156 timer_value = (period << 16) | (unit << 4);
157
158 WREG32_RCU(RCU_GNB_PWR_REP_TIMER_CNTL, timer_value);
159 WREG32_RCU(RCU_BOOST_MARGIN, pi->sys_info.sclk_dpm_boost_margin);
160 WREG32_RCU(RCU_THROTTLE_MARGIN, pi->sys_info.sclk_dpm_throttle_margin);
161 WREG32_RCU(GNB_TDP_LIMIT, pi->sys_info.gnb_tdp_limit);
162 WREG32_RCU(RCU_SclkDpmTdpLimitPG, pi->sys_info.sclk_dpm_tdp_limit_pg);
163
164 sumo_send_msg_to_smu(rdev, SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20);
165}
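/*
 * Rough timing, assuming radeon_get_xclk() returns the reference clock
 * in 10 kHz units: the timer ticks at xclk / 4^unit and the period is
 * programmed to about xclk / 4^unit ticks, which works out to roughly a
 * 100 us reporting interval regardless of the prescaler read back from
 * RCU_LCLK_SCALING_CNTL.
 */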
166
167void sumo_set_tdp_limit(struct radeon_device *rdev, u32 index, u32 tdp_limit)
168{
169 u32 regoffset = 0;
170 u32 shift = 0;
171 u32 mask = 0xFFF;
172 u32 sclk_dpm_tdp_limit;
173
174 switch (index) {
175 case 0:
176 regoffset = RCU_SclkDpmTdpLimit01;
177 shift = 16;
178 break;
179 case 1:
180 regoffset = RCU_SclkDpmTdpLimit01;
181 shift = 0;
182 break;
183 case 2:
184 regoffset = RCU_SclkDpmTdpLimit23;
185 shift = 16;
186 break;
187 case 3:
188 regoffset = RCU_SclkDpmTdpLimit23;
189 shift = 0;
190 break;
191 case 4:
192 regoffset = RCU_SclkDpmTdpLimit47;
193 shift = 16;
194 break;
195 case 7:
196 regoffset = RCU_SclkDpmTdpLimit47;
197 shift = 0;
198 break;
199 default:
200 break;
201 }
202
203 sclk_dpm_tdp_limit = RREG32_RCU(regoffset);
204 sclk_dpm_tdp_limit &= ~(mask << shift);
205 sclk_dpm_tdp_limit |= (tdp_limit << shift);
206 WREG32_RCU(regoffset, sclk_dpm_tdp_limit);
207}
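/*
 * Each RCU_SclkDpmTdpLimit* register packs two 12 bit limits (one at
 * shift 16, one at shift 0).  Only indices 0-4 and 7 are mapped; any
 * other index falls through with regoffset == 0, so callers are expected
 * to pass a valid index.
 */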
208
209void sumo_boost_state_enable(struct radeon_device *rdev, bool enable)
210{
211 u32 boost_disable = RREG32_RCU(RCU_GPU_BOOST_DISABLE);
212
213 boost_disable &= 0xFFFFFFFE;
214 boost_disable |= (enable ? 0 : 1);
215 WREG32_RCU(RCU_GPU_BOOST_DISABLE, boost_disable);
216}
217
218u32 sumo_get_running_fw_version(struct radeon_device *rdev)
219{
220 return RREG32_RCU(RCU_FW_VERSION);
221}
222
diff --git a/drivers/gpu/drm/radeon/sumod.h b/drivers/gpu/drm/radeon/sumod.h
new file mode 100644
index 000000000000..7c9c2d4b86c0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sumod.h
@@ -0,0 +1,372 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef _SUMOD_H_
25#define _SUMOD_H_
26
27/* pm registers */
28
29/* rcu */
30#define RCU_FW_VERSION 0x30c
31
32#define RCU_PWR_GATING_SEQ0 0x408
33#define RCU_PWR_GATING_SEQ1 0x40c
34#define RCU_PWR_GATING_CNTL 0x410
35# define PWR_GATING_EN (1 << 0)
36# define RSVD_MASK (0x3 << 1)
37# define PCV(x) ((x) << 3)
38# define PCV_MASK (0x1f << 3)
39# define PCV_SHIFT 3
40# define PCP(x) ((x) << 8)
41# define PCP_MASK (0xf << 8)
42# define PCP_SHIFT 8
43# define RPW(x) ((x) << 16)
44# define RPW_MASK (0xf << 16)
45# define RPW_SHIFT 16
46# define ID(x) ((x) << 24)
47# define ID_MASK (0xf << 24)
48# define ID_SHIFT 24
49# define PGS(x) ((x) << 28)
50# define PGS_MASK (0xf << 28)
51# define PGS_SHIFT 28
52
53#define RCU_ALTVDDNB_NOTIFY 0x430
54#define RCU_LCLK_SCALING_CNTL 0x434
55# define LCLK_SCALING_EN (1 << 0)
56# define LCLK_SCALING_TYPE (1 << 1)
57# define LCLK_SCALING_TIMER_PRESCALER(x) ((x) << 4)
58# define LCLK_SCALING_TIMER_PRESCALER_MASK (0xf << 4)
59# define LCLK_SCALING_TIMER_PRESCALER_SHIFT 4
60# define LCLK_SCALING_TIMER_PERIOD(x) ((x) << 16)
61# define LCLK_SCALING_TIMER_PERIOD_MASK (0xf << 16)
62# define LCLK_SCALING_TIMER_PERIOD_SHIFT 16
63
64#define RCU_PWR_GATING_CNTL_2 0x4a0
65# define MPPU(x) ((x) << 0)
66# define MPPU_MASK (0xffff << 0)
67# define MPPU_SHIFT 0
68# define MPPD(x) ((x) << 16)
69# define MPPD_MASK (0xffff << 16)
70# define MPPD_SHIFT 16
71#define RCU_PWR_GATING_CNTL_3 0x4a4
72# define DPPU(x) ((x) << 0)
73# define DPPU_MASK (0xffff << 0)
74# define DPPU_SHIFT 0
75# define DPPD(x) ((x) << 16)
76# define DPPD_MASK (0xffff << 16)
77# define DPPD_SHIFT 16
78#define RCU_PWR_GATING_CNTL_4 0x4a8
79# define RT(x) ((x) << 0)
80# define RT_MASK (0xffff << 0)
81# define RT_SHIFT 0
82# define IT(x) ((x) << 16)
83# define IT_MASK (0xffff << 16)
84# define IT_SHIFT 16
85
86/* yes these two have the same address */
87#define RCU_PWR_GATING_CNTL_5 0x504
88#define RCU_GPU_BOOST_DISABLE 0x508
89
90#define MCU_M3ARB_INDEX 0x504
91#define MCU_M3ARB_PARAMS 0x508
92
93#define RCU_GNB_PWR_REP_TIMER_CNTL 0x50C
94
95#define RCU_SclkDpmTdpLimit01 0x514
96#define RCU_SclkDpmTdpLimit23 0x518
97#define RCU_SclkDpmTdpLimit47 0x51C
98#define RCU_SclkDpmTdpLimitPG 0x520
99
100#define GNB_TDP_LIMIT 0x540
101#define RCU_BOOST_MARGIN 0x544
102#define RCU_THROTTLE_MARGIN 0x548
103
104#define SMU_PCIE_PG_ARGS 0x58C
105#define SMU_PCIE_PG_ARGS_2 0x598
106#define SMU_PCIE_PG_ARGS_3 0x59C
107
108/* mmio */
109#define RCU_STATUS 0x11c
110# define GMC_PWR_GATER_BUSY (1 << 8)
111# define GFX_PWR_GATER_BUSY (1 << 9)
112# define UVD_PWR_GATER_BUSY (1 << 10)
113# define PCIE_PWR_GATER_BUSY (1 << 11)
114# define GMC_PWR_GATER_STATE (1 << 12)
115# define GFX_PWR_GATER_STATE (1 << 13)
116# define UVD_PWR_GATER_STATE (1 << 14)
117# define PCIE_PWR_GATER_STATE (1 << 15)
118# define GFX1_PWR_GATER_BUSY (1 << 16)
119# define GFX2_PWR_GATER_BUSY (1 << 17)
120# define GFX1_PWR_GATER_STATE (1 << 18)
121# define GFX2_PWR_GATER_STATE (1 << 19)
122
123#define GFX_INT_REQ 0x120
124# define INT_REQ (1 << 0)
125# define SERV_INDEX(x) ((x) << 1)
126# define SERV_INDEX_MASK (0xff << 1)
127# define SERV_INDEX_SHIFT 1
128#define GFX_INT_STATUS 0x124
129# define INT_ACK (1 << 0)
130# define INT_DONE (1 << 1)
131
132#define CG_SCLK_CNTL 0x600
133# define SCLK_DIVIDER(x) ((x) << 0)
134# define SCLK_DIVIDER_MASK (0x7f << 0)
135# define SCLK_DIVIDER_SHIFT 0
136#define CG_SCLK_STATUS 0x604
137# define SCLK_OVERCLK_DETECT (1 << 2)
138
139#define CG_DCLK_CNTL 0x610
140# define DCLK_DIVIDER_MASK 0x7f
141# define DCLK_DIR_CNTL_EN (1 << 8)
142#define CG_DCLK_STATUS 0x614
143# define DCLK_STATUS (1 << 0)
144#define CG_VCLK_CNTL 0x618
145# define VCLK_DIVIDER_MASK 0x7f
146# define VCLK_DIR_CNTL_EN (1 << 8)
147#define CG_VCLK_STATUS 0x61c
148
149#define GENERAL_PWRMGT 0x63c
150# define STATIC_PM_EN (1 << 1)
151
152#define SCLK_PWRMGT_CNTL 0x644
153# define SCLK_PWRMGT_OFF (1 << 0)
154# define SCLK_LOW_D1 (1 << 1)
155# define FIR_RESET (1 << 4)
156# define FIR_FORCE_TREND_SEL (1 << 5)
157# define FIR_TREND_MODE (1 << 6)
158# define DYN_GFX_CLK_OFF_EN (1 << 7)
159# define GFX_CLK_FORCE_ON (1 << 8)
160# define GFX_CLK_REQUEST_OFF (1 << 9)
161# define GFX_CLK_FORCE_OFF (1 << 10)
162# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
163# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
164# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
165# define GFX_VOLTAGE_CHANGE_EN (1 << 16)
166# define GFX_VOLTAGE_CHANGE_MODE (1 << 17)
167
168#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
169# define TARG_SCLK_INDEX(x) ((x) << 6)
170# define TARG_SCLK_INDEX_MASK (0x7 << 6)
171# define TARG_SCLK_INDEX_SHIFT 6
172# define CURR_SCLK_INDEX(x) ((x) << 9)
173# define CURR_SCLK_INDEX_MASK (0x7 << 9)
174# define CURR_SCLK_INDEX_SHIFT 9
175# define TARG_INDEX(x) ((x) << 12)
176# define TARG_INDEX_MASK (0x7 << 12)
177# define TARG_INDEX_SHIFT 12
178# define CURR_INDEX(x) ((x) << 15)
179# define CURR_INDEX_MASK (0x7 << 15)
180# define CURR_INDEX_SHIFT 15
181
182#define CG_SCLK_DPM_CTRL 0x684
183# define SCLK_FSTATE_0_DIV(x) ((x) << 0)
184# define SCLK_FSTATE_0_DIV_MASK (0x7f << 0)
185# define SCLK_FSTATE_0_DIV_SHIFT 0
186# define SCLK_FSTATE_0_VLD (1 << 7)
187# define SCLK_FSTATE_1_DIV(x) ((x) << 8)
188# define SCLK_FSTATE_1_DIV_MASK (0x7f << 8)
189# define SCLK_FSTATE_1_DIV_SHIFT 8
190# define SCLK_FSTATE_1_VLD (1 << 15)
191# define SCLK_FSTATE_2_DIV(x) ((x) << 16)
192# define SCLK_FSTATE_2_DIV_MASK (0x7f << 16)
193# define SCLK_FSTATE_2_DIV_SHIFT 16
194# define SCLK_FSTATE_2_VLD (1 << 23)
195# define SCLK_FSTATE_3_DIV(x) ((x) << 24)
196# define SCLK_FSTATE_3_DIV_MASK (0x7f << 24)
197# define SCLK_FSTATE_3_DIV_SHIFT 24
198# define SCLK_FSTATE_3_VLD (1 << 31)
199#define CG_SCLK_DPM_CTRL_2 0x688
200#define CG_GCOOR 0x68c
201# define PHC(x) ((x) << 0)
202# define PHC_MASK (0x1f << 0)
203# define PHC_SHIFT 0
204# define SDC(x) ((x) << 9)
205# define SDC_MASK (0x3ff << 9)
206# define SDC_SHIFT 9
207# define SU(x) ((x) << 23)
208# define SU_MASK (0xf << 23)
209# define SU_SHIFT 23
210# define DIV_ID(x) ((x) << 28)
211# define DIV_ID_MASK (0x7 << 28)
212# define DIV_ID_SHIFT 28
213
214#define CG_FTV 0x690
215#define CG_FFCT_0 0x694
216# define UTC_0(x) ((x) << 0)
217# define UTC_0_MASK (0x3ff << 0)
218# define UTC_0_SHIFT 0
219# define DTC_0(x) ((x) << 10)
220# define DTC_0_MASK (0x3ff << 10)
221# define DTC_0_SHIFT 10
222
223#define CG_GIT 0x6d8
224# define CG_GICST(x) ((x) << 0)
225# define CG_GICST_MASK (0xffff << 0)
226# define CG_GICST_SHIFT 0
227# define CG_GIPOT(x) ((x) << 16)
228# define CG_GIPOT_MASK (0xffff << 16)
229# define CG_GIPOT_SHIFT 16
230
231#define CG_SCLK_DPM_CTRL_3 0x6e0
232# define FORCE_SCLK_STATE(x) ((x) << 0)
233# define FORCE_SCLK_STATE_MASK (0x7 << 0)
234# define FORCE_SCLK_STATE_SHIFT 0
235# define FORCE_SCLK_STATE_EN (1 << 3)
236# define GNB_TT(x) ((x) << 8)
237# define GNB_TT_MASK (0xff << 8)
238# define GNB_TT_SHIFT 8
239# define GNB_THERMTHRO_MASK (1 << 16)
240# define CNB_THERMTHRO_MASK_SCLK (1 << 17)
241# define DPM_SCLK_ENABLE (1 << 18)
242# define GNB_SLOW_FSTATE_0_MASK (1 << 23)
243# define GNB_SLOW_FSTATE_0_SHIFT 23
244# define FORCE_NB_PSTATE_1 (1 << 31)
245
246#define CG_SSP 0x6e8
247# define SST(x) ((x) << 0)
248# define SST_MASK (0xffff << 0)
249# define SST_SHIFT 0
250# define SSTU(x) ((x) << 16)
251# define SSTU_MASK (0xffff << 16)
252# define SSTU_SHIFT 16
253
254#define CG_ACPI_CNTL 0x70c
255# define SCLK_ACPI_DIV(x) ((x) << 0)
256# define SCLK_ACPI_DIV_MASK (0x7f << 0)
257# define SCLK_ACPI_DIV_SHIFT 0
258
259#define CG_SCLK_DPM_CTRL_4 0x71c
260# define DC_HDC(x) ((x) << 14)
261# define DC_HDC_MASK (0x3fff << 14)
262# define DC_HDC_SHIFT 14
263# define DC_HU(x) ((x) << 28)
264# define DC_HU_MASK (0xf << 28)
265# define DC_HU_SHIFT 28
266#define CG_SCLK_DPM_CTRL_5 0x720
267# define SCLK_FSTATE_BOOTUP(x) ((x) << 0)
268# define SCLK_FSTATE_BOOTUP_MASK (0x7 << 0)
269# define SCLK_FSTATE_BOOTUP_SHIFT 0
270# define TT_TP(x) ((x) << 3)
271# define TT_TP_MASK (0xffff << 3)
272# define TT_TP_SHIFT 3
273# define TT_TU(x) ((x) << 19)
274# define TT_TU_MASK (0xff << 19)
275# define TT_TU_SHIFT 19
276#define CG_SCLK_DPM_CTRL_6 0x724
277#define CG_AT_0 0x728
278# define CG_R(x) ((x) << 0)
279# define CG_R_MASK (0xffff << 0)
280# define CG_R_SHIFT 0
281# define CG_L(x) ((x) << 16)
282# define CG_L_MASK (0xffff << 16)
283# define CG_L_SHIFT 16
284#define CG_AT_1 0x72c
285#define CG_AT_2 0x730
286#define CG_THERMAL_INT 0x734
287#define DIG_THERM_INTH(x) ((x) << 8)
288#define DIG_THERM_INTH_MASK 0x0000FF00
289#define DIG_THERM_INTH_SHIFT 8
290#define DIG_THERM_INTL(x) ((x) << 16)
291#define DIG_THERM_INTL_MASK 0x00FF0000
292#define DIG_THERM_INTL_SHIFT 16
293#define THERM_INT_MASK_HIGH (1 << 24)
294#define THERM_INT_MASK_LOW (1 << 25)
295#define CG_AT_3 0x738
296#define CG_AT_4 0x73c
297#define CG_AT_5 0x740
298#define CG_AT_6 0x744
299#define CG_AT_7 0x748
300
301#define CG_BSP_0 0x750
302# define BSP(x) ((x) << 0)
303# define BSP_MASK (0xffff << 0)
304# define BSP_SHIFT 0
305# define BSU(x) ((x) << 16)
306# define BSU_MASK (0xf << 16)
307# define BSU_SHIFT 16
308
309#define CG_CG_VOLTAGE_CNTL 0x770
310# define REQ (1 << 0)
311# define LEVEL(x) ((x) << 1)
312# define LEVEL_MASK (0x3 << 1)
313# define LEVEL_SHIFT 1
314# define CG_VOLTAGE_EN (1 << 3)
315# define FORCE (1 << 4)
316# define PERIOD(x) ((x) << 8)
317# define PERIOD_MASK (0xffff << 8)
318# define PERIOD_SHIFT 8
319# define UNIT(x) ((x) << 24)
320# define UNIT_MASK (0xf << 24)
321# define UNIT_SHIFT 24
322
323#define CG_ACPI_VOLTAGE_CNTL 0x780
324# define ACPI_VOLTAGE_EN (1 << 8)
325
326#define CG_DPM_VOLTAGE_CNTL 0x788
327# define DPM_STATE0_LEVEL_MASK (0x3 << 0)
328# define DPM_STATE0_LEVEL_SHIFT 0
329# define DPM_VOLTAGE_EN (1 << 16)
330
331#define CG_PWR_GATING_CNTL 0x7ac
332# define DYN_PWR_DOWN_EN (1 << 0)
333# define ACPI_PWR_DOWN_EN (1 << 1)
334# define GFX_CLK_OFF_PWR_DOWN_EN (1 << 2)
335# define IOC_DISGPU_PWR_DOWN_EN (1 << 3)
336# define FORCE_POWR_ON (1 << 4)
337# define PGP(x) ((x) << 8)
338# define PGP_MASK (0xffff << 8)
339# define PGP_SHIFT 8
340# define PGU(x) ((x) << 24)
341# define PGU_MASK (0xf << 24)
342# define PGU_SHIFT 24
343
344#define CG_CGTT_LOCAL_0 0x7d0
345#define CG_CGTT_LOCAL_1 0x7d4
346
347#define DEEP_SLEEP_CNTL 0x818
348# define R_DIS (1 << 3)
349# define HS(x) ((x) << 4)
350# define HS_MASK (0xfff << 4)
351# define HS_SHIFT 4
352# define ENABLE_DS (1 << 31)
353#define DEEP_SLEEP_CNTL2 0x81c
354# define LB_UFP_EN (1 << 0)
355# define INOUT_C(x) ((x) << 4)
356# define INOUT_C_MASK (0xff << 4)
357# define INOUT_C_SHIFT 4
358
359#define CG_SCRATCH2 0x824
360
361#define CG_SCLK_DPM_CTRL_11 0x830
362
363#define HW_REV 0x5564
364# define ATI_REV_ID_MASK (0xf << 28)
365# define ATI_REV_ID_SHIFT 28
366/* 0 = A0, 1 = A1, 2 = B0, 3 = C0, etc. */
367
368#define DOUT_SCRATCH3 0x611c
369
370#define GB_ADDR_CONFIG 0x98f8
371
372#endif
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
new file mode 100644
index 000000000000..8a32bcc6bbb5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -0,0 +1,1917 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "trinityd.h"
27#include "r600_dpm.h"
28#include "trinity_dpm.h"
29#include <linux/seq_file.h>
30
31#define TRINITY_MAX_DEEPSLEEP_DIVIDER_ID 5
32#define TRINITY_MINIMUM_ENGINE_CLOCK 800
33#define SCLK_MIN_DIV_INTV_SHIFT 12
34#define TRINITY_DISPCLK_BYPASS_THRESHOLD 10000
35
36#ifndef TRINITY_MGCG_SEQUENCE
37#define TRINITY_MGCG_SEQUENCE 100
38
39static const u32 trinity_mgcg_shls_default[] =
40{
41 /* Register, Value, Mask */
42 0x0000802c, 0xc0000000, 0xffffffff,
43 0x00003fc4, 0xc0000000, 0xffffffff,
44 0x00005448, 0x00000100, 0xffffffff,
45 0x000055e4, 0x00000100, 0xffffffff,
46 0x0000160c, 0x00000100, 0xffffffff,
47 0x00008984, 0x06000100, 0xffffffff,
48 0x0000c164, 0x00000100, 0xffffffff,
49 0x00008a18, 0x00000100, 0xffffffff,
50 0x0000897c, 0x06000100, 0xffffffff,
51 0x00008b28, 0x00000100, 0xffffffff,
52 0x00009144, 0x00800200, 0xffffffff,
53 0x00009a60, 0x00000100, 0xffffffff,
54 0x00009868, 0x00000100, 0xffffffff,
55 0x00008d58, 0x00000100, 0xffffffff,
56 0x00009510, 0x00000100, 0xffffffff,
57 0x0000949c, 0x00000100, 0xffffffff,
58 0x00009654, 0x00000100, 0xffffffff,
59 0x00009030, 0x00000100, 0xffffffff,
60 0x00009034, 0x00000100, 0xffffffff,
61 0x00009038, 0x00000100, 0xffffffff,
62 0x0000903c, 0x00000100, 0xffffffff,
63 0x00009040, 0x00000100, 0xffffffff,
64 0x0000a200, 0x00000100, 0xffffffff,
65 0x0000a204, 0x00000100, 0xffffffff,
66 0x0000a208, 0x00000100, 0xffffffff,
67 0x0000a20c, 0x00000100, 0xffffffff,
68 0x00009744, 0x00000100, 0xffffffff,
69 0x00003f80, 0x00000100, 0xffffffff,
70 0x0000a210, 0x00000100, 0xffffffff,
71 0x0000a214, 0x00000100, 0xffffffff,
72 0x000004d8, 0x00000100, 0xffffffff,
73 0x00009664, 0x00000100, 0xffffffff,
74 0x00009698, 0x00000100, 0xffffffff,
75 0x000004d4, 0x00000200, 0xffffffff,
76 0x000004d0, 0x00000000, 0xffffffff,
77 0x000030cc, 0x00000104, 0xffffffff,
78 0x0000d0c0, 0x00000100, 0xffffffff,
79 0x0000d8c0, 0x00000100, 0xffffffff,
80 0x0000951c, 0x00010000, 0xffffffff,
81 0x00009160, 0x00030002, 0xffffffff,
82 0x00009164, 0x00050004, 0xffffffff,
83 0x00009168, 0x00070006, 0xffffffff,
84 0x00009178, 0x00070000, 0xffffffff,
85 0x0000917c, 0x00030002, 0xffffffff,
86 0x00009180, 0x00050004, 0xffffffff,
87 0x0000918c, 0x00010006, 0xffffffff,
88 0x00009190, 0x00090008, 0xffffffff,
89 0x00009194, 0x00070000, 0xffffffff,
90 0x00009198, 0x00030002, 0xffffffff,
91 0x0000919c, 0x00050004, 0xffffffff,
92 0x000091a8, 0x00010006, 0xffffffff,
93 0x000091ac, 0x00090008, 0xffffffff,
94 0x000091b0, 0x00070000, 0xffffffff,
95 0x000091b4, 0x00030002, 0xffffffff,
96 0x000091b8, 0x00050004, 0xffffffff,
97 0x000091c4, 0x00010006, 0xffffffff,
98 0x000091c8, 0x00090008, 0xffffffff,
99 0x000091cc, 0x00070000, 0xffffffff,
100 0x000091d0, 0x00030002, 0xffffffff,
101 0x000091d4, 0x00050004, 0xffffffff,
102 0x000091e0, 0x00010006, 0xffffffff,
103 0x000091e4, 0x00090008, 0xffffffff,
104 0x000091e8, 0x00000000, 0xffffffff,
105 0x000091ec, 0x00070000, 0xffffffff,
106 0x000091f0, 0x00030002, 0xffffffff,
107 0x000091f4, 0x00050004, 0xffffffff,
108 0x00009200, 0x00010006, 0xffffffff,
109 0x00009204, 0x00090008, 0xffffffff,
110 0x00009208, 0x00070000, 0xffffffff,
111 0x0000920c, 0x00030002, 0xffffffff,
112 0x00009210, 0x00050004, 0xffffffff,
113 0x0000921c, 0x00010006, 0xffffffff,
114 0x00009220, 0x00090008, 0xffffffff,
115 0x00009294, 0x00000000, 0xffffffff
116};
117
118static const u32 trinity_mgcg_shls_enable[] =
119{
120 /* Register, Value, Mask */
121 0x0000802c, 0xc0000000, 0xffffffff,
122 0x000008f8, 0x00000000, 0xffffffff,
123 0x000008fc, 0x00000000, 0x000133FF,
124 0x000008f8, 0x00000001, 0xffffffff,
125 0x000008fc, 0x00000000, 0xE00B03FC,
126 0x00009150, 0x96944200, 0xffffffff
127};
128
129static const u32 trinity_mgcg_shls_disable[] =
130{
131 /* Register, Value, Mask */
132 0x0000802c, 0xc0000000, 0xffffffff,
133 0x00009150, 0x00600000, 0xffffffff,
134 0x000008f8, 0x00000000, 0xffffffff,
135 0x000008fc, 0xffffffff, 0x000133FF,
136 0x000008f8, 0x00000001, 0xffffffff,
137 0x000008fc, 0xffffffff, 0xE00B03FC
138};
139#endif
140
141#ifndef TRINITY_SYSLS_SEQUENCE
142#define TRINITY_SYSLS_SEQUENCE 100
143
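/*
 * System LS (light sleep, presumably) clock gating sequences, in the
 * same (register, value, mask) triplet format, toggled at runtime by
 * trinity_ls_clockgating_enable().
 */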
144static const u32 trinity_sysls_default[] =
145{
146 /* Register, Value, Mask */
147 0x000055e8, 0x00000000, 0xffffffff,
148 0x0000d0bc, 0x00000000, 0xffffffff,
149 0x0000d8bc, 0x00000000, 0xffffffff,
150 0x000015c0, 0x000c1401, 0xffffffff,
151 0x0000264c, 0x000c0400, 0xffffffff,
152 0x00002648, 0x000c0400, 0xffffffff,
153 0x00002650, 0x000c0400, 0xffffffff,
154 0x000020b8, 0x000c0400, 0xffffffff,
155 0x000020bc, 0x000c0400, 0xffffffff,
156 0x000020c0, 0x000c0c80, 0xffffffff,
157 0x0000f4a0, 0x000000c0, 0xffffffff,
158 0x0000f4a4, 0x00680fff, 0xffffffff,
159 0x00002f50, 0x00000404, 0xffffffff,
160 0x000004c8, 0x00000001, 0xffffffff,
161 0x0000641c, 0x00000000, 0xffffffff,
162 0x00000c7c, 0x00000000, 0xffffffff,
163 0x00006dfc, 0x00000000, 0xffffffff
164};
165
166static const u32 trinity_sysls_disable[] =
167{
168 /* Register, Value, Mask */
169 0x0000d0c0, 0x00000000, 0xffffffff,
170 0x0000d8c0, 0x00000000, 0xffffffff,
171 0x000055e8, 0x00000000, 0xffffffff,
172 0x0000d0bc, 0x00000000, 0xffffffff,
173 0x0000d8bc, 0x00000000, 0xffffffff,
174 0x000015c0, 0x00041401, 0xffffffff,
175 0x0000264c, 0x00040400, 0xffffffff,
176 0x00002648, 0x00040400, 0xffffffff,
177 0x00002650, 0x00040400, 0xffffffff,
178 0x000020b8, 0x00040400, 0xffffffff,
179 0x000020bc, 0x00040400, 0xffffffff,
180 0x000020c0, 0x00040c80, 0xffffffff,
181 0x0000f4a0, 0x000000c0, 0xffffffff,
182 0x0000f4a4, 0x00680000, 0xffffffff,
183 0x00002f50, 0x00000404, 0xffffffff,
184 0x000004c8, 0x00000001, 0xffffffff,
185 0x0000641c, 0x00007ffd, 0xffffffff,
186 0x00000c7c, 0x0000ff00, 0xffffffff,
187 0x00006dfc, 0x0000007f, 0xffffffff
188};
189
190static const u32 trinity_sysls_enable[] =
191{
192 /* Register, Value, Mask */
193 0x000055e8, 0x00000001, 0xffffffff,
194 0x0000d0bc, 0x00000100, 0xffffffff,
195 0x0000d8bc, 0x00000100, 0xffffffff,
196 0x000015c0, 0x000c1401, 0xffffffff,
197 0x0000264c, 0x000c0400, 0xffffffff,
198 0x00002648, 0x000c0400, 0xffffffff,
199 0x00002650, 0x000c0400, 0xffffffff,
200 0x000020b8, 0x000c0400, 0xffffffff,
201 0x000020bc, 0x000c0400, 0xffffffff,
202 0x000020c0, 0x000c0c80, 0xffffffff,
203 0x0000f4a0, 0x000000c0, 0xffffffff,
204 0x0000f4a4, 0x00680fff, 0xffffffff,
205 0x00002f50, 0x00000903, 0xffffffff,
206 0x000004c8, 0x00000000, 0xffffffff,
207 0x0000641c, 0x00000000, 0xffffffff,
208 0x00000c7c, 0x00000000, 0xffffffff,
209 0x00006dfc, 0x00000000, 0xffffffff
210};
211#endif
212
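/*
 * Override sequence for dynamic medium grain powergating (MGPG). Unlike
 * the gating tables above, these are plain (register, value) pairs
 * written verbatim; the repeating 0x200/0x204 pattern looks like an
 * index/data register pair (assumption from the layout).
 */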
213static const u32 trinity_override_mgpg_sequences[] =
214{
215 /* Register, Value */
216 0x00000200, 0xE030032C,
217 0x00000204, 0x00000FFF,
218 0x00000200, 0xE0300058,
219 0x00000204, 0x00030301,
220 0x00000200, 0xE0300054,
221 0x00000204, 0x500010FF,
222 0x00000200, 0xE0300074,
223 0x00000204, 0x00030301,
224 0x00000200, 0xE0300070,
225 0x00000204, 0x500010FF,
226 0x00000200, 0xE0300090,
227 0x00000204, 0x00030301,
228 0x00000200, 0xE030008C,
229 0x00000204, 0x500010FF,
230 0x00000200, 0xE03000AC,
231 0x00000204, 0x00030301,
232 0x00000200, 0xE03000A8,
233 0x00000204, 0x500010FF,
234 0x00000200, 0xE03000C8,
235 0x00000204, 0x00030301,
236 0x00000200, 0xE03000C4,
237 0x00000204, 0x500010FF,
238 0x00000200, 0xE03000E4,
239 0x00000204, 0x00030301,
240 0x00000200, 0xE03000E0,
241 0x00000204, 0x500010FF,
242 0x00000200, 0xE0300100,
243 0x00000204, 0x00030301,
244 0x00000200, 0xE03000FC,
245 0x00000204, 0x500010FF,
246 0x00000200, 0xE0300058,
247 0x00000204, 0x00030303,
248 0x00000200, 0xE0300054,
249 0x00000204, 0x600010FF,
250 0x00000200, 0xE0300074,
251 0x00000204, 0x00030303,
252 0x00000200, 0xE0300070,
253 0x00000204, 0x600010FF,
254 0x00000200, 0xE0300090,
255 0x00000204, 0x00030303,
256 0x00000200, 0xE030008C,
257 0x00000204, 0x600010FF,
258 0x00000200, 0xE03000AC,
259 0x00000204, 0x00030303,
260 0x00000200, 0xE03000A8,
261 0x00000204, 0x600010FF,
262 0x00000200, 0xE03000C8,
263 0x00000204, 0x00030303,
264 0x00000200, 0xE03000C4,
265 0x00000204, 0x600010FF,
266 0x00000200, 0xE03000E4,
267 0x00000204, 0x00030303,
268 0x00000200, 0xE03000E0,
269 0x00000204, 0x600010FF,
270 0x00000200, 0xE0300100,
271 0x00000204, 0x00030303,
272 0x00000200, 0xE03000FC,
273 0x00000204, 0x600010FF,
274 0x00000200, 0xE0300058,
275 0x00000204, 0x00030303,
276 0x00000200, 0xE0300054,
277 0x00000204, 0x700010FF,
278 0x00000200, 0xE0300074,
279 0x00000204, 0x00030303,
280 0x00000200, 0xE0300070,
281 0x00000204, 0x700010FF,
282 0x00000200, 0xE0300090,
283 0x00000204, 0x00030303,
284 0x00000200, 0xE030008C,
285 0x00000204, 0x700010FF,
286 0x00000200, 0xE03000AC,
287 0x00000204, 0x00030303,
288 0x00000200, 0xE03000A8,
289 0x00000204, 0x700010FF,
290 0x00000200, 0xE03000C8,
291 0x00000204, 0x00030303,
292 0x00000200, 0xE03000C4,
293 0x00000204, 0x700010FF,
294 0x00000200, 0xE03000E4,
295 0x00000204, 0x00030303,
296 0x00000200, 0xE03000E0,
297 0x00000204, 0x700010FF,
298 0x00000200, 0xE0300100,
299 0x00000204, 0x00030303,
300 0x00000200, 0xE03000FC,
301 0x00000204, 0x700010FF,
302 0x00000200, 0xE0300058,
303 0x00000204, 0x00010303,
304 0x00000200, 0xE0300054,
305 0x00000204, 0x800010FF,
306 0x00000200, 0xE0300074,
307 0x00000204, 0x00010303,
308 0x00000200, 0xE0300070,
309 0x00000204, 0x800010FF,
310 0x00000200, 0xE0300090,
311 0x00000204, 0x00010303,
312 0x00000200, 0xE030008C,
313 0x00000204, 0x800010FF,
314 0x00000200, 0xE03000AC,
315 0x00000204, 0x00010303,
316 0x00000200, 0xE03000A8,
317 0x00000204, 0x800010FF,
318 0x00000200, 0xE03000C4,
319 0x00000204, 0x800010FF,
320 0x00000200, 0xE03000C8,
321 0x00000204, 0x00010303,
322 0x00000200, 0xE03000E4,
323 0x00000204, 0x00010303,
324 0x00000200, 0xE03000E0,
325 0x00000204, 0x800010FF,
326 0x00000200, 0xE0300100,
327 0x00000204, 0x00010303,
328 0x00000200, 0xE03000FC,
329 0x00000204, 0x800010FF,
330 0x00000200, 0x0001f198,
331 0x00000204, 0x0003ffff,
332 0x00000200, 0x0001f19C,
333 0x00000204, 0x3fffffff,
334 0x00000200, 0xE030032C,
335 0x00000204, 0x00000000,
336};
337
338static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev,
339 const u32 *seq, u32 count);
340static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev);
341static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
342 struct radeon_ps *new_rps,
343 struct radeon_ps *old_rps);
344
345struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
346{
347 struct trinity_ps *ps = rps->ps_priv;
348
349 return ps;
350}
351
352struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
353{
354 struct trinity_power_info *pi = rdev->pm.dpm.priv;
355
356 return pi;
357}
358
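/*
 * One-time GFX powergating setup: program the powerdown sequence clock
 * divider from the PLL dividers for a fixed 25000 engine clock
 * (presumably 250 MHz, given radeon's 10 kHz clock units), then derive
 * the SP/SU and GIPOT timing fields from the reference xclk via
 * r600_calculate_u_and_p().
 */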
359static void trinity_gfx_powergating_initialize(struct radeon_device *rdev)
360{
361 struct trinity_power_info *pi = trinity_get_pi(rdev);
362 u32 p, u;
363 u32 value;
364 struct atom_clock_dividers dividers;
365 u32 xclk = radeon_get_xclk(rdev);
366 u32 sssd = 1;
367 int ret;
368 u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
369
370 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
371 25000, false, &dividers);
372 if (ret)
373 return;
374
375 value = RREG32_SMC(GFX_POWER_GATING_CNTL);
376 value &= ~(SSSD_MASK | PDS_DIV_MASK);
377 if (sssd)
378 value |= SSSD(1);
379 value |= PDS_DIV(dividers.post_div);
380 WREG32_SMC(GFX_POWER_GATING_CNTL, value);
381
382 r600_calculate_u_and_p(500, xclk, 16, &p, &u);
383
384 WREG32(CG_PG_CTRL, SP(p) | SU(u));
385
386 WREG32_P(CG_GIPOTS, CG_GIPOT(p), ~CG_GIPOT_MASK);
387
388 /* XXX double check hw_rev */
389 if (pi->override_dynamic_mgpg && (hw_rev == 0))
390 trinity_override_dynamic_mg_powergating(rdev);
392}
393
394#define CGCG_CGTT_LOCAL0_MASK 0xFFFF33FF
395#define CGCG_CGTT_LOCAL1_MASK 0xFFFB0FFE
396#define CGTS_SM_CTRL_REG_DISABLE 0x00600000
397#define CGTS_SM_CTRL_REG_ENABLE 0x96944200
398
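/*
 * Toggle medium grain clock gating. The CGTT_LOCAL registers are
 * updated with a masked merge: bits inside CGCG_CGTT_LOCALn_MASK take
 * the new value, bits outside it keep their current hardware state;
 * the disable path forces all masked bits high instead.
 */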
399static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
400 bool enable)
401{
402 u32 local0;
403 u32 local1;
404
405 if (enable) {
406 local0 = RREG32_CG(CG_CGTT_LOCAL_0);
407 local1 = RREG32_CG(CG_CGTT_LOCAL_1);
408
409 WREG32_CG(CG_CGTT_LOCAL_0,
410 (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
411 WREG32_CG(CG_CGTT_LOCAL_1,
412 (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
413
414 WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE);
415 } else {
416 WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_DISABLE);
417
418 local0 = RREG32_CG(CG_CGTT_LOCAL_0);
419 local1 = RREG32_CG(CG_CGTT_LOCAL_1);
420
421 WREG32_CG(CG_CGTT_LOCAL_0,
422 CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
423 WREG32_CG(CG_CGTT_LOCAL_1,
424 CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
425 }
426}
427
428static void trinity_mg_clockgating_initialize(struct radeon_device *rdev)
429{
430 u32 count;
431 const u32 *seq = NULL;
432
433 seq = &trinity_mgcg_shls_default[0];
434 count = sizeof(trinity_mgcg_shls_default) / (3 * sizeof(u32));
435
436 trinity_program_clk_gating_hw_sequence(rdev, seq, count);
437}
438
439static void trinity_gfx_clockgating_enable(struct radeon_device *rdev,
440 bool enable)
441{
442 if (enable) {
443 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
444 } else {
445 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
446 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
447 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
448 RREG32(GB_ADDR_CONFIG);
449 }
450}
451
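/*
 * Walk a (register, value, mask) table of "count" triplets, doing a
 * masked read-modify-write per entry.
 */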
452static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev,
453 const u32 *seq, u32 count)
454{
455 u32 i, length = count * 3;
456
457 for (i = 0; i < length; i += 3)
458 WREG32_P(seq[i], seq[i+1], ~seq[i+2]);
459}
460
461static void trinity_program_override_mgpg_sequences(struct radeon_device *rdev,
462 const u32 *seq, u32 count)
463{
464 u32 i, length = count * 2;
465
466 for (i = 0; i < length; i += 2)
467 WREG32(seq[i], seq[i+1]);
469}
470
471static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev)
472{
473 u32 count;
474 const u32 *seq = NULL;
475
476 seq = &trinity_override_mgpg_sequences[0];
477 count = sizeof(trinity_override_mgpg_sequences) / (2 * sizeof(u32));
478
479 trinity_program_override_mgpg_sequences(rdev, seq, count);
480}
481
482static void trinity_ls_clockgating_enable(struct radeon_device *rdev,
483 bool enable)
484{
485 u32 count;
486 const u32 *seq = NULL;
487
488 if (enable) {
489 seq = &trinity_sysls_enable[0];
490 count = sizeof(trinity_sysls_enable) / (3 * sizeof(u32));
491 } else {
492 seq = &trinity_sysls_disable[0];
493 count = sizeof(trinity_sysls_disable) / (3 * sizeof(u32));
494 }
495
496 trinity_program_clk_gating_hw_sequence(rdev, seq, count);
497}
498
499static void trinity_gfx_powergating_enable(struct radeon_device *rdev,
500 bool enable)
501{
502 if (enable) {
503 if (RREG32_SMC(CC_SMU_TST_EFUSE1_MISC) & RB_BACKEND_DISABLE_MASK)
504 WREG32_SMC(SMU_SCRATCH_A, (RREG32_SMC(SMU_SCRATCH_A) | 0x01));
505
506 WREG32_P(SCLK_PWRMGT_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
507 } else {
508 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_PWR_DOWN_EN);
509 RREG32(GB_ADDR_CONFIG);
510 }
511}
512
513static void trinity_gfx_dynamic_mgpg_enable(struct radeon_device *rdev,
514 bool enable)
515{
516 u32 value;
517
518 if (enable) {
519 value = RREG32_SMC(PM_I_CNTL_1);
520 value &= ~DS_PG_CNTL_MASK;
521 value |= DS_PG_CNTL(1);
522 WREG32_SMC(PM_I_CNTL_1, value);
523
524 value = RREG32_SMC(SMU_S_PG_CNTL);
525 value &= ~DS_PG_EN_MASK;
526 value |= DS_PG_EN(1);
527 WREG32_SMC(SMU_S_PG_CNTL, value);
528 } else {
529 value = RREG32_SMC(SMU_S_PG_CNTL);
530 value &= ~DS_PG_EN_MASK;
531 WREG32_SMC(SMU_S_PG_CNTL, value);
532
533 value = RREG32_SMC(PM_I_CNTL_1);
534 value &= ~DS_PG_CNTL_MASK;
535 WREG32_SMC(PM_I_CNTL_1, value);
536 }
537
538 trinity_gfx_dynamic_mgpg_config(rdev);
540}
541
542static void trinity_enable_clock_power_gating(struct radeon_device *rdev)
543{
544 struct trinity_power_info *pi = trinity_get_pi(rdev);
545
546 if (pi->enable_gfx_clock_gating)
547 sumo_gfx_clockgating_initialize(rdev);
548 if (pi->enable_mg_clock_gating)
549 trinity_mg_clockgating_initialize(rdev);
550 if (pi->enable_gfx_power_gating)
551 trinity_gfx_powergating_initialize(rdev);
552 if (pi->enable_mg_clock_gating) {
553 trinity_ls_clockgating_enable(rdev, true);
554 trinity_mg_clockgating_enable(rdev, true);
555 }
556 if (pi->enable_gfx_clock_gating)
557 trinity_gfx_clockgating_enable(rdev, true);
558 if (pi->enable_gfx_dynamic_mgpg)
559 trinity_gfx_dynamic_mgpg_enable(rdev, true);
560 if (pi->enable_gfx_power_gating)
561 trinity_gfx_powergating_enable(rdev, true);
562}
563
564static void trinity_disable_clock_power_gating(struct radeon_device *rdev)
565{
566 struct trinity_power_info *pi = trinity_get_pi(rdev);
567
568 if (pi->enable_gfx_power_gating)
569 trinity_gfx_powergating_enable(rdev, false);
570 if (pi->enable_gfx_dynamic_mgpg)
571 trinity_gfx_dynamic_mgpg_enable(rdev, false);
572 if (pi->enable_gfx_clock_gating)
573 trinity_gfx_clockgating_enable(rdev, false);
574 if (pi->enable_mg_clock_gating) {
575 trinity_mg_clockgating_enable(rdev, false);
576 trinity_ls_clockgating_enable(rdev, false);
577 }
578}
579
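/*
 * The SMU keeps one block of DPM state registers per power level at a
 * fixed stride (TRINITY_SIZEOF_DPM_STATE_TABLE); "ix" below selects the
 * block for the given level. This helper programs the engine clock post
 * divider plus a second divider computed from sclk/2, presumably used
 * while the level is powergated.
 */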
580static void trinity_set_divider_value(struct radeon_device *rdev,
581 u32 index, u32 sclk)
582{
583 struct atom_clock_dividers dividers;
584 int ret;
585 u32 value;
586 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
587
588 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
589 sclk, false, &dividers);
590 if (ret)
591 return;
592
593 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
594 value &= ~CLK_DIVIDER_MASK;
595 value |= CLK_DIVIDER(dividers.post_div);
596 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
597
598 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
599 sclk/2, false, &dividers);
600 if (ret)
601 return;
602
603 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix);
604 value &= ~PD_SCLK_DIVIDER_MASK;
605 value |= PD_SCLK_DIVIDER(dividers.post_div);
606 WREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix, value);
607}
608
609static void trinity_set_ds_dividers(struct radeon_device *rdev,
610 u32 index, u32 divider)
611{
612 u32 value;
613 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
614
615 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
616 value &= ~DS_DIV_MASK;
617 value |= DS_DIV(divider);
618 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
619}
620
621static void trinity_set_ss_dividers(struct radeon_device *rdev,
622 u32 index, u32 divider)
623{
624 u32 value;
625 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
626
627 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
628 value &= ~DS_SH_DIV_MASK;
629 value |= DS_SH_DIV(divider);
630 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
631}
632
633static void trinity_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
634{
635 struct trinity_power_info *pi = trinity_get_pi(rdev);
636 u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid);
637 u32 value;
638 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
639
640 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
641 value &= ~VID_MASK;
642 value |= VID(vid_7bit);
643 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
644
645 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
646 value &= ~LVRT_MASK;
647 value |= LVRT(0);
648 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
649}
650
651static void trinity_set_allos_gnb_slow(struct radeon_device *rdev,
652 u32 index, u32 gnb_slow)
653{
654 u32 value;
655 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
656
657 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix);
658 value &= ~GNB_SLOW_MASK;
659 value |= GNB_SLOW(gnb_slow);
660 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix, value);
661}
662
663static void trinity_set_force_nbp_state(struct radeon_device *rdev,
664 u32 index, u32 force_nbp_state)
665{
666 u32 value;
667 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
668
669 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix);
670 value &= ~FORCE_NBPS1_MASK;
671 value |= FORCE_NBPS1(force_nbp_state);
672 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_3 + ix, value);
673}
674
675static void trinity_set_display_wm(struct radeon_device *rdev,
676 u32 index, u32 wm)
677{
678 u32 value;
679 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
680
681 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
682 value &= ~DISPLAY_WM_MASK;
683 value |= DISPLAY_WM(wm);
684 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
685}
686
687static void trinity_set_vce_wm(struct radeon_device *rdev,
688 u32 index, u32 wm)
689{
690 u32 value;
691 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
692
693 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);
694 value &= ~VCE_WM_MASK;
695 value |= VCE_WM(wm);
696 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);
697}
698
699static void trinity_set_at(struct radeon_device *rdev,
700 u32 index, u32 at)
701{
702 u32 value;
703 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
704
705 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_AT + ix);
706 value &= ~AT_MASK;
707 value |= AT(at);
708 WREG32_SMC(SMU_SCLK_DPM_STATE_0_AT + ix, value);
709}
710
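/* Write all fields of one power level into its SMU DPM state table slot. */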
711static void trinity_program_power_level(struct radeon_device *rdev,
712 struct trinity_pl *pl, u32 index)
713{
714 struct trinity_power_info *pi = trinity_get_pi(rdev);
715
716 if (index >= SUMO_MAX_HARDWARE_POWERLEVELS)
717 return;
718
719 trinity_set_divider_value(rdev, index, pl->sclk);
720 trinity_set_vid(rdev, index, pl->vddc_index);
721 trinity_set_ss_dividers(rdev, index, pl->ss_divider_index);
722 trinity_set_ds_dividers(rdev, index, pl->ds_divider_index);
723 trinity_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
724 trinity_set_force_nbp_state(rdev, index, pl->force_nbp_state);
725 trinity_set_display_wm(rdev, index, pl->display_wm);
726 trinity_set_vce_wm(rdev, index, pl->vce_wm);
727 trinity_set_at(rdev, index, pi->at[index]);
728}
729
730static void trinity_power_level_enable_disable(struct radeon_device *rdev,
731 u32 index, bool enable)
732{
733 u32 value;
734 u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
735
736 value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);
737 value &= ~STATE_VALID_MASK;
738 if (enable)
739 value |= STATE_VALID(1);
740 WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
741}
742
743static bool trinity_dpm_enabled(struct radeon_device *rdev)
744{
745 if (RREG32_SMC(SMU_SCLK_DPM_CNTL) & SCLK_DPM_EN(1))
746 return true;
747 else
748 return false;
749}
750
751static void trinity_start_dpm(struct radeon_device *rdev)
752{
753 u32 value = RREG32_SMC(SMU_SCLK_DPM_CNTL);
754
755 value &= ~(SCLK_DPM_EN_MASK | SCLK_DPM_BOOT_STATE_MASK | VOLTAGE_CHG_EN_MASK);
756 value |= SCLK_DPM_EN(1) | SCLK_DPM_BOOT_STATE(0) | VOLTAGE_CHG_EN(1);
757 WREG32_SMC(SMU_SCLK_DPM_CNTL, value);
758
759 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
760 WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~EN);
761
762 trinity_dpm_config(rdev, true);
763}
764
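/*
 * Poll (up to rdev->usec_timeout iterations of udelay(1) each) for
 * dynamic PM to come up and for the target and current profile indices
 * to settle at state 0.
 */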
765static void trinity_wait_for_dpm_enabled(struct radeon_device *rdev)
766{
767 int i;
768
769 for (i = 0; i < rdev->usec_timeout; i++) {
770 if (RREG32(SCLK_PWRMGT_CNTL) & DYNAMIC_PM_EN)
771 break;
772 udelay(1);
773 }
774 for (i = 0; i < rdev->usec_timeout; i++) {
775 if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_STATE_MASK) == 0)
776 break;
777 udelay(1);
778 }
779 for (i = 0; i < rdev->usec_timeout; i++) {
780 if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) == 0)
781 break;
782 udelay(1);
783 }
784}
785
786static void trinity_stop_dpm(struct radeon_device *rdev)
787{
788 u32 sclk_dpm_cntl;
789
790 WREG32_P(CG_CG_VOLTAGE_CNTL, EN, ~EN);
791
792 sclk_dpm_cntl = RREG32_SMC(SMU_SCLK_DPM_CNTL);
793 sclk_dpm_cntl &= ~(SCLK_DPM_EN_MASK | VOLTAGE_CHG_EN_MASK);
794 WREG32_SMC(SMU_SCLK_DPM_CNTL, sclk_dpm_cntl);
795
796 trinity_dpm_config(rdev, false);
797}
798
799static void trinity_start_am(struct radeon_device *rdev)
800{
801 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~(RESET_SCLK_CNT | RESET_BUSY_CNT));
802}
803
804static void trinity_reset_am(struct radeon_device *rdev)
805{
806 WREG32_P(SCLK_PWRMGT_CNTL, RESET_SCLK_CNT | RESET_BUSY_CNT,
807 ~(RESET_SCLK_CNT | RESET_BUSY_CNT));
808}
809
810static void trinity_wait_for_level_0(struct radeon_device *rdev)
811{
812 int i;
813
814 for (i = 0; i < rdev->usec_timeout; i++) {
815 if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) == 0)
816 break;
817 udelay(1);
818 }
819}
820
821static void trinity_enable_power_level_0(struct radeon_device *rdev)
822{
823 trinity_power_level_enable_disable(rdev, 0, true);
824}
825
826static void trinity_force_level_0(struct radeon_device *rdev)
827{
828 trinity_dpm_force_state(rdev, 0);
829}
830
831static void trinity_unforce_levels(struct radeon_device *rdev)
832{
833 trinity_dpm_no_forced_level(rdev);
834}
835
836static void trinity_program_power_levels_0_to_n(struct radeon_device *rdev,
837 struct radeon_ps *new_rps,
838 struct radeon_ps *old_rps)
839{
840 struct trinity_ps *new_ps = trinity_get_ps(new_rps);
841 struct trinity_ps *old_ps = trinity_get_ps(old_rps);
842 u32 i;
843 u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;
844
845 for (i = 0; i < new_ps->num_levels; i++) {
846 trinity_program_power_level(rdev, &new_ps->levels[i], i);
847 trinity_power_level_enable_disable(rdev, i, true);
848 }
849
850 for (i = new_ps->num_levels; i < n_current_state_levels; i++)
851 trinity_power_level_enable_disable(rdev, i, false);
852}
853
854static void trinity_program_bootup_state(struct radeon_device *rdev)
855{
856 struct trinity_power_info *pi = trinity_get_pi(rdev);
857 u32 i;
858
859 trinity_program_power_level(rdev, &pi->boot_pl, 0);
860 trinity_power_level_enable_disable(rdev, 0, true);
861
862 for (i = 1; i < 8; i++)
863 trinity_power_level_enable_disable(rdev, i, false);
864}
865
866static void trinity_setup_uvd_clock_table(struct radeon_device *rdev,
867 struct radeon_ps *rps)
868{
869 struct trinity_ps *ps = trinity_get_ps(rps);
870 u32 uvdstates = (ps->vclk_low_divider |
871 ps->vclk_high_divider << 8 |
872 ps->dclk_low_divider << 16 |
873 ps->dclk_high_divider << 24);
874
875 WREG32_SMC(SMU_UVD_DPM_STATES, uvdstates);
876}
877
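/*
 * Express the requested UVD DPM interval in PM_TP ticks: p is the
 * interval converted against the reference xclk, and the division
 * rounds up, i.e. val = DIV_ROUND_UP(p, tp).
 */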
878static void trinity_setup_uvd_dpm_interval(struct radeon_device *rdev,
879 u32 interval)
880{
881 u32 p, u;
882 u32 tp = RREG32_SMC(PM_TP);
883 u32 val;
884 u32 xclk = radeon_get_xclk(rdev);
885
886 r600_calculate_u_and_p(interval, xclk, 16, &p, &u);
887
888 val = (p + tp - 1) / tp;
889
890 WREG32_SMC(SMU_UVD_DPM_CNTL, val);
891}
892
893static bool trinity_uvd_clocks_zero(struct radeon_ps *rps)
894{
895 if ((rps->vclk == 0) && (rps->dclk == 0))
896 return true;
897 else
898 return false;
899}
900
901static bool trinity_uvd_clocks_equal(struct radeon_ps *rps1,
902 struct radeon_ps *rps2)
903{
904 struct trinity_ps *ps1 = trinity_get_ps(rps1);
905 struct trinity_ps *ps2 = trinity_get_ps(rps2);
906
907 if ((rps1->vclk == rps2->vclk) &&
908 (rps1->dclk == rps2->dclk) &&
909 (ps1->vclk_low_divider == ps2->vclk_low_divider) &&
910 (ps1->vclk_high_divider == ps2->vclk_high_divider) &&
911 (ps1->dclk_low_divider == ps2->dclk_low_divider) &&
912 (ps1->dclk_high_divider == ps2->dclk_high_divider))
913 return true;
914 else
915 return false;
916}
917
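/*
 * Reprogram UVD clocks with GFX powergating temporarily disabled. With
 * uvd_dpm the SMU switches between the divider pairs programmed into
 * SMU_UVD_DPM_STATES; the dpm interval is set to 0 when UVD goes idle
 * and to 3000 when it comes back from zero clocks. Otherwise the
 * clocks are set directly.
 */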
918static void trinity_setup_uvd_clocks(struct radeon_device *rdev,
919 struct radeon_ps *new_rps,
920 struct radeon_ps *old_rps)
921{
922 struct trinity_power_info *pi = trinity_get_pi(rdev);
923
924 if (pi->enable_gfx_power_gating) {
925 trinity_gfx_powergating_enable(rdev, false);
926 }
927
928 if (pi->uvd_dpm) {
929 if (trinity_uvd_clocks_zero(new_rps) &&
930 !trinity_uvd_clocks_zero(old_rps)) {
931 trinity_setup_uvd_dpm_interval(rdev, 0);
932 } else if (!trinity_uvd_clocks_zero(new_rps)) {
933 trinity_setup_uvd_clock_table(rdev, new_rps);
934
935 if (trinity_uvd_clocks_zero(old_rps)) {
936 u32 tmp = RREG32(CG_MISC_REG);
937 tmp &= 0xfffffffd;
938 WREG32(CG_MISC_REG, tmp);
939
940 radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
941
942 trinity_setup_uvd_dpm_interval(rdev, 3000);
943 }
944 }
945 trinity_uvd_dpm_config(rdev);
946 } else {
947		/* skip redundant updates, but don't return early: gfx
948		 * powergating still has to be re-enabled below */
949		if (!trinity_uvd_clocks_zero(new_rps) &&
950		    !trinity_uvd_clocks_equal(new_rps, old_rps))
951			radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
952 }
953
954 if (pi->enable_gfx_power_gating) {
955 trinity_gfx_powergating_enable(rdev, true);
956 }
957}
958
959static void trinity_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
960 struct radeon_ps *new_rps,
961 struct radeon_ps *old_rps)
962{
963 struct trinity_ps *new_ps = trinity_get_ps(new_rps);
964	struct trinity_ps *current_ps = trinity_get_ps(old_rps);
965
966 if (new_ps->levels[new_ps->num_levels - 1].sclk >=
967 current_ps->levels[current_ps->num_levels - 1].sclk)
968 return;
969
970 trinity_setup_uvd_clocks(rdev, new_rps, old_rps);
971}
972
973static void trinity_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
974 struct radeon_ps *new_rps,
975 struct radeon_ps *old_rps)
976{
977 struct trinity_ps *new_ps = trinity_get_ps(new_rps);
978 struct trinity_ps *current_ps = trinity_get_ps(old_rps);
979
980 if (new_ps->levels[new_ps->num_levels - 1].sclk <
981 current_ps->levels[current_ps->num_levels - 1].sclk)
982 return;
983
984 trinity_setup_uvd_clocks(rdev, new_rps, old_rps);
985}
986
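/*
 * Program the thermal throttling high/low thresholds from the current
 * throttling limit and hysteresis. The +49 offset and *8 scaling match
 * the encoding used for the DIG_THERM interrupt fields elsewhere in
 * this file (encoding assumed from that symmetry).
 */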
987static void trinity_program_ttt(struct radeon_device *rdev)
988{
989 struct trinity_power_info *pi = trinity_get_pi(rdev);
990 u32 value = RREG32_SMC(SMU_SCLK_DPM_TTT);
991
992 value &= ~(HT_MASK | LT_MASK);
993 value |= HT((pi->thermal_auto_throttling + 49) * 8);
994 value |= LT((pi->thermal_auto_throttling + 49 - pi->sys_info.htc_hyst_lmt) * 8);
995 WREG32_SMC(SMU_SCLK_DPM_TTT, value);
996}
997
998static void trinity_enable_att(struct radeon_device *rdev)
999{
1000 u32 value = RREG32_SMC(SMU_SCLK_DPM_TT_CNTL);
1001
1002 value &= ~SCLK_TT_EN_MASK;
1003 value |= SCLK_TT_EN(1);
1004 WREG32_SMC(SMU_SCLK_DPM_TT_CNTL, value);
1005}
1006
1007static void trinity_program_sclk_dpm(struct radeon_device *rdev)
1008{
1009 u32 p, u;
1010 u32 tp = RREG32_SMC(PM_TP);
1011 u32 ni;
1012 u32 xclk = radeon_get_xclk(rdev);
1013 u32 value;
1014
1015 r600_calculate_u_and_p(400, xclk, 16, &p, &u);
1016
1017 ni = (p + tp - 1) / tp;
1018
1019 value = RREG32_SMC(PM_I_CNTL_1);
1020 value &= ~SCLK_DPM_MASK;
1021 value |= SCLK_DPM(ni);
1022 WREG32_SMC(PM_I_CNTL_1, value);
1023}
1024
1025static int trinity_set_thermal_temperature_range(struct radeon_device *rdev,
1026 int min_temp, int max_temp)
1027{
1028 int low_temp = 0 * 1000;
1029 int high_temp = 255 * 1000;
1030
1031 if (low_temp < min_temp)
1032 low_temp = min_temp;
1033 if (high_temp > max_temp)
1034 high_temp = max_temp;
1035 if (high_temp < low_temp) {
1036 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1037 return -EINVAL;
1038 }
1039
1040 WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
1041 WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
1042
1043 rdev->pm.dpm.thermal.min_temp = low_temp;
1044 rdev->pm.dpm.thermal.max_temp = high_temp;
1045
1046 return 0;
1047}
1048
1049static void trinity_update_current_ps(struct radeon_device *rdev,
1050 struct radeon_ps *rps)
1051{
1052 struct trinity_ps *new_ps = trinity_get_ps(rps);
1053 struct trinity_power_info *pi = trinity_get_pi(rdev);
1054
1055 pi->current_rps = *rps;
1056 pi->current_ps = *new_ps;
1057 pi->current_rps.ps_priv = &pi->current_ps;
1058}
1059
1060static void trinity_update_requested_ps(struct radeon_device *rdev,
1061 struct radeon_ps *rps)
1062{
1063 struct trinity_ps *new_ps = trinity_get_ps(rps);
1064 struct trinity_power_info *pi = trinity_get_pi(rdev);
1065
1066 pi->requested_rps = *rps;
1067 pi->requested_ps = *new_ps;
1068 pi->requested_rps.ps_priv = &pi->requested_ps;
1069}
1070
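/*
 * Bring up dpm under the SMU mutex; fails with -EINVAL if it is
 * already running. Gating, the boot state and the sclk dpm interval
 * are all programmed before dpm is started, and the thermal interrupt
 * is only hooked up afterwards.
 */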
1071int trinity_dpm_enable(struct radeon_device *rdev)
1072{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev);
1074 int ret;
1075
1076 trinity_acquire_mutex(rdev);
1077
1078 if (trinity_dpm_enabled(rdev)) {
1079 trinity_release_mutex(rdev);
1080 return -EINVAL;
1081 }
1082
1083 trinity_enable_clock_power_gating(rdev);
1084 trinity_program_bootup_state(rdev);
1085 sumo_program_vc(rdev, 0x00C00033);
1086 trinity_start_am(rdev);
1087 if (pi->enable_auto_thermal_throttling) {
1088 trinity_program_ttt(rdev);
1089 trinity_enable_att(rdev);
1090 }
1091 trinity_program_sclk_dpm(rdev);
1092 trinity_start_dpm(rdev);
1093 trinity_wait_for_dpm_enabled(rdev);
1094 trinity_release_mutex(rdev);
1095
1096 if (rdev->irq.installed &&
1097 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1098 ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1099		/* the SMU mutex was already released above */
1100		if (ret)
1101			return ret;
1103 rdev->irq.dpm_thermal = true;
1104 radeon_irq_set(rdev);
1105 }
1106
1107 trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1108
1109 return 0;
1110}
1111
1112void trinity_dpm_disable(struct radeon_device *rdev)
1113{
1114 trinity_acquire_mutex(rdev);
1115 if (!trinity_dpm_enabled(rdev)) {
1116 trinity_release_mutex(rdev);
1117 return;
1118 }
1119 trinity_disable_clock_power_gating(rdev);
1120 sumo_clear_vc(rdev);
1121 trinity_wait_for_level_0(rdev);
1122 trinity_stop_dpm(rdev);
1123 trinity_reset_am(rdev);
1124 trinity_release_mutex(rdev);
1125
1126 if (rdev->irq.installed &&
1127 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1128 rdev->irq.dpm_thermal = false;
1129 radeon_irq_set(rdev);
1130 }
1131
1132 trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
1133}
1134
1135static void trinity_get_min_sclk_divider(struct radeon_device *rdev)
1136{
1137 struct trinity_power_info *pi = trinity_get_pi(rdev);
1138
1139 pi->min_sclk_did =
1140 (RREG32_SMC(CC_SMU_MISC_FUSES) & MinSClkDid_MASK) >> MinSClkDid_SHIFT;
1141}
1142
1143static void trinity_setup_nbp_sim(struct radeon_device *rdev,
1144 struct radeon_ps *rps)
1145{
1146 struct trinity_power_info *pi = trinity_get_pi(rdev);
1147 struct trinity_ps *new_ps = trinity_get_ps(rps);
1148 u32 nbpsconfig;
1149
1150 if (pi->sys_info.nb_dpm_enable) {
1151 nbpsconfig = RREG32_SMC(NB_PSTATE_CONFIG);
1152 nbpsconfig &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK | DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
1153 nbpsconfig |= (Dpm0PgNbPsLo(new_ps->Dpm0PgNbPsLo) |
1154 Dpm0PgNbPsHi(new_ps->Dpm0PgNbPsHi) |
1155 DpmXNbPsLo(new_ps->DpmXNbPsLo) |
1156 DpmXNbPsHi(new_ps->DpmXNbPsHi));
1157 WREG32_SMC(NB_PSTATE_CONFIG, nbpsconfig);
1158 }
1159}
1160
1161int trinity_dpm_pre_set_power_state(struct radeon_device *rdev)
1162{
1163 struct trinity_power_info *pi = trinity_get_pi(rdev);
1164 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1165 struct radeon_ps *new_ps = &requested_ps;
1166
1167 trinity_update_requested_ps(rdev, new_ps);
1168
1169 trinity_apply_state_adjust_rules(rdev,
1170 &pi->requested_rps,
1171 &pi->current_rps);
1172
1173 return 0;
1174}
1175
1176int trinity_dpm_set_power_state(struct radeon_device *rdev)
1177{
1178 struct trinity_power_info *pi = trinity_get_pi(rdev);
1179 struct radeon_ps *new_ps = &pi->requested_rps;
1180 struct radeon_ps *old_ps = &pi->current_rps;
1181
1182 trinity_acquire_mutex(rdev);
1183 if (pi->enable_dpm) {
1184 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1185 trinity_enable_power_level_0(rdev);
1186 trinity_force_level_0(rdev);
1187 trinity_wait_for_level_0(rdev);
1188 trinity_setup_nbp_sim(rdev, new_ps);
1189 trinity_program_power_levels_0_to_n(rdev, new_ps, old_ps);
1190 trinity_force_level_0(rdev);
1191 trinity_unforce_levels(rdev);
1192 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1193 }
1194 trinity_release_mutex(rdev);
1195
1196 return 0;
1197}
1198
1199void trinity_dpm_post_set_power_state(struct radeon_device *rdev)
1200{
1201 struct trinity_power_info *pi = trinity_get_pi(rdev);
1202 struct radeon_ps *new_ps = &pi->requested_rps;
1203
1204 trinity_update_current_ps(rdev, new_ps);
1205}
1206
1207void trinity_dpm_setup_asic(struct radeon_device *rdev)
1208{
1209 trinity_acquire_mutex(rdev);
1210 sumo_program_sstp(rdev);
1211 sumo_take_smu_control(rdev, true);
1212 trinity_get_min_sclk_divider(rdev);
1213 trinity_release_mutex(rdev);
1214}
1215
1216void trinity_dpm_reset_asic(struct radeon_device *rdev)
1217{
1218 struct trinity_power_info *pi = trinity_get_pi(rdev);
1219
1220 trinity_acquire_mutex(rdev);
1221 if (pi->enable_dpm) {
1222 trinity_enable_power_level_0(rdev);
1223 trinity_force_level_0(rdev);
1224 trinity_wait_for_level_0(rdev);
1225 trinity_program_bootup_state(rdev);
1226 trinity_force_level_0(rdev);
1227 trinity_unforce_levels(rdev);
1228 }
1229 trinity_release_mutex(rdev);
1230}
1231
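/*
 * Convert a 2-bit voltage index to millivolts: map it to the 7-bit
 * VID, then step down from 1.55 V in 12.5 mV steps (6.25 mV in SVI
 * mode). The arithmetic is carried out in hundredths of a millivolt
 * (assumption based on the constants).
 */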
1232static u16 trinity_convert_voltage_index_to_value(struct radeon_device *rdev,
1233 u32 vid_2bit)
1234{
1235 struct trinity_power_info *pi = trinity_get_pi(rdev);
1236 u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
1237 u32 svi_mode = (RREG32_SMC(PM_CONFIG) & SVI_Mode) ? 1 : 0;
1238 u32 step = (svi_mode == 0) ? 1250 : 625;
1239 u32 delta = vid_7bit * step + 50;
1240
1241 if (delta > 155000)
1242 return 0;
1243
1244 return (155000 - delta) / 100;
1245}
1246
1247static void trinity_patch_boot_state(struct radeon_device *rdev,
1248 struct trinity_ps *ps)
1249{
1250 struct trinity_power_info *pi = trinity_get_pi(rdev);
1251
1252 ps->num_levels = 1;
1253 ps->nbps_flags = 0;
1254 ps->bapm_flags = 0;
1255 ps->levels[0] = pi->boot_pl;
1256}
1257
1258static u8 trinity_calculate_vce_wm(struct radeon_device *rdev, u32 sclk)
1259{
1260 if (sclk < 20000)
1261 return 1;
1262 return 0;
1263}
1264
1265static void trinity_construct_boot_state(struct radeon_device *rdev)
1266{
1267 struct trinity_power_info *pi = trinity_get_pi(rdev);
1268
1269 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1270 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1271 pi->boot_pl.ds_divider_index = 0;
1272 pi->boot_pl.ss_divider_index = 0;
1273 pi->boot_pl.allow_gnb_slow = 1;
1274 pi->boot_pl.force_nbp_state = 0;
1275 pi->boot_pl.display_wm = 0;
1276 pi->boot_pl.vce_wm = 0;
1277 pi->current_ps.num_levels = 1;
1278 pi->current_ps.levels[0] = pi->boot_pl;
1279}
1280
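/*
 * Pick the deepest sleep divider ID whose divided sclk still meets the
 * minimum engine clock; returns 0 (no deep sleep divide) when sclk is
 * already below the floor or sclk deep sleep is disabled.
 */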
1281static u8 trinity_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1282 u32 sclk, u32 min_sclk_in_sr)
1283{
1284 struct trinity_power_info *pi = trinity_get_pi(rdev);
1285 u32 i;
1286 u32 temp;
1287 u32 min = (min_sclk_in_sr > TRINITY_MINIMUM_ENGINE_CLOCK) ?
1288 min_sclk_in_sr : TRINITY_MINIMUM_ENGINE_CLOCK;
1289
1290 if (sclk < min)
1291 return 0;
1292
1293 if (!pi->enable_sclk_ds)
1294 return 0;
1295
1296 for (i = TRINITY_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1297 temp = sclk / sumo_get_sleep_divider_from_id(i);
1298 if (temp >= min || i == 0)
1299 break;
1300 }
1301
1302 return (u8)i;
1303}
1304
1305static u32 trinity_get_valid_engine_clock(struct radeon_device *rdev,
1306 u32 lower_limit)
1307{
1308 struct trinity_power_info *pi = trinity_get_pi(rdev);
1309 u32 i;
1310
1311 for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
1312 if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
1313 return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
1314 }
1315
1316 if (i == pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries)
1317		DRM_ERROR("engine clock out of range!\n");
1318
1319 return 0;
1320}
1321
1322static void trinity_patch_thermal_state(struct radeon_device *rdev,
1323 struct trinity_ps *ps,
1324 struct trinity_ps *current_ps)
1325{
1326 struct trinity_power_info *pi = trinity_get_pi(rdev);
1327 u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
1328 u32 current_vddc;
1329 u32 current_sclk;
1330 u32 current_index = 0;
1331
1332 if (current_ps) {
1333 current_vddc = current_ps->levels[current_index].vddc_index;
1334 current_sclk = current_ps->levels[current_index].sclk;
1335 } else {
1336 current_vddc = pi->boot_pl.vddc_index;
1337 current_sclk = pi->boot_pl.sclk;
1338 }
1339
1340 ps->levels[0].vddc_index = current_vddc;
1341
1342 if (ps->levels[0].sclk > current_sclk)
1343 ps->levels[0].sclk = current_sclk;
1344
1345 ps->levels[0].ds_divider_index =
1346 trinity_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);
1347 ps->levels[0].ss_divider_index = ps->levels[0].ds_divider_index;
1348 ps->levels[0].allow_gnb_slow = 1;
1349 ps->levels[0].force_nbp_state = 0;
1350 ps->levels[0].display_wm = 0;
1351 ps->levels[0].vce_wm =
1352 trinity_calculate_vce_wm(rdev, ps->levels[0].sclk);
1353}
1354
1355static u8 trinity_calculate_display_wm(struct radeon_device *rdev,
1356 struct trinity_ps *ps, u32 index)
1357{
1358 if (ps == NULL || ps->num_levels <= 1)
1359 return 0;
1360 else if (ps->num_levels == 2) {
1361 if (index == 0)
1362 return 0;
1363 else
1364 return 1;
1365 } else {
1366 if (index == 0)
1367 return 0;
1368 else if (ps->levels[index].sclk < 30000)
1369 return 0;
1370 else
1371 return 1;
1372 }
1373}
1374
1375static u32 trinity_get_uvd_clock_index(struct radeon_device *rdev,
1376 struct radeon_ps *rps)
1377{
1378 struct trinity_power_info *pi = trinity_get_pi(rdev);
1379 u32 i = 0;
1380
1381 for (i = 0; i < 4; i++) {
1382 if ((rps->vclk == pi->sys_info.uvd_clock_table_entries[i].vclk) &&
1383 (rps->dclk == pi->sys_info.uvd_clock_table_entries[i].dclk))
1384 break;
1385 }
1386
1387 if (i >= 4) {
1388 DRM_ERROR("UVD clock index not found!\n");
1389 i = 3;
1390 }
1391 return i;
1392}
1393
1394static void trinity_adjust_uvd_state(struct radeon_device *rdev,
1395 struct radeon_ps *rps)
1396{
1397 struct trinity_ps *ps = trinity_get_ps(rps);
1398 struct trinity_power_info *pi = trinity_get_pi(rdev);
1399 u32 high_index = 0;
1400 u32 low_index = 0;
1401
1402 if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) {
1403 high_index = trinity_get_uvd_clock_index(rdev, rps);
1404
1405		switch (high_index) {
1406 case 3:
1407 case 2:
1408 low_index = 1;
1409 break;
1410 case 1:
1411 case 0:
1412 default:
1413 low_index = 0;
1414 break;
1415 }
1416
1417 ps->vclk_low_divider =
1418 pi->sys_info.uvd_clock_table_entries[high_index].vclk_did;
1419 ps->dclk_low_divider =
1420 pi->sys_info.uvd_clock_table_entries[high_index].dclk_did;
1421 ps->vclk_high_divider =
1422 pi->sys_info.uvd_clock_table_entries[low_index].vclk_did;
1423 ps->dclk_high_divider =
1424 pi->sys_info.uvd_clock_table_entries[low_index].dclk_did;
1425 }
1426}
1429
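/*
 * Adjust a requested power state to what the hardware can do: clamp
 * voltage and sclk per level, derive deep sleep dividers and
 * watermarks, and pick the NB pstate routing, forcing it high for
 * HD/SD or battery states under the conditions below.
 */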
1430static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
1431 struct radeon_ps *new_rps,
1432 struct radeon_ps *old_rps)
1433{
1434 struct trinity_ps *ps = trinity_get_ps(new_rps);
1435 struct trinity_ps *current_ps = trinity_get_ps(old_rps);
1436 struct trinity_power_info *pi = trinity_get_pi(rdev);
1437 u32 min_voltage = 0; /* ??? */
1438 u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
1439 u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
1440 u32 i;
1441 bool force_high;
1442 u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count;
1443
1444 if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1445 return trinity_patch_thermal_state(rdev, ps, current_ps);
1446
1447 trinity_adjust_uvd_state(rdev, new_rps);
1448
1449 for (i = 0; i < ps->num_levels; i++) {
1450 if (ps->levels[i].vddc_index < min_voltage)
1451 ps->levels[i].vddc_index = min_voltage;
1452
1453 if (ps->levels[i].sclk < min_sclk)
1454 ps->levels[i].sclk =
1455 trinity_get_valid_engine_clock(rdev, min_sclk);
1456
1457 ps->levels[i].ds_divider_index =
1458 sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);
1459
1460 ps->levels[i].ss_divider_index = ps->levels[i].ds_divider_index;
1461
1462 ps->levels[i].allow_gnb_slow = 1;
1463 ps->levels[i].force_nbp_state = 0;
1464 ps->levels[i].display_wm =
1465 trinity_calculate_display_wm(rdev, ps, i);
1466 ps->levels[i].vce_wm =
1467 trinity_calculate_vce_wm(rdev, ps->levels[0].sclk);
1468 }
1469
1470 if ((new_rps->class & (ATOM_PPLIB_CLASSIFICATION_HDSTATE | ATOM_PPLIB_CLASSIFICATION_SDSTATE)) ||
1471 ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY))
1472 ps->bapm_flags |= TRINITY_POWERSTATE_FLAGS_BAPM_DISABLE;
1473
1474 if (pi->sys_info.nb_dpm_enable) {
1475 ps->Dpm0PgNbPsLo = 0x1;
1476 ps->Dpm0PgNbPsHi = 0x0;
1477 ps->DpmXNbPsLo = 0x2;
1478 ps->DpmXNbPsHi = 0x1;
1479
1480 if ((new_rps->class & (ATOM_PPLIB_CLASSIFICATION_HDSTATE | ATOM_PPLIB_CLASSIFICATION_SDSTATE)) ||
1481 ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)) {
1482 force_high = ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ||
1483 ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) &&
1484 (pi->sys_info.uma_channel_number == 1)));
1485 force_high = (num_active_displays >= 3) || force_high;
1486 ps->Dpm0PgNbPsLo = force_high ? 0x2 : 0x3;
1487 ps->Dpm0PgNbPsHi = 0x1;
1488 ps->DpmXNbPsLo = force_high ? 0x2 : 0x3;
1489 ps->DpmXNbPsHi = 0x2;
1490 ps->levels[ps->num_levels - 1].allow_gnb_slow = 0;
1491 }
1492 }
1493}
1494
1495static void trinity_cleanup_asic(struct radeon_device *rdev)
1496{
1497 sumo_take_smu_control(rdev, false);
1498}
1499
1500#if 0
1501static void trinity_pre_display_configuration_change(struct radeon_device *rdev)
1502{
1503 struct trinity_power_info *pi = trinity_get_pi(rdev);
1504
1505 if (pi->voltage_drop_in_dce)
1506 trinity_dce_enable_voltage_adjustment(rdev, false);
1507}
1508#endif
1509
1510static void trinity_add_dccac_value(struct radeon_device *rdev)
1511{
1512 u32 gpu_cac_avrg_cntl_window_size;
1513 u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count;
1514 u64 disp_clk = rdev->clock.default_dispclk / 100;
1515 u32 dc_cac_value;
1516
1517 gpu_cac_avrg_cntl_window_size =
1518 (RREG32_SMC(GPU_CAC_AVRG_CNTL) & WINDOW_SIZE_MASK) >> WINDOW_SIZE_SHIFT;
1519
1520 dc_cac_value = (u32)((14213 * disp_clk * disp_clk * (u64)num_active_displays) >>
1521 (32 - gpu_cac_avrg_cntl_window_size));
1522
1523 WREG32_SMC(DC_CAC_VALUE, dc_cac_value);
1524}
1525
1526void trinity_dpm_display_configuration_changed(struct radeon_device *rdev)
1527{
1528 struct trinity_power_info *pi = trinity_get_pi(rdev);
1529
1530 if (pi->voltage_drop_in_dce)
1531 trinity_dce_enable_voltage_adjustment(rdev, true);
1532 trinity_add_dccac_value(rdev);
1533}
1534
1535union power_info {
1536 struct _ATOM_POWERPLAY_INFO info;
1537 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1538 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1539 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
1540 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
1541 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
1542};
1543
1544union pplib_clock_info {
1545 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
1546 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
1547 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
1548 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
1549};
1550
1551union pplib_power_state {
1552 struct _ATOM_PPLIB_STATE v1;
1553 struct _ATOM_PPLIB_STATE_V2 v2;
1554};
1555
1556static void trinity_parse_pplib_non_clock_info(struct radeon_device *rdev,
1557 struct radeon_ps *rps,
1558 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
1559 u8 table_rev)
1560{
1561 struct trinity_ps *ps = trinity_get_ps(rps);
1562
1563 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1564 rps->class = le16_to_cpu(non_clock_info->usClassification);
1565 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
1566
1567 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
1568 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
1569 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
1570 } else {
1571 rps->vclk = 0;
1572 rps->dclk = 0;
1573 }
1574
1575 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1576 rdev->pm.dpm.boot_ps = rps;
1577 trinity_patch_boot_state(rdev, ps);
1578 }
1579 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1580 rdev->pm.dpm.uvd_ps = rps;
1581}
1582
1583static void trinity_parse_pplib_clock_info(struct radeon_device *rdev,
1584 struct radeon_ps *rps, int index,
1585 union pplib_clock_info *clock_info)
1586{
1587 struct trinity_power_info *pi = trinity_get_pi(rdev);
1588 struct trinity_ps *ps = trinity_get_ps(rps);
1589 struct trinity_pl *pl = &ps->levels[index];
1590 u32 sclk;
1591
1592 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
1593 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
1594 pl->sclk = sclk;
1595 pl->vddc_index = clock_info->sumo.vddcIndex;
1596
1597 ps->num_levels = index + 1;
1598
1599 if (pi->enable_sclk_ds) {
1600 pl->ds_divider_index = 5;
1601 pl->ss_divider_index = 5;
1602 }
1603}
1604
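/*
 * Build rdev->pm.dpm.ps from the vbios PPLib tables: one radeon_ps per
 * state array entry, each backed by a private trinity_ps holding up to
 * SUMO_MAX_HARDWARE_POWERLEVELS levels.
 */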
1605static int trinity_parse_power_table(struct radeon_device *rdev)
1606{
1607 struct radeon_mode_info *mode_info = &rdev->mode_info;
1608 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1609 union pplib_power_state *power_state;
1610 int i, j, k, non_clock_array_index, clock_array_index;
1611 union pplib_clock_info *clock_info;
1612 struct _StateArray *state_array;
1613 struct _ClockInfoArray *clock_info_array;
1614 struct _NonClockInfoArray *non_clock_info_array;
1615 union power_info *power_info;
1616 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1617 u16 data_offset;
1618 u8 frev, crev;
1619 u8 *power_state_offset;
1620	struct trinity_ps *ps; /* ps_priv is read back as a trinity_ps */
1621
1622 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1623 &frev, &crev, &data_offset))
1624 return -EINVAL;
1625 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1626
1627 state_array = (struct _StateArray *)
1628 (mode_info->atom_context->bios + data_offset +
1629 le16_to_cpu(power_info->pplib.usStateArrayOffset));
1630 clock_info_array = (struct _ClockInfoArray *)
1631 (mode_info->atom_context->bios + data_offset +
1632 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
1633 non_clock_info_array = (struct _NonClockInfoArray *)
1634 (mode_info->atom_context->bios + data_offset +
1635 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
1636
1637 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
1638 state_array->ucNumEntries, GFP_KERNEL);
1639 if (!rdev->pm.dpm.ps)
1640 return -ENOMEM;
1641 power_state_offset = (u8 *)state_array->states;
1642 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1643 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1644 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1645 for (i = 0; i < state_array->ucNumEntries; i++) {
1646 power_state = (union pplib_power_state *)power_state_offset;
1647 non_clock_array_index = power_state->v2.nonClockInfoIndex;
1648 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1649 &non_clock_info_array->nonClockInfo[non_clock_array_index];
1650		if (!rdev->pm.power_state[i].clock_info) {
1651			kfree(rdev->pm.dpm.ps);
1652			return -EINVAL;
1653		}
1652		ps = kzalloc(sizeof(struct trinity_ps), GFP_KERNEL);
1653 if (ps == NULL) {
1654 kfree(rdev->pm.dpm.ps);
1655 return -ENOMEM;
1656 }
1657 rdev->pm.dpm.ps[i].ps_priv = ps;
1658 k = 0;
1659 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1660 clock_array_index = power_state->v2.clockInfoIndex[j];
1661 if (clock_array_index >= clock_info_array->ucNumEntries)
1662 continue;
1663 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
1664 break;
1665 clock_info = (union pplib_clock_info *)
1666 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
1667 trinity_parse_pplib_clock_info(rdev,
1668 &rdev->pm.dpm.ps[i], k,
1669 clock_info);
1670 k++;
1671 }
1672 trinity_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
1673 non_clock_info,
1674 non_clock_info_array->ucEntrySize);
1675 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
1676 }
1677 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
1678 return 0;
1679}
1680
1681union igp_info {
1682 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
1683 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
1684 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
1685 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
1686 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
1687};
1688
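/*
 * Decode a UVD divider ID (DID) into a divider expressed in hundredths
 * (e.g. DID 8 -> 2.00), using coarser steps in the higher ranges, then
 * divide the dentist VCO frequency by it, rounding up. Out-of-range
 * DIDs fall back to a fixed 10000.
 */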
1689static u32 trinity_convert_did_to_freq(struct radeon_device *rdev, u8 did)
1690{
1691 struct trinity_power_info *pi = trinity_get_pi(rdev);
1692 u32 divider;
1693
1694 if (did >= 8 && did <= 0x3f)
1695 divider = did * 25;
1696 else if (did > 0x3f && did <= 0x5f)
1697 divider = (did - 64) * 50 + 1600;
1698 else if (did > 0x5f && did <= 0x7e)
1699 divider = (did - 96) * 100 + 3200;
1700 else if (did == 0x7f)
1701 divider = 128 * 100;
1702 else
1703 return 10000;
1704
1705 return ((pi->sys_info.dentist_vco_freq * 100) + (divider - 1)) / divider;
1706}
1707
1708static int trinity_parse_sys_info_table(struct radeon_device *rdev)
1709{
1710 struct trinity_power_info *pi = trinity_get_pi(rdev);
1711 struct radeon_mode_info *mode_info = &rdev->mode_info;
1712 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
1713 union igp_info *igp_info;
1714 u8 frev, crev;
1715 u16 data_offset;
1716 int i;
1717
1718 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1719 &frev, &crev, &data_offset)) {
1720 igp_info = (union igp_info *)(mode_info->atom_context->bios +
1721 data_offset);
1722
1723 if (crev != 7) {
1724 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
1725 return -EINVAL;
1726 }
1727 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_7.ulBootUpEngineClock);
1728 pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_7.ulMinEngineClock);
1729 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_7.ulBootUpUMAClock);
1730 pi->sys_info.dentist_vco_freq = le32_to_cpu(igp_info->info_7.ulDentistVCOFreq);
1731 pi->sys_info.bootup_nb_voltage_index =
1732 le16_to_cpu(igp_info->info_7.usBootUpNBVoltage);
1733 if (igp_info->info_7.ucHtcTmpLmt == 0)
1734 pi->sys_info.htc_tmp_lmt = 203;
1735 else
1736 pi->sys_info.htc_tmp_lmt = igp_info->info_7.ucHtcTmpLmt;
1737 if (igp_info->info_7.ucHtcHystLmt == 0)
1738 pi->sys_info.htc_hyst_lmt = 5;
1739 else
1740 pi->sys_info.htc_hyst_lmt = igp_info->info_7.ucHtcHystLmt;
1741 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
1742 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
1743 }
1744
1745 if (pi->enable_nbps_policy)
1746 pi->sys_info.nb_dpm_enable = igp_info->info_7.ucNBDPMEnable;
1747 else
1748 pi->sys_info.nb_dpm_enable = 0;
1749
1750 for (i = 0; i < TRINITY_NUM_NBPSTATES; i++) {
1751 pi->sys_info.nbp_mclk[i] = le32_to_cpu(igp_info->info_7.ulNbpStateMemclkFreq[i]);
1752 pi->sys_info.nbp_nclk[i] = le32_to_cpu(igp_info->info_7.ulNbpStateNClkFreq[i]);
1753 }
1754
1755 pi->sys_info.nbp_voltage_index[0] = le16_to_cpu(igp_info->info_7.usNBP0Voltage);
1756 pi->sys_info.nbp_voltage_index[1] = le16_to_cpu(igp_info->info_7.usNBP1Voltage);
1757 pi->sys_info.nbp_voltage_index[2] = le16_to_cpu(igp_info->info_7.usNBP2Voltage);
1758 pi->sys_info.nbp_voltage_index[3] = le16_to_cpu(igp_info->info_7.usNBP3Voltage);
1759
1760 if (!pi->sys_info.nb_dpm_enable) {
1761 for (i = 1; i < TRINITY_NUM_NBPSTATES; i++) {
1762 pi->sys_info.nbp_mclk[i] = pi->sys_info.nbp_mclk[0];
1763 pi->sys_info.nbp_nclk[i] = pi->sys_info.nbp_nclk[0];
1764 pi->sys_info.nbp_voltage_index[i] = pi->sys_info.nbp_voltage_index[0];
1765 }
1766 }
1767
1768 pi->sys_info.uma_channel_number = igp_info->info_7.ucUMAChannelNumber;
1769
1770 sumo_construct_sclk_voltage_mapping_table(rdev,
1771 &pi->sys_info.sclk_voltage_mapping_table,
1772 igp_info->info_7.sAvail_SCLK);
1773 sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
1774 igp_info->info_7.sAvail_SCLK);
1775
1776 pi->sys_info.uvd_clock_table_entries[0].vclk_did =
1777 igp_info->info_7.ucDPMState0VclkFid;
1778 pi->sys_info.uvd_clock_table_entries[1].vclk_did =
1779 igp_info->info_7.ucDPMState1VclkFid;
1780 pi->sys_info.uvd_clock_table_entries[2].vclk_did =
1781 igp_info->info_7.ucDPMState2VclkFid;
1782 pi->sys_info.uvd_clock_table_entries[3].vclk_did =
1783 igp_info->info_7.ucDPMState3VclkFid;
1784
1785 pi->sys_info.uvd_clock_table_entries[0].dclk_did =
1786 igp_info->info_7.ucDPMState0DclkFid;
1787 pi->sys_info.uvd_clock_table_entries[1].dclk_did =
1788 igp_info->info_7.ucDPMState1DclkFid;
1789 pi->sys_info.uvd_clock_table_entries[2].dclk_did =
1790 igp_info->info_7.ucDPMState2DclkFid;
1791 pi->sys_info.uvd_clock_table_entries[3].dclk_did =
1792 igp_info->info_7.ucDPMState3DclkFid;
1793
1794 for (i = 0; i < 4; i++) {
1795 pi->sys_info.uvd_clock_table_entries[i].vclk =
1796 trinity_convert_did_to_freq(rdev,
1797 pi->sys_info.uvd_clock_table_entries[i].vclk_did);
1798 pi->sys_info.uvd_clock_table_entries[i].dclk =
1799 trinity_convert_did_to_freq(rdev,
1800 pi->sys_info.uvd_clock_table_entries[i].dclk_did);
1801 }
1802
1803
1804
1805 }
1806 return 0;
1807}
1808
1809int trinity_dpm_init(struct radeon_device *rdev)
1810{
1811 struct trinity_power_info *pi;
1812 int ret, i;
1813
1814 pi = kzalloc(sizeof(struct trinity_power_info), GFP_KERNEL);
1815 if (pi == NULL)
1816 return -ENOMEM;
1817 rdev->pm.dpm.priv = pi;
1818
1819 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1820 pi->at[i] = TRINITY_AT_DFLT;
1821
1822 pi->enable_nbps_policy = true;
1823 pi->enable_sclk_ds = true;
1824 pi->enable_gfx_power_gating = true;
1825 pi->enable_gfx_clock_gating = true;
1826 pi->enable_mg_clock_gating = true;
1827 pi->enable_gfx_dynamic_mgpg = true; /* ??? */
1828 pi->override_dynamic_mgpg = true;
1829 pi->enable_auto_thermal_throttling = true;
1830 pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
1831 pi->uvd_dpm = true; /* ??? */
1832
1833 ret = trinity_parse_sys_info_table(rdev);
1834 if (ret)
1835 return ret;
1836
1837 trinity_construct_boot_state(rdev);
1838
1839 ret = trinity_parse_power_table(rdev);
1840 if (ret)
1841 return ret;
1842
1843 pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
1844 pi->enable_dpm = true;
1845
1846 return 0;
1847}
1848
1849void trinity_dpm_print_power_state(struct radeon_device *rdev,
1850 struct radeon_ps *rps)
1851{
1852 int i;
1853 struct trinity_ps *ps = trinity_get_ps(rps);
1854
1855 r600_dpm_print_class_info(rps->class, rps->class2);
1856 r600_dpm_print_cap_info(rps->caps);
1857 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
1858 for (i = 0; i < ps->num_levels; i++) {
1859 struct trinity_pl *pl = &ps->levels[i];
1860 printk("\t\tpower level %d sclk: %u vddc: %u\n",
1861 i, pl->sclk,
1862 trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
1863 }
1864 r600_dpm_print_ps_status(rdev, rps);
1865}
1866
1867void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
1868 struct seq_file *m)
1869{
1870 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1871 struct trinity_ps *ps = trinity_get_ps(rps);
1872 struct trinity_pl *pl;
1873 u32 current_index =
1874 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >>
1875 CURRENT_STATE_SHIFT;
1876
1877 if (current_index >= ps->num_levels) {
1878 seq_printf(m, "invalid dpm profile %d\n", current_index);
1879 } else {
1880 pl = &ps->levels[current_index];
1881 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
1882 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
1883 current_index, pl->sclk,
1884 trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
1885 }
1886}
1887
1888void trinity_dpm_fini(struct radeon_device *rdev)
1889{
1890 int i;
1891
1892 trinity_cleanup_asic(rdev); /* ??? */
1893
1894 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1895 kfree(rdev->pm.dpm.ps[i].ps_priv);
1896 }
1897 kfree(rdev->pm.dpm.ps);
1898 kfree(rdev->pm.dpm.priv);
1899}
1900
1901u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low)
1902{
1903 struct trinity_power_info *pi = trinity_get_pi(rdev);
1904 struct trinity_ps *requested_state = trinity_get_ps(&pi->requested_rps);
1905
1906 if (low)
1907 return requested_state->levels[0].sclk;
1908 else
1909 return requested_state->levels[requested_state->num_levels - 1].sclk;
1910}
1911
1912u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low)
1913{
1914 struct trinity_power_info *pi = trinity_get_pi(rdev);
1915
1916 return pi->sys_info.bootup_uma_clk;
1917}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
new file mode 100644
index 000000000000..c621b843aab5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef __TRINITY_DPM_H__
24#define __TRINITY_DPM_H__
25
26#include "sumo_dpm.h"
27
28#define TRINITY_SIZEOF_DPM_STATE_TABLE (SMU_SCLK_DPM_STATE_1_CNTL_0 - SMU_SCLK_DPM_STATE_0_CNTL_0)
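/* With the SMC register map in trinityd.h (0x1f020 - 0x1f000), this
 * evaluates to 0x20 bytes of register space per sclk DPM state.
 */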
29
30struct trinity_pl {
31 u32 sclk;
32 u8 vddc_index;
33 u8 ds_divider_index;
34 u8 ss_divider_index;
35 u8 allow_gnb_slow;
36 u8 force_nbp_state;
37 u8 display_wm;
38 u8 vce_wm;
39};
40
41#define TRINITY_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1 << 0)
42#define TRINITY_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1 << 1)
43#define TRINITY_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1 << 2)
44
45#define TRINITY_POWERSTATE_FLAGS_BAPM_DISABLE (1 << 0)
46
47struct trinity_ps {
48 u32 num_levels;
49 struct trinity_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
50
51 u32 nbps_flags;
52 u32 bapm_flags;
53
54 u8 Dpm0PgNbPsLo;
55 u8 Dpm0PgNbPsHi;
56 u8 DpmXNbPsLo;
57 u8 DpmXNbPsHi;
58
59 u32 vclk_low_divider;
60 u32 vclk_high_divider;
61 u32 dclk_low_divider;
62 u32 dclk_high_divider;
63};
64
65#define TRINITY_NUM_NBPSTATES 4
66
67struct trinity_uvd_clock_table_entry
68{
69 u32 vclk;
70 u32 dclk;
71 u8 vclk_did;
72 u8 dclk_did;
73 u8 rsv[2];
74};
75
76struct trinity_sys_info {
77 u32 bootup_uma_clk;
78 u32 bootup_sclk;
79 u32 min_sclk;
80 u32 dentist_vco_freq;
81 u32 nb_dpm_enable;
82 u32 nbp_mclk[TRINITY_NUM_NBPSTATES];
83 u32 nbp_nclk[TRINITY_NUM_NBPSTATES];
84 u16 nbp_voltage_index[TRINITY_NUM_NBPSTATES];
85 u16 bootup_nb_voltage_index;
86 u8 htc_tmp_lmt;
87 u8 htc_hyst_lmt;
88 struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
89 struct sumo_vid_mapping_table vid_mapping_table;
90 u32 uma_channel_number;
91 struct trinity_uvd_clock_table_entry uvd_clock_table_entries[4];
92};
93
94struct trinity_power_info {
95 u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
96 u32 dpm_interval;
97 u32 thermal_auto_throttling;
98 struct trinity_sys_info sys_info;
99 struct trinity_pl boot_pl;
100 u32 min_sclk_did;
101 bool enable_nbps_policy;
102 bool voltage_drop_in_dce;
103 bool override_dynamic_mgpg;
104 bool enable_gfx_clock_gating;
105 bool enable_gfx_power_gating;
106 bool enable_mg_clock_gating;
107 bool enable_gfx_dynamic_mgpg;
108 bool enable_auto_thermal_throttling;
109 bool enable_dpm;
110 bool enable_sclk_ds;
111 bool uvd_dpm;
112 struct radeon_ps current_rps;
113 struct trinity_ps current_ps;
114 struct radeon_ps requested_rps;
115 struct trinity_ps requested_ps;
116};
117
118#define TRINITY_AT_DFLT 30
119
120/* trinity_smc.c */
121int trinity_dpm_config(struct radeon_device *rdev, bool enable);
122int trinity_uvd_dpm_config(struct radeon_device *rdev);
123int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
124int trinity_dpm_no_forced_level(struct radeon_device *rdev);
125int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
126 bool enable);
127int trinity_gfx_dynamic_mgpg_config(struct radeon_device *rdev);
128void trinity_acquire_mutex(struct radeon_device *rdev);
129void trinity_release_mutex(struct radeon_device *rdev);
130
131#endif
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
new file mode 100644
index 000000000000..85f86a29513c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "trinityd.h"
27#include "trinity_dpm.h"
28#include "ppsmc.h"
29
30struct trinity_ps *trinity_get_ps(struct radeon_ps *rps);
31struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev);
32
33static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
34{
35 int i;
36 u32 v = 0;
37
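	/* Hand the message ID to the SMC, then poll the response register:
	 * 1 means the message was handled, 0xFF that the SMC failed to
	 * handle it, 0xFE that the message was unknown.
	 */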
38 WREG32(SMC_MESSAGE_0, id);
39 for (i = 0; i < rdev->usec_timeout; i++) {
40 if (RREG32(SMC_RESP_0) != 0)
41 break;
42 udelay(1);
43 }
44 v = RREG32(SMC_RESP_0);
45
46 if (v != 1) {
47 if (v == 0xFF) {
48 DRM_ERROR("SMC failed to handle the message!\n");
49 return -EINVAL;
50 } else if (v == 0xFE) {
51 DRM_ERROR("Unknown SMC message!\n");
52 return -EINVAL;
53 }
54 }
55
56 return 0;
57}
58
59int trinity_dpm_config(struct radeon_device *rdev, bool enable)
60{
61 if (enable)
62 WREG32_SMC(SMU_SCRATCH0, 1);
63 else
64 WREG32_SMC(SMU_SCRATCH0, 0);
65
66 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Config);
67}
68
69int trinity_dpm_force_state(struct radeon_device *rdev, u32 n)
70{
71 WREG32_SMC(SMU_SCRATCH0, n);
72
73 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_ForceState);
74}
75
76int trinity_uvd_dpm_config(struct radeon_device *rdev)
77{
78 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_UVD_DPM_Config);
79}
80
81int trinity_dpm_no_forced_level(struct radeon_device *rdev)
82{
83 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
84}
85
86int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
87 bool enable)
88{
89 if (enable)
90 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DCE_AllowVoltageAdjustment);
91 else
92 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DCE_RemoveVoltageAdjustment);
93}
94
95int trinity_gfx_dynamic_mgpg_config(struct radeon_device *rdev)
96{
97 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_PG_SIMD_Config);
98}
99
100void trinity_acquire_mutex(struct radeon_device *rdev)
101{
102 int i;
103
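	/* Request the SMC mutex by writing 1 to SMC_INT_REQ, then spin
	 * until the low 16 bits read back as 1, i.e. the SMC has granted
	 * ownership (or the usec timeout expires).
	 */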
104 WREG32(SMC_INT_REQ, 1);
105 for (i = 0; i < rdev->usec_timeout; i++) {
106 if ((RREG32(SMC_INT_REQ) & 0xffff) == 1)
107 break;
108 udelay(1);
109 }
110}
111
112void trinity_release_mutex(struct radeon_device *rdev)
113{
114 WREG32(SMC_INT_REQ, 0);
115}
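A hypothetical caller sketch, pairing the mutex helpers above around an SMU
request; whether a particular message actually requires the mutex is an
assumption here:

	int ret;

	trinity_acquire_mutex(rdev);
	ret = trinity_dpm_force_state(rdev, 0); /* hypothetical: force state 0 */
	trinity_release_mutex(rdev);
	if (ret)
		DRM_ERROR("could not force DPM state\n");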
diff --git a/drivers/gpu/drm/radeon/trinityd.h b/drivers/gpu/drm/radeon/trinityd.h
new file mode 100644
index 000000000000..fd32e2771755
--- /dev/null
+++ b/drivers/gpu/drm/radeon/trinityd.h
@@ -0,0 +1,228 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef _TRINITYD_H_
25#define _TRINITYD_H_
26
27/* pm registers */
28
29/* cg */
30#define CG_CGTT_LOCAL_0 0x0
31#define CG_CGTT_LOCAL_1 0x1
32
33/* smc */
34#define SMU_SCLK_DPM_STATE_0_CNTL_0 0x1f000
35# define STATE_VALID(x) ((x) << 0)
36# define STATE_VALID_MASK (0xff << 0)
37# define STATE_VALID_SHIFT 0
38# define CLK_DIVIDER(x) ((x) << 8)
39# define CLK_DIVIDER_MASK (0xff << 8)
40# define CLK_DIVIDER_SHIFT 8
41# define VID(x) ((x) << 16)
42# define VID_MASK (0xff << 16)
43# define VID_SHIFT 16
44# define LVRT(x) ((x) << 24)
45# define LVRT_MASK (0xff << 24)
46# define LVRT_SHIFT 24
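/* Example with hypothetical field values: a state control word with
 * divider 0x10 and VID 0x2 would be composed as
 *   STATE_VALID(1) | CLK_DIVIDER(0x10) | VID(0x2) | LVRT(0) == 0x00021001
 */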
47#define SMU_SCLK_DPM_STATE_0_CNTL_1 0x1f004
48# define DS_DIV(x) ((x) << 0)
49# define DS_DIV_MASK (0xff << 0)
50# define DS_DIV_SHIFT 0
51# define DS_SH_DIV(x) ((x) << 8)
52# define DS_SH_DIV_MASK (0xff << 8)
53# define DS_SH_DIV_SHIFT 8
54# define DISPLAY_WM(x) ((x) << 16)
55# define DISPLAY_WM_MASK (0xff << 16)
56# define DISPLAY_WM_SHIFT 16
57# define VCE_WM(x) ((x) << 24)
58# define VCE_WM_MASK (0xff << 24)
59# define VCE_WM_SHIFT 24
60
61#define SMU_SCLK_DPM_STATE_0_CNTL_3 0x1f00c
62# define GNB_SLOW(x) ((x) << 0)
63# define GNB_SLOW_MASK (0xff << 0)
64# define GNB_SLOW_SHIFT 0
65# define FORCE_NBPS1(x) ((x) << 8)
66# define FORCE_NBPS1_MASK (0xff << 8)
67# define FORCE_NBPS1_SHIFT 8
68#define SMU_SCLK_DPM_STATE_0_AT 0x1f010
69# define AT(x) ((x) << 0)
70# define AT_MASK (0xff << 0)
71# define AT_SHIFT 0
72
73#define SMU_SCLK_DPM_STATE_0_PG_CNTL 0x1f014
74# define PD_SCLK_DIVIDER(x) ((x) << 16)
75# define PD_SCLK_DIVIDER_MASK (0xff << 16)
76# define PD_SCLK_DIVIDER_SHIFT 16
77
78#define SMU_SCLK_DPM_STATE_1_CNTL_0 0x1f020
79
80#define SMU_SCLK_DPM_CNTL 0x1f100
81# define SCLK_DPM_EN(x) ((x) << 0)
82# define SCLK_DPM_EN_MASK (0xff << 0)
83# define SCLK_DPM_EN_SHIFT 0
84# define SCLK_DPM_BOOT_STATE(x) ((x) << 16)
85# define SCLK_DPM_BOOT_STATE_MASK (0xff << 16)
86# define SCLK_DPM_BOOT_STATE_SHIFT 16
87# define VOLTAGE_CHG_EN(x) ((x) << 24)
88# define VOLTAGE_CHG_EN_MASK (0xff << 24)
89# define VOLTAGE_CHG_EN_SHIFT 24
90
91#define SMU_SCLK_DPM_TT_CNTL 0x1f108
92# define SCLK_TT_EN(x) ((x) << 0)
93# define SCLK_TT_EN_MASK (0xff << 0)
94# define SCLK_TT_EN_SHIFT 0
95#define SMU_SCLK_DPM_TTT 0x1f10c
96# define LT(x) ((x) << 0)
97# define LT_MASK (0xffff << 0)
98# define LT_SHIFT 0
99# define HT(x) ((x) << 16)
100# define HT_MASK (0xffff << 16)
101# define HT_SHIFT 16
102
103#define SMU_UVD_DPM_STATES 0x1f1a0
104#define SMU_UVD_DPM_CNTL 0x1f1a4
105
106#define SMU_S_PG_CNTL 0x1f118
107# define DS_PG_EN(x) ((x) << 16)
108# define DS_PG_EN_MASK (0xff << 16)
109# define DS_PG_EN_SHIFT 16
110
111#define GFX_POWER_GATING_CNTL 0x1f38c
112# define PDS_DIV(x) ((x) << 0)
113# define PDS_DIV_MASK (0xff << 0)
114# define PDS_DIV_SHIFT 0
115# define SSSD(x) ((x) << 8)
116# define SSSD_MASK (0xff << 8)
117# define SSSD_SHIFT 8
118
119#define PM_CONFIG 0x1f428
120# define SVI_Mode (1 << 29)
121
122#define PM_I_CNTL_1 0x1f464
123# define SCLK_DPM(x) ((x) << 0)
124# define SCLK_DPM_MASK (0xff << 0)
125# define SCLK_DPM_SHIFT 0
126# define DS_PG_CNTL(x) ((x) << 16)
127# define DS_PG_CNTL_MASK (0xff << 16)
128# define DS_PG_CNTL_SHIFT 16
129#define PM_TP 0x1f468
130
131#define NB_PSTATE_CONFIG 0x1f5f8
132# define Dpm0PgNbPsLo(x) ((x) << 0)
133# define Dpm0PgNbPsLo_MASK (3 << 0)
134# define Dpm0PgNbPsLo_SHIFT 0
135# define Dpm0PgNbPsHi(x) ((x) << 2)
136# define Dpm0PgNbPsHi_MASK (3 << 2)
137# define Dpm0PgNbPsHi_SHIFT 2
138# define DpmXNbPsLo(x) ((x) << 4)
139# define DpmXNbPsLo_MASK (3 << 4)
140# define DpmXNbPsLo_SHIFT 4
141# define DpmXNbPsHi(x) ((x) << 6)
142# define DpmXNbPsHi_MASK (3 << 6)
143# define DpmXNbPsHi_SHIFT 6
144
145#define DC_CAC_VALUE 0x1f908
146
147#define GPU_CAC_AVRG_CNTL 0x1f920
148# define WINDOW_SIZE(x) ((x) << 0)
149# define WINDOW_SIZE_MASK (0xff << 0)
150# define WINDOW_SIZE_SHIFT 0
151
152#define CC_SMU_MISC_FUSES 0xe0001004
153# define MinSClkDid(x) ((x) << 2)
154# define MinSClkDid_MASK (0x7f << 2)
155# define MinSClkDid_SHIFT 2
156
157#define CC_SMU_TST_EFUSE1_MISC 0xe000101c
158# define RB_BACKEND_DISABLE(x) ((x) << 16)
159# define RB_BACKEND_DISABLE_MASK (3 << 16)
160# define RB_BACKEND_DISABLE_SHIFT 16
161
162#define SMU_SCRATCH_A 0xe0003024
163
164#define SMU_SCRATCH0 0xe0003040
165
166/* mmio */
167#define SMC_INT_REQ 0x220
168
169#define SMC_MESSAGE_0 0x22c
170#define SMC_RESP_0 0x230
171
172#define GENERAL_PWRMGT 0x670
173# define GLOBAL_PWRMGT_EN (1 << 0)
174
175#define SCLK_PWRMGT_CNTL 0x678
176# define DYN_PWR_DOWN_EN (1 << 2)
177# define RESET_BUSY_CNT (1 << 4)
178# define RESET_SCLK_CNT (1 << 5)
179# define DYN_GFX_CLK_OFF_EN (1 << 7)
180# define GFX_CLK_FORCE_ON (1 << 8)
181# define DYNAMIC_PM_EN (1 << 21)
182
183#define TARGET_AND_CURRENT_PROFILE_INDEX 0x684
184# define TARGET_STATE(x) ((x) << 0)
185# define TARGET_STATE_MASK (0xf << 0)
186# define TARGET_STATE_SHIFT 0
187# define CURRENT_STATE(x) ((x) << 4)
188# define CURRENT_STATE_MASK (0xf << 4)
189# define CURRENT_STATE_SHIFT 4
190
191#define CG_GIPOTS 0x6d8
192# define CG_GIPOT(x) ((x) << 16)
193# define CG_GIPOT_MASK (0xffff << 16)
194# define CG_GIPOT_SHIFT 16
195
196#define CG_PG_CTRL 0x6e0
197# define SP(x) ((x) << 0)
198# define SP_MASK (0xffff << 0)
199# define SP_SHIFT 0
200# define SU(x) ((x) << 16)
201# define SU_MASK (0xffff << 16)
202# define SU_SHIFT 16
203
204#define CG_MISC_REG 0x708
205
206#define CG_THERMAL_INT_CTRL 0x738
207# define DIG_THERM_INTH(x) ((x) << 0)
208# define DIG_THERM_INTH_MASK (0xff << 0)
209# define DIG_THERM_INTH_SHIFT 0
210# define DIG_THERM_INTL(x) ((x) << 8)
211# define DIG_THERM_INTL_MASK (0xff << 8)
212# define DIG_THERM_INTL_SHIFT 8
213# define THERM_INTH_MASK (1 << 24)
214# define THERM_INTL_MASK (1 << 25)
215
216#define CG_CG_VOLTAGE_CNTL 0x770
217# define EN (1 << 9)
218
219#define HW_REV 0x5564
220# define ATI_REV_ID_MASK (0xf << 28)
221# define ATI_REV_ID_SHIFT 28
222/* 0 = A0, 1 = A1, 2 = B0, 3 = C0, etc. */
223
224#define CGTS_SM_CTRL_REG 0x9150
225
226#define GB_ADDR_CONFIG 0x98f8
227
228#endif
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
new file mode 100644
index 000000000000..72887df8dd76
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -0,0 +1,9 @@
1config DRM_RCAR_DU
2 tristate "DRM Support for R-Car Display Unit"
3 depends on DRM && ARM
4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER
7 help
8 Choose this option if you have an R-Car chipset.
9 If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
new file mode 100644
index 000000000000..7333c0094015
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -0,0 +1,8 @@
1rcar-du-drm-y := rcar_du_crtc.o \
2 rcar_du_drv.o \
3 rcar_du_kms.o \
4 rcar_du_lvds.o \
5 rcar_du_plane.o \
6 rcar_du_vga.o
7
8obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
new file mode 100644
index 000000000000..24183fb93592
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -0,0 +1,595 @@
1/*
2 * rcar_du_crtc.c -- R-Car Display Unit CRTCs
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/mutex.h>
16
17#include <drm/drmP.h>
18#include <drm/drm_crtc.h>
19#include <drm/drm_crtc_helper.h>
20#include <drm/drm_fb_cma_helper.h>
21#include <drm/drm_gem_cma_helper.h>
22
23#include "rcar_du_crtc.h"
24#include "rcar_du_drv.h"
25#include "rcar_du_kms.h"
26#include "rcar_du_lvds.h"
27#include "rcar_du_plane.h"
28#include "rcar_du_regs.h"
29#include "rcar_du_vga.h"
30
31#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
32
33static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
34{
35 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
36
37 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
38}
39
40static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
41{
42 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
43
44 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
45}
46
47static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
48{
49 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
50
51 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
52 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
53}
54
55static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
56{
57 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
58
59 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
60 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
61}
62
63static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
64 u32 clr, u32 set)
65{
66 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
67 u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
68
69 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
70}
71
72static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
73{
74 struct drm_crtc *crtc = &rcrtc->crtc;
75 struct rcar_du_device *rcdu = crtc->dev->dev_private;
76 const struct drm_display_mode *mode = &crtc->mode;
77 unsigned long clk;
78 u32 value;
79 u32 div;
80
81 /* Dot clock */
82 clk = clk_get_rate(rcdu->clock);
83 div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
84 div = clamp(div, 1U, 64U) - 1;
85
86 rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR,
87 ESCR_DCLKSEL_CLKS | div);
88 rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0);
89
90 /* Signal polarities */
91 value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
92 | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
93 | DSMR_DIPM_DE;
94 rcar_du_crtc_write(rcrtc, DSMR, value);
95
96 /* Display timings */
97 rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
98 rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
99 mode->hdisplay - 19);
100 rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
101 mode->hsync_start - 1);
102 rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
103
104 rcar_du_crtc_write(rcrtc, VDSR, mode->vtotal - mode->vsync_end - 2);
105 rcar_du_crtc_write(rcrtc, VDER, mode->vtotal - mode->vsync_end +
106 mode->vdisplay - 2);
107 rcar_du_crtc_write(rcrtc, VSPR, mode->vtotal - mode->vsync_end +
108 mode->vsync_start - 1);
109 rcar_du_crtc_write(rcrtc, VCR, mode->vtotal - 1);
110
111 rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
112 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
113}
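A worked example of the dot clock divider above, with hypothetical rates: for
a 65 MHz dot clock (mode->clock == 65000, in kHz) fed from a 130 MHz module
clock, div = DIV_ROUND_CLOSEST(130000000, 65000000) = 2, which stays within
the [1, 64] clamp and is then decremented, so ESCR is written with
ESCR_DCLKSEL_CLKS | 1.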
114
115static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc)
116{
117 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
118 u32 dorcr = rcar_du_read(rcdu, DORCR);
119
120 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
121
122	/* Set the DU1 pin sources. Select CRTC 0 if explicitly requested and
123	 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
124 * default.
125 */
126 if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
127 dorcr |= DORCR_PG2D_DS1;
128 else
129 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
130
131 rcar_du_write(rcdu, DORCR, dorcr);
132}
133
134static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
135{
136 rcar_du_write(rcdu, DSYSR,
137 (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
138 (start ? DSYSR_DEN : DSYSR_DRES));
139}
140
141static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
142{
143 /* Many of the configuration bits are only updated when the display
144 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
145 * of those bits could be pre-configured, but others (especially the
146 * bits related to plane assignment to display timing controllers) need
147 * to be modified at runtime.
148 *
149 * Restart the display controller if a start is requested. Sorry for the
150 * flicker. It should be possible to move most of the "DRES-update" bits
151 * setup to driver initialization time and minimize the number of cases
152 * when the display controller will have to be restarted.
153 */
154 if (start) {
155 if (rcdu->used_crtcs++ != 0)
156 __rcar_du_start_stop(rcdu, false);
157 __rcar_du_start_stop(rcdu, true);
158 } else {
159 if (--rcdu->used_crtcs == 0)
160 __rcar_du_start_stop(rcdu, false);
161 }
162}
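A sketch of the resulting call sequence for two CRTCs starting and stopping in
turn, assuming used_crtcs begins at 0:

	rcar_du_start_stop(rcdu, true);  /* CRTC 0: 0 -> 1, plain DEN */
	rcar_du_start_stop(rcdu, true);  /* CRTC 1: 1 -> 2, DRES then DEN (restart) */
	rcar_du_start_stop(rcdu, false); /* CRTC 1: 2 -> 1, display stays up */
	rcar_du_start_stop(rcdu, false); /* CRTC 0: 1 -> 0, DRES (stop) */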
163
164void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
165{
166 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
167
168 /* Store the route from the CRTC output to the DU output. The DU will be
169 * configured when starting the CRTC.
170 */
171 rcrtc->outputs |= 1 << output;
172}
173
174void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
175{
176 struct rcar_du_device *rcdu = crtc->dev->dev_private;
177 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
178 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
179 unsigned int num_planes = 0;
180 unsigned int prio = 0;
181 unsigned int i;
182 u32 dptsr = 0;
183 u32 dspr = 0;
184
185 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
186 struct rcar_du_plane *plane = &rcdu->planes.planes[i];
187 unsigned int j;
188
189 if (plane->crtc != &rcrtc->crtc || !plane->enabled)
190 continue;
191
192 /* Insert the plane in the sorted planes array. */
193 for (j = num_planes++; j > 0; --j) {
194 if (planes[j-1]->zpos <= plane->zpos)
195 break;
196 planes[j] = planes[j-1];
197 }
198
199 planes[j] = plane;
200 prio += plane->format->planes * 4;
201 }
202
203 for (i = 0; i < num_planes; ++i) {
204 struct rcar_du_plane *plane = planes[i];
205 unsigned int index = plane->hwindex;
206
207 prio -= 4;
208 dspr |= (index + 1) << prio;
209 dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index);
210
211 if (plane->format->planes == 2) {
212 index = (index + 1) % 8;
213
214 prio -= 4;
215 dspr |= (index + 1) << prio;
216 dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index);
217 }
218 }
219
220 /* Select display timing and dot clock generator 2 for planes associated
221 * with superposition controller 2.
222 */
223 if (rcrtc->index) {
224 u32 value = rcar_du_read(rcdu, DPTSR);
225
226 /* The DPTSR register is updated when the display controller is
227 * stopped. We thus need to restart the DU. Once again, sorry
228 * for the flicker. One way to mitigate the issue would be to
229 * pre-associate planes with CRTCs (either with a fixed 4/4
230 * split, or through a module parameter). Flicker would then
231 * occur only if we need to break the pre-association.
232 */
233 if (value != dptsr) {
234 rcar_du_write(rcdu, DPTSR, dptsr);
235 if (rcdu->used_crtcs) {
236 __rcar_du_start_stop(rcdu, false);
237 __rcar_du_start_stop(rcdu, true);
238 }
239 }
240 }
241
242 rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr);
243}
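To illustrate the priority encoding above with hypothetical planes: two
enabled single-plane formats, hardware plane 0 at zpos 0 and hardware plane 2
at zpos 1, give prio = 8 after the sort, and the second loop then produces

	dspr = ((0 + 1) << 4) | ((2 + 1) << 0); /* == 0x13 */

so the lower-zpos plane lands in the higher nibble of the DSnPR register.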
244
245static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
246{
247 struct drm_crtc *crtc = &rcrtc->crtc;
248 struct rcar_du_device *rcdu = crtc->dev->dev_private;
249 unsigned int i;
250
251 if (rcrtc->started)
252 return;
253
254 if (WARN_ON(rcrtc->plane->format == NULL))
255 return;
256
257 /* Set display off and background to black */
258 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
259 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
260
261 /* Configure display timings and output routing */
262 rcar_du_crtc_set_display_timing(rcrtc);
263 rcar_du_crtc_set_routing(rcrtc);
264
265 mutex_lock(&rcdu->planes.lock);
266 rcrtc->plane->enabled = true;
267 rcar_du_crtc_update_planes(crtc);
268 mutex_unlock(&rcdu->planes.lock);
269
270 /* Setup planes. */
271 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
272 struct rcar_du_plane *plane = &rcdu->planes.planes[i];
273
274 if (plane->crtc != crtc || !plane->enabled)
275 continue;
276
277 rcar_du_plane_setup(plane);
278 }
279
280 /* Select master sync mode. This enables display operation in master
281 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
282 * actively driven).
283 */
284 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
285
286 rcar_du_start_stop(rcdu, true);
287
288 rcrtc->started = true;
289}
290
291static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
292{
293 struct drm_crtc *crtc = &rcrtc->crtc;
294 struct rcar_du_device *rcdu = crtc->dev->dev_private;
295
296 if (!rcrtc->started)
297 return;
298
299 mutex_lock(&rcdu->planes.lock);
300 rcrtc->plane->enabled = false;
301 rcar_du_crtc_update_planes(crtc);
302 mutex_unlock(&rcdu->planes.lock);
303
304 /* Select switch sync mode. This stops display operation and configures
305 * the HSYNC and VSYNC signals as inputs.
306 */
307 rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
308
309 rcar_du_start_stop(rcdu, false);
310
311 rcrtc->started = false;
312}
313
314void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
315{
316 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
317
318 rcar_du_crtc_stop(rcrtc);
319 rcar_du_put(rcdu);
320}
321
322void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
323{
324 struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
325
326 if (rcrtc->dpms != DRM_MODE_DPMS_ON)
327 return;
328
329 rcar_du_get(rcdu);
330 rcar_du_crtc_start(rcrtc);
331}
332
333static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
334{
335 struct drm_crtc *crtc = &rcrtc->crtc;
336
337 rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
338 rcar_du_plane_update_base(rcrtc->plane);
339}
340
341static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
342{
343 struct rcar_du_device *rcdu = crtc->dev->dev_private;
344 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
345
346 if (rcrtc->dpms == mode)
347 return;
348
349 if (mode == DRM_MODE_DPMS_ON) {
350 rcar_du_get(rcdu);
351 rcar_du_crtc_start(rcrtc);
352 } else {
353 rcar_du_crtc_stop(rcrtc);
354 rcar_du_put(rcdu);
355 }
356
357 rcrtc->dpms = mode;
358}
359
360static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
361 const struct drm_display_mode *mode,
362 struct drm_display_mode *adjusted_mode)
363{
364 /* TODO Fixup modes */
365 return true;
366}
367
368static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
369{
370 struct rcar_du_device *rcdu = crtc->dev->dev_private;
371 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
372
373	/* We need to access the hardware during mode set, so acquire a
374	 * reference to the DU.
375 */
376 rcar_du_get(rcdu);
377
378 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
379 * result.
380 */
381 rcar_du_crtc_stop(rcrtc);
382 rcar_du_plane_release(rcrtc->plane);
383
384 rcrtc->dpms = DRM_MODE_DPMS_OFF;
385}
386
387static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
388 struct drm_display_mode *mode,
389 struct drm_display_mode *adjusted_mode,
390 int x, int y,
391 struct drm_framebuffer *old_fb)
392{
393 struct rcar_du_device *rcdu = crtc->dev->dev_private;
394 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
395 const struct rcar_du_format_info *format;
396 int ret;
397
398 format = rcar_du_format_info(crtc->fb->pixel_format);
399 if (format == NULL) {
400 dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
401 crtc->fb->pixel_format);
402 ret = -EINVAL;
403 goto error;
404 }
405
406 ret = rcar_du_plane_reserve(rcrtc->plane, format);
407 if (ret < 0)
408 goto error;
409
410 rcrtc->plane->format = format;
411 rcrtc->plane->pitch = crtc->fb->pitches[0];
412
413 rcrtc->plane->src_x = x;
414 rcrtc->plane->src_y = y;
415 rcrtc->plane->width = mode->hdisplay;
416 rcrtc->plane->height = mode->vdisplay;
417
418 rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
419
420 rcrtc->outputs = 0;
421
422 return 0;
423
424error:
425 /* There's no rollback/abort operation to clean up in case of error. We
426 * thus need to release the reference to the DU acquired in prepare()
427 * here.
428 */
429 rcar_du_put(rcdu);
430 return ret;
431}
432
433static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc)
434{
435 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
436
437	/* We're done; restart the CRTC and set the DPMS mode to on. The
438 * reference to the DU acquired at prepare() time will thus be released
439 * by the DPMS handler (possibly called by the disable() handler).
440 */
441 rcar_du_crtc_start(rcrtc);
442 rcrtc->dpms = DRM_MODE_DPMS_ON;
443}
444
445static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
446 struct drm_framebuffer *old_fb)
447{
448 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
449
450 rcrtc->plane->src_x = x;
451 rcrtc->plane->src_y = y;
452
453 rcar_du_crtc_update_base(to_rcar_crtc(crtc));
454
455 return 0;
456}
457
458static void rcar_du_crtc_disable(struct drm_crtc *crtc)
459{
460 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
461
462 rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
463 rcar_du_plane_release(rcrtc->plane);
464}
465
466static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
467 .dpms = rcar_du_crtc_dpms,
468 .mode_fixup = rcar_du_crtc_mode_fixup,
469 .prepare = rcar_du_crtc_mode_prepare,
470 .commit = rcar_du_crtc_mode_commit,
471 .mode_set = rcar_du_crtc_mode_set,
472 .mode_set_base = rcar_du_crtc_mode_set_base,
473 .disable = rcar_du_crtc_disable,
474};
475
476void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
477 struct drm_file *file)
478{
479 struct drm_pending_vblank_event *event;
480 struct drm_device *dev = rcrtc->crtc.dev;
481 unsigned long flags;
482
483 /* Destroy the pending vertical blanking event associated with the
484 * pending page flip, if any, and disable vertical blanking interrupts.
485 */
486 spin_lock_irqsave(&dev->event_lock, flags);
487 event = rcrtc->event;
488 if (event && event->base.file_priv == file) {
489 rcrtc->event = NULL;
490 event->base.destroy(&event->base);
491 drm_vblank_put(dev, rcrtc->index);
492 }
493 spin_unlock_irqrestore(&dev->event_lock, flags);
494}
495
496static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
497{
498 struct drm_pending_vblank_event *event;
499 struct drm_device *dev = rcrtc->crtc.dev;
500 unsigned long flags;
501
502 spin_lock_irqsave(&dev->event_lock, flags);
503 event = rcrtc->event;
504 rcrtc->event = NULL;
505 spin_unlock_irqrestore(&dev->event_lock, flags);
506
507 if (event == NULL)
508 return;
509
510 spin_lock_irqsave(&dev->event_lock, flags);
511 drm_send_vblank_event(dev, rcrtc->index, event);
512 spin_unlock_irqrestore(&dev->event_lock, flags);
513
514 drm_vblank_put(dev, rcrtc->index);
515}
516
517static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
518 struct drm_framebuffer *fb,
519 struct drm_pending_vblank_event *event)
520{
521 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
522 struct drm_device *dev = rcrtc->crtc.dev;
523 unsigned long flags;
524
525 spin_lock_irqsave(&dev->event_lock, flags);
526 if (rcrtc->event != NULL) {
527 spin_unlock_irqrestore(&dev->event_lock, flags);
528 return -EBUSY;
529 }
530 spin_unlock_irqrestore(&dev->event_lock, flags);
531
532 crtc->fb = fb;
533 rcar_du_crtc_update_base(rcrtc);
534
535 if (event) {
536 event->pipe = rcrtc->index;
537 drm_vblank_get(dev, rcrtc->index);
538 spin_lock_irqsave(&dev->event_lock, flags);
539 rcrtc->event = event;
540 spin_unlock_irqrestore(&dev->event_lock, flags);
541 }
542
543 return 0;
544}
545
546static const struct drm_crtc_funcs crtc_funcs = {
547 .destroy = drm_crtc_cleanup,
548 .set_config = drm_crtc_helper_set_config,
549 .page_flip = rcar_du_crtc_page_flip,
550};
551
552int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
553{
554 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
555 struct drm_crtc *crtc = &rcrtc->crtc;
556 int ret;
557
558 rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0;
559 rcrtc->index = index;
560 rcrtc->dpms = DRM_MODE_DPMS_OFF;
561 rcrtc->plane = &rcdu->planes.planes[index];
562
563 rcrtc->plane->crtc = crtc;
564
565 ret = drm_crtc_init(rcdu->ddev, crtc, &crtc_funcs);
566 if (ret < 0)
567 return ret;
568
569 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
570
571 return 0;
572}
573
574void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
575{
576 if (enable) {
577 rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
578 rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
579 } else {
580 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
581 }
582}
583
584void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
585{
586 u32 status;
587
588 status = rcar_du_crtc_read(rcrtc, DSSR);
589 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
590
591 if (status & DSSR_VBK) {
592 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
593 rcar_du_crtc_finish_page_flip(rcrtc);
594 }
595}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
new file mode 100644
index 000000000000..2a0365bcbd14
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -0,0 +1,50 @@
1/*
2 * rcar_du_crtc.h -- R-Car Display Unit CRTCs
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_CRTC_H__
15#define __RCAR_DU_CRTC_H__
16
17#include <linux/mutex.h>
18
19#include <drm/drmP.h>
20#include <drm/drm_crtc.h>
21
22struct rcar_du_device;
23struct rcar_du_plane;
24
25struct rcar_du_crtc {
26 struct drm_crtc crtc;
27
28 unsigned int mmio_offset;
29 unsigned int index;
30 bool started;
31
32 struct drm_pending_vblank_event *event;
33 unsigned int outputs;
34 int dpms;
35
36 struct rcar_du_plane *plane;
37};
38
39int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index);
40void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
41void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
42void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
43 struct drm_file *file);
44void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
45void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
46
47void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output);
48void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
49
50#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
new file mode 100644
index 000000000000..003b34ee38e3
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -0,0 +1,325 @@
1/*
2 * rcar_du_drv.c -- R-Car Display Unit DRM driver
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/pm.h>
20#include <linux/slab.h>
21
22#include <drm/drmP.h>
23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h>
25
26#include "rcar_du_crtc.h"
27#include "rcar_du_drv.h"
28#include "rcar_du_kms.h"
29#include "rcar_du_regs.h"
30
31/* -----------------------------------------------------------------------------
32 * Core device operations
33 */
34
35/*
36 * rcar_du_get - Acquire a reference to the DU
37 *
38 * Acquiring a reference enables the device clock and sets up the core
39 * registers. A reference must be held before accessing any hardware registers.
40 *
41 * This function must be called with the DRM mode_config lock held.
42 *
43 * Return 0 in case of success or a negative error code otherwise.
44 */
45int rcar_du_get(struct rcar_du_device *rcdu)
46{
47 int ret;
48
49 if (rcdu->use_count)
50 goto done;
51
52 /* Enable clocks before accessing the hardware. */
53 ret = clk_prepare_enable(rcdu->clock);
54 if (ret < 0)
55 return ret;
56
57 /* Enable extended features */
58 rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
59 rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
60 rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
61 rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
62 rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
63
64	/* Use DS1PR and DS2PR to configure plane priorities and connect
65	 * superposition 0 to the DU0 pins. The DU1 pins are configured dynamically.
66 */
67 rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
68
69done:
70 rcdu->use_count++;
71 return 0;
72}
73
74/*
75 * rcar_du_put - Release a reference to the DU
76 *
77 * Releasing the last reference disables the device clock.
78 *
79 * This function must be called with the DRM mode_config lock held.
80 */
81void rcar_du_put(struct rcar_du_device *rcdu)
82{
83 if (--rcdu->use_count)
84 return;
85
86 clk_disable_unprepare(rcdu->clock);
87}
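A minimal usage sketch for the reference helpers above (hypothetical caller,
with the mode_config lock held as required):

	int ret = rcar_du_get(rcdu);
	if (ret < 0)
		return ret;
	/* ... program DU registers ... */
	rcar_du_put(rcdu);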
88
89/* -----------------------------------------------------------------------------
90 * DRM operations
91 */
92
93static int rcar_du_unload(struct drm_device *dev)
94{
95 drm_kms_helper_poll_fini(dev);
96 drm_mode_config_cleanup(dev);
97 drm_vblank_cleanup(dev);
98 drm_irq_uninstall(dev);
99
100 dev->dev_private = NULL;
101
102 return 0;
103}
104
105static int rcar_du_load(struct drm_device *dev, unsigned long flags)
106{
107 struct platform_device *pdev = dev->platformdev;
108 struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
109 struct rcar_du_device *rcdu;
110 struct resource *ioarea;
111 struct resource *mem;
112 int ret;
113
114 if (pdata == NULL) {
115 dev_err(dev->dev, "no platform data\n");
116 return -ENODEV;
117 }
118
119 rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
120 if (rcdu == NULL) {
121 dev_err(dev->dev, "failed to allocate private data\n");
122 return -ENOMEM;
123 }
124
125 rcdu->dev = &pdev->dev;
126 rcdu->pdata = pdata;
127 rcdu->ddev = dev;
128 dev->dev_private = rcdu;
129
130 /* I/O resources and clocks */
131 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132 if (mem == NULL) {
133 dev_err(&pdev->dev, "failed to get memory resource\n");
134 return -EINVAL;
135 }
136
137 ioarea = devm_request_mem_region(&pdev->dev, mem->start,
138 resource_size(mem), pdev->name);
139 if (ioarea == NULL) {
140 dev_err(&pdev->dev, "failed to request memory region\n");
141 return -EBUSY;
142 }
143
144 rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
145 resource_size(ioarea));
146 if (rcdu->mmio == NULL) {
147 dev_err(&pdev->dev, "failed to remap memory resource\n");
148 return -ENOMEM;
149 }
150
151 rcdu->clock = devm_clk_get(&pdev->dev, NULL);
152 if (IS_ERR(rcdu->clock)) {
153 dev_err(&pdev->dev, "failed to get clock\n");
154 return -ENOENT;
155 }
156
157 /* DRM/KMS objects */
158 ret = rcar_du_modeset_init(rcdu);
159 if (ret < 0) {
160 dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
161 goto done;
162 }
163
164 /* IRQ and vblank handling */
165 ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
166 if (ret < 0) {
167 dev_err(&pdev->dev, "failed to initialize vblank\n");
168 goto done;
169 }
170
171 ret = drm_irq_install(dev);
172 if (ret < 0) {
173 dev_err(&pdev->dev, "failed to install IRQ handler\n");
174 goto done;
175 }
176
177 platform_set_drvdata(pdev, rcdu);
178
179done:
180 if (ret)
181 rcar_du_unload(dev);
182
183 return ret;
184}
185
186static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
187{
188 struct rcar_du_device *rcdu = dev->dev_private;
189 unsigned int i;
190
191 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
192 rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
193}
194
195static irqreturn_t rcar_du_irq(int irq, void *arg)
196{
197 struct drm_device *dev = arg;
198 struct rcar_du_device *rcdu = dev->dev_private;
199 unsigned int i;
200
201 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
202 rcar_du_crtc_irq(&rcdu->crtcs[i]);
203
204 return IRQ_HANDLED;
205}
206
207static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
208{
209 struct rcar_du_device *rcdu = dev->dev_private;
210
211 rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], true);
212
213 return 0;
214}
215
216static void rcar_du_disable_vblank(struct drm_device *dev, int crtc)
217{
218 struct rcar_du_device *rcdu = dev->dev_private;
219
220 rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], false);
221}
222
223static const struct file_operations rcar_du_fops = {
224 .owner = THIS_MODULE,
225 .open = drm_open,
226 .release = drm_release,
227 .unlocked_ioctl = drm_ioctl,
228#ifdef CONFIG_COMPAT
229 .compat_ioctl = drm_compat_ioctl,
230#endif
231 .poll = drm_poll,
232 .read = drm_read,
233 .fasync = drm_fasync,
234 .llseek = no_llseek,
235 .mmap = drm_gem_cma_mmap,
236};
237
238static struct drm_driver rcar_du_driver = {
239 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
240 | DRIVER_PRIME,
241 .load = rcar_du_load,
242 .unload = rcar_du_unload,
243 .preclose = rcar_du_preclose,
244 .irq_handler = rcar_du_irq,
245 .get_vblank_counter = drm_vblank_count,
246 .enable_vblank = rcar_du_enable_vblank,
247 .disable_vblank = rcar_du_disable_vblank,
248 .gem_free_object = drm_gem_cma_free_object,
249 .gem_vm_ops = &drm_gem_cma_vm_ops,
250 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
251 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
252 .gem_prime_import = drm_gem_cma_dmabuf_import,
253 .gem_prime_export = drm_gem_cma_dmabuf_export,
254 .dumb_create = drm_gem_cma_dumb_create,
255 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
256 .dumb_destroy = drm_gem_cma_dumb_destroy,
257 .fops = &rcar_du_fops,
258 .name = "rcar-du",
259 .desc = "Renesas R-Car Display Unit",
260 .date = "20130110",
261 .major = 1,
262 .minor = 0,
263};
264
265/* -----------------------------------------------------------------------------
266 * Power management
267 */
268
269#ifdef CONFIG_PM_SLEEP
270static int rcar_du_pm_suspend(struct device *dev)
271{
272 struct rcar_du_device *rcdu = dev_get_drvdata(dev);
273
274 drm_kms_helper_poll_disable(rcdu->ddev);
275 /* TODO Suspend the CRTC */
276
277 return 0;
278}
279
280static int rcar_du_pm_resume(struct device *dev)
281{
282 struct rcar_du_device *rcdu = dev_get_drvdata(dev);
283
284 /* TODO Resume the CRTC */
285
286 drm_kms_helper_poll_enable(rcdu->ddev);
287 return 0;
288}
289#endif
290
291static const struct dev_pm_ops rcar_du_pm_ops = {
292 SET_SYSTEM_SLEEP_PM_OPS(rcar_du_pm_suspend, rcar_du_pm_resume)
293};
294
295/* -----------------------------------------------------------------------------
296 * Platform driver
297 */
298
299static int rcar_du_probe(struct platform_device *pdev)
300{
301 return drm_platform_init(&rcar_du_driver, pdev);
302}
303
304static int rcar_du_remove(struct platform_device *pdev)
305{
306 drm_platform_exit(&rcar_du_driver, pdev);
307
308 return 0;
309}
310
311static struct platform_driver rcar_du_platform_driver = {
312 .probe = rcar_du_probe,
313 .remove = rcar_du_remove,
314 .driver = {
315 .owner = THIS_MODULE,
316 .name = "rcar-du",
317 .pm = &rcar_du_pm_ops,
318 },
319};
320
321module_platform_driver(rcar_du_platform_driver);
322
323MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
324MODULE_DESCRIPTION("Renesas R-Car Display Unit DRM Driver");
325MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
new file mode 100644
index 000000000000..193cc59d495c
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -0,0 +1,66 @@
1/*
2 * rcar_du_drv.h -- R-Car Display Unit DRM driver
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_DRV_H__
15#define __RCAR_DU_DRV_H__
16
17#include <linux/kernel.h>
18#include <linux/mutex.h>
19#include <linux/platform_data/rcar-du.h>
20
21#include "rcar_du_crtc.h"
22#include "rcar_du_plane.h"
23
24struct clk;
25struct device;
26struct drm_device;
27
28struct rcar_du_device {
29 struct device *dev;
30 const struct rcar_du_platform_data *pdata;
31
32 void __iomem *mmio;
33 struct clk *clock;
34 unsigned int use_count;
35
36 struct drm_device *ddev;
37
38 struct rcar_du_crtc crtcs[2];
39 unsigned int used_crtcs;
40 unsigned int num_crtcs;
41
42 struct {
43 struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
44 unsigned int free;
45 struct mutex lock;
46
47 struct drm_property *alpha;
48 struct drm_property *colorkey;
49 struct drm_property *zpos;
50 } planes;
51};
52
53int rcar_du_get(struct rcar_du_device *rcdu);
54void rcar_du_put(struct rcar_du_device *rcdu);
55
56static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
57{
58 return ioread32(rcdu->mmio + reg);
59}
60
61static inline void rcar_du_write(struct rcar_du_device *rcdu, u32 reg, u32 data)
62{
63 iowrite32(data, rcdu->mmio + reg);
64}
65
66#endif /* __RCAR_DU_DRV_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
new file mode 100644
index 000000000000..9c63f39658de
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -0,0 +1,245 @@
1/*
2 * rcar_du_kms.c -- R-Car Display Unit Mode Setting
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h>
19
20#include "rcar_du_crtc.h"
21#include "rcar_du_drv.h"
22#include "rcar_du_kms.h"
23#include "rcar_du_lvds.h"
24#include "rcar_du_regs.h"
25#include "rcar_du_vga.h"
26
27/* -----------------------------------------------------------------------------
28 * Format helpers
29 */
30
31static const struct rcar_du_format_info rcar_du_format_infos[] = {
32 {
33 .fourcc = DRM_FORMAT_RGB565,
34 .bpp = 16,
35 .planes = 1,
36 .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
37 .edf = PnDDCR4_EDF_NONE,
38 }, {
39 .fourcc = DRM_FORMAT_ARGB1555,
40 .bpp = 16,
41 .planes = 1,
42 .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
43 .edf = PnDDCR4_EDF_NONE,
44 }, {
45 .fourcc = DRM_FORMAT_XRGB1555,
46 .bpp = 16,
47 .planes = 1,
48 .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
49 .edf = PnDDCR4_EDF_NONE,
50 }, {
51 .fourcc = DRM_FORMAT_XRGB8888,
52 .bpp = 32,
53 .planes = 1,
54 .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
55 .edf = PnDDCR4_EDF_RGB888,
56 }, {
57 .fourcc = DRM_FORMAT_ARGB8888,
58 .bpp = 32,
59 .planes = 1,
60 .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
61 .edf = PnDDCR4_EDF_ARGB8888,
62 }, {
63 .fourcc = DRM_FORMAT_UYVY,
64 .bpp = 16,
65 .planes = 1,
66 .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
67 .edf = PnDDCR4_EDF_NONE,
68 }, {
69 .fourcc = DRM_FORMAT_YUYV,
70 .bpp = 16,
71 .planes = 1,
72 .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
73 .edf = PnDDCR4_EDF_NONE,
74 }, {
75 .fourcc = DRM_FORMAT_NV12,
76 .bpp = 12,
77 .planes = 2,
78 .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
79 .edf = PnDDCR4_EDF_NONE,
80 }, {
81 .fourcc = DRM_FORMAT_NV21,
82 .bpp = 12,
83 .planes = 2,
84 .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
85 .edf = PnDDCR4_EDF_NONE,
86 }, {
87 /* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
88 .fourcc = DRM_FORMAT_NV16,
89 .bpp = 16,
90 .planes = 2,
91 .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
92 .edf = PnDDCR4_EDF_NONE,
93 },
94};
95
96const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
97{
98 unsigned int i;
99
100 for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
101 if (rcar_du_format_infos[i].fourcc == fourcc)
102 return &rcar_du_format_infos[i];
103 }
104
105 return NULL;
106}
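A minimal lookup sketch (hypothetical caller):

	const struct rcar_du_format_info *info;

	info = rcar_du_format_info(DRM_FORMAT_ARGB8888);
	if (info == NULL)
		return -EINVAL;
	/* per the table above: info->bpp == 32, info->planes == 1 */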
107
108/* -----------------------------------------------------------------------------
109 * Common connector and encoder functions
110 */
111
112struct drm_encoder *
113rcar_du_connector_best_encoder(struct drm_connector *connector)
114{
115 struct rcar_du_connector *rcon = to_rcar_connector(connector);
116
117 return &rcon->encoder->encoder;
118}
119
120void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
121{
122}
123
124void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
125 struct drm_display_mode *mode,
126 struct drm_display_mode *adjusted_mode)
127{
128 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
129
130 rcar_du_crtc_route_output(encoder->crtc, renc->output);
131}
132
133void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
134{
135}
136
137/* -----------------------------------------------------------------------------
138 * Frame buffer
139 */
140
141static struct drm_framebuffer *
142rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
143 struct drm_mode_fb_cmd2 *mode_cmd)
144{
145 const struct rcar_du_format_info *format;
146
147 format = rcar_du_format_info(mode_cmd->pixel_format);
148 if (format == NULL) {
149 dev_dbg(dev->dev, "unsupported pixel format %08x\n",
150 mode_cmd->pixel_format);
151 return ERR_PTR(-EINVAL);
152 }
153
154 if (mode_cmd->pitches[0] & 15 || mode_cmd->pitches[0] >= 8192) {
155 dev_dbg(dev->dev, "invalid pitch value %u\n",
156 mode_cmd->pitches[0]);
157 return ERR_PTR(-EINVAL);
158 }
159
160 if (format->planes == 2) {
161 if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
162 dev_dbg(dev->dev,
163 "luma and chroma pitches do not match\n");
164 return ERR_PTR(-EINVAL);
165 }
166 }
167
168 return drm_fb_cma_create(dev, file_priv, mode_cmd);
169}
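For example, a 1024-pixel-wide ARGB8888 frame buffer has pitches[0] =
1024 * 4 = 4096 bytes, which satisfies both checks above: it is a multiple of
16 and below the 8192-byte limit.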
170
171static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
172 .fb_create = rcar_du_fb_create,
173};
174
175int rcar_du_modeset_init(struct rcar_du_device *rcdu)
176{
177 struct drm_device *dev = rcdu->ddev;
178 struct drm_encoder *encoder;
179 unsigned int i;
180 int ret;
181
182 drm_mode_config_init(rcdu->ddev);
183
184 rcdu->ddev->mode_config.min_width = 0;
185 rcdu->ddev->mode_config.min_height = 0;
186 rcdu->ddev->mode_config.max_width = 4095;
187 rcdu->ddev->mode_config.max_height = 2047;
188 rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs;
189
190 ret = rcar_du_plane_init(rcdu);
191 if (ret < 0)
192 return ret;
193
194 for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
195 rcar_du_crtc_create(rcdu, i);
196
197 rcdu->used_crtcs = 0;
198 rcdu->num_crtcs = i;
199
200 for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
201 const struct rcar_du_encoder_data *pdata =
202 &rcdu->pdata->encoders[i];
203
204 if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) {
205 dev_warn(rcdu->dev,
206				 "encoder %u references nonexistent output %u, skipping\n",
207 i, pdata->output);
208 continue;
209 }
210
211 switch (pdata->encoder) {
212 case RCAR_DU_ENCODER_VGA:
213 rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
214 break;
215
216 case RCAR_DU_ENCODER_LVDS:
217 rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
218 break;
219
220 default:
221 break;
222 }
223 }
224
225 /* Set the possible CRTCs and possible clones. All encoders can be
226 * driven by the CRTC associated with the output they're connected to,
227 * as well as by CRTC 0.
228 */
229 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
230 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
231
232 encoder->possible_crtcs = (1 << 0) | (1 << renc->output);
233 encoder->possible_clones = 1 << 0;
234 }
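	/* For example (hypothetical), an encoder wired to output 1 ends up
	 * with possible_crtcs = (1 << 0) | (1 << 1) = 0x3, so it can be
	 * driven by either CRTC 0 or CRTC 1.
	 */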
235
236 ret = rcar_du_plane_register(rcdu);
237 if (ret < 0)
238 return ret;
239
240 drm_kms_helper_poll_init(rcdu->ddev);
241
242 drm_helper_disable_unused_functions(rcdu->ddev);
243
244 return 0;
245}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
new file mode 100644
index 000000000000..e4d8db069a06
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -0,0 +1,59 @@
1/*
2 * rcar_du_kms.h -- R-Car Display Unit Mode Setting
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_KMS_H__
15#define __RCAR_DU_KMS_H__
16
17#include <linux/types.h>
18
19#include <drm/drm_crtc.h>
20
21struct rcar_du_device;
22
23struct rcar_du_format_info {
24 u32 fourcc;
25 unsigned int bpp;
26 unsigned int planes;
27 unsigned int pnmr;
28 unsigned int edf;
29};
30
31struct rcar_du_encoder {
32 struct drm_encoder encoder;
33 unsigned int output;
34};
35
36#define to_rcar_encoder(e) \
37 container_of(e, struct rcar_du_encoder, encoder)
38
39struct rcar_du_connector {
40 struct drm_connector connector;
41 struct rcar_du_encoder *encoder;
42};
43
44#define to_rcar_connector(c) \
45 container_of(c, struct rcar_du_connector, connector)
46
47const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
48
49struct drm_encoder *
50rcar_du_connector_best_encoder(struct drm_connector *connector);
51void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
52void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
53 struct drm_display_mode *mode,
54 struct drm_display_mode *adjusted_mode);
55void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
56
57int rcar_du_modeset_init(struct rcar_du_device *rcdu);
58
59#endif /* __RCAR_DU_KMS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
new file mode 100644
index 000000000000..7aefe7267e1d
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
@@ -0,0 +1,216 @@
1/*
2 * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17
18#include "rcar_du_drv.h"
19#include "rcar_du_kms.h"
20#include "rcar_du_lvds.h"
21
22struct rcar_du_lvds_connector {
23 struct rcar_du_connector connector;
24
25 const struct rcar_du_panel_data *panel;
26};
27
28#define to_rcar_lvds_connector(c) \
29 container_of(c, struct rcar_du_lvds_connector, connector.connector)
30
31/* -----------------------------------------------------------------------------
32 * Connector
33 */
34
35static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
36{
37 struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector);
38 struct drm_display_mode *mode;
39
40 mode = drm_mode_create(connector->dev);
41 if (mode == NULL)
42 return 0;
43
44 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
45 mode->clock = lvdscon->panel->mode.clock;
46 mode->hdisplay = lvdscon->panel->mode.hdisplay;
47 mode->hsync_start = lvdscon->panel->mode.hsync_start;
48 mode->hsync_end = lvdscon->panel->mode.hsync_end;
49 mode->htotal = lvdscon->panel->mode.htotal;
50 mode->vdisplay = lvdscon->panel->mode.vdisplay;
51 mode->vsync_start = lvdscon->panel->mode.vsync_start;
52 mode->vsync_end = lvdscon->panel->mode.vsync_end;
53 mode->vtotal = lvdscon->panel->mode.vtotal;
54 mode->flags = lvdscon->panel->mode.flags;
55
56 drm_mode_set_name(mode);
57 drm_mode_probed_add(connector, mode);
58
59 return 1;
60}
61
62static int rcar_du_lvds_connector_mode_valid(struct drm_connector *connector,
63 struct drm_display_mode *mode)
64{
65 return MODE_OK;
66}
67
68static const struct drm_connector_helper_funcs connector_helper_funcs = {
69 .get_modes = rcar_du_lvds_connector_get_modes,
70 .mode_valid = rcar_du_lvds_connector_mode_valid,
71 .best_encoder = rcar_du_connector_best_encoder,
72};
73
74static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
75{
76 drm_sysfs_connector_remove(connector);
77 drm_connector_cleanup(connector);
78}
79
80static enum drm_connector_status
81rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
82{
83 return connector_status_connected;
84}
85
86static const struct drm_connector_funcs connector_funcs = {
87 .dpms = drm_helper_connector_dpms,
88 .detect = rcar_du_lvds_connector_detect,
89 .fill_modes = drm_helper_probe_single_connector_modes,
90 .destroy = rcar_du_lvds_connector_destroy,
91};
92
93static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
94 struct rcar_du_encoder *renc,
95 const struct rcar_du_panel_data *panel)
96{
97 struct rcar_du_lvds_connector *lvdscon;
98 struct drm_connector *connector;
99 int ret;
100
101 lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
102 if (lvdscon == NULL)
103 return -ENOMEM;
104
105 lvdscon->panel = panel;
106
107 connector = &lvdscon->connector.connector;
108 connector->display_info.width_mm = panel->width_mm;
109 connector->display_info.height_mm = panel->height_mm;
110
111 ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
112 DRM_MODE_CONNECTOR_LVDS);
113 if (ret < 0)
114 return ret;
115
116 drm_connector_helper_add(connector, &connector_helper_funcs);
117 ret = drm_sysfs_connector_add(connector);
118 if (ret < 0)
119 return ret;
120
121 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
122 drm_object_property_set_value(&connector->base,
123 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
124
125 ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
126 if (ret < 0)
127 return ret;
128
129 connector->encoder = &renc->encoder;
130 lvdscon->connector.encoder = renc;
131
132 return 0;
133}
134
135/* -----------------------------------------------------------------------------
136 * Encoder
137 */
138
139static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
140{
141}
142
143static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
144 const struct drm_display_mode *mode,
145 struct drm_display_mode *adjusted_mode)
146{
147 const struct drm_display_mode *panel_mode;
148 struct drm_device *dev = encoder->dev;
149 struct drm_connector *connector;
150 bool found = false;
151
152 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
153 if (connector->encoder == encoder) {
154 found = true;
155 break;
156 }
157 }
158
159 if (!found) {
160 dev_dbg(dev->dev, "mode_fixup: no connector found\n");
161 return false;
162 }
163
164 if (list_empty(&connector->modes)) {
165 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
166 return false;
167 }
168
169 panel_mode = list_first_entry(&connector->modes,
170 struct drm_display_mode, head);
171
172 /* We're not allowed to modify the resolution. */
173 if (mode->hdisplay != panel_mode->hdisplay ||
174 mode->vdisplay != panel_mode->vdisplay)
175 return false;
176
177 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
178 drm_mode_copy(adjusted_mode, panel_mode);
179
180 return true;
181}
182
183static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
184 .dpms = rcar_du_lvds_encoder_dpms,
185 .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
186 .prepare = rcar_du_encoder_mode_prepare,
187 .commit = rcar_du_encoder_mode_commit,
188 .mode_set = rcar_du_encoder_mode_set,
189};
190
191static const struct drm_encoder_funcs encoder_funcs = {
192 .destroy = drm_encoder_cleanup,
193};
194
195int rcar_du_lvds_init(struct rcar_du_device *rcdu,
196 const struct rcar_du_encoder_lvds_data *data,
197 unsigned int output)
198{
199 struct rcar_du_encoder *renc;
200 int ret;
201
202 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
203 if (renc == NULL)
204 return -ENOMEM;
205
206 renc->output = output;
207
208 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
209 DRM_MODE_ENCODER_LVDS);
210 if (ret < 0)
211 return ret;
212
213 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
214
215 return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
216}
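struct rcar_du_panel_data itself is not part of this diff; judging from the fields dereferenced above (panel->mode.clock and friends, panel->width_mm, panel->height_mm), its definition presumably resembles the following. This is an inferred sketch, not the actual platform-data header:

	/* Inferred from usage; the real definition lives in the R-Car DU
	 * platform data header (assumption). */
	struct rcar_du_panel_data {
		unsigned int width_mm;		/* panel width in mm */
		unsigned int height_mm;		/* panel height in mm */
		struct drm_mode_modeinfo mode;	/* fixed panel timings */
	};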
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
new file mode 100644
index 000000000000..b47f8328e103
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
@@ -0,0 +1,24 @@
1/*
2 * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_LVDS_H__
15#define __RCAR_DU_LVDS_H__
16
17struct rcar_du_device;
18struct rcar_du_encoder_lvds_data;
19
20int rcar_du_lvds_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_lvds_data *data,
22 unsigned int output);
23
24#endif /* __RCAR_DU_LVDS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
new file mode 100644
index 000000000000..a65f81ddf51d
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -0,0 +1,507 @@
1/*
2 * rcar_du_plane.c -- R-Car Display Unit Planes
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_kms.h"
22#include "rcar_du_plane.h"
23#include "rcar_du_regs.h"
24
25#define RCAR_DU_COLORKEY_NONE (0 << 24)
26#define RCAR_DU_COLORKEY_SOURCE (1 << 24)
27#define RCAR_DU_COLORKEY_MASK (1 << 24)
28
29struct rcar_du_kms_plane {
30 struct drm_plane plane;
31 struct rcar_du_plane *hwplane;
32};
33
34static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
35{
36 return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
37}
38
39static u32 rcar_du_plane_read(struct rcar_du_device *rcdu,
40 unsigned int index, u32 reg)
41{
42 return rcar_du_read(rcdu, index * PLANE_OFF + reg);
43}
44
45static void rcar_du_plane_write(struct rcar_du_device *rcdu,
46 unsigned int index, u32 reg, u32 data)
47{
48 rcar_du_write(rcdu, index * PLANE_OFF + reg, data);
49}
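The helpers above locate a per-plane register by adding index * PLANE_OFF to the plane-1 offset; with PLANE_OFF = 0x100 and PnMR at 0x00100 (both defined in rcar_du_regs.h later in this diff), plane index 2 has its PnMR at 0x300. A standalone check of the offset math:

	#include <stdio.h>

	#define PLANE_OFF	0x00100
	#define PnMR		0x00100	/* mode register of plane 1 */

	int main(void)
	{
		unsigned int index;

		for (index = 0; index < 8; index++)
			printf("plane %u: PnMR at 0x%05x\n",
			       index + 1, index * PLANE_OFF + PnMR);
		return 0;
	}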
50
51int rcar_du_plane_reserve(struct rcar_du_plane *plane,
52 const struct rcar_du_format_info *format)
53{
54 struct rcar_du_device *rcdu = plane->dev;
55 unsigned int i;
56 int ret = -EBUSY;
57
58 mutex_lock(&rcdu->planes.lock);
59
60 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
61 if (!(rcdu->planes.free & (1 << i)))
62 continue;
63
64 if (format->planes == 1 ||
65 rcdu->planes.free & (1 << ((i + 1) % 8)))
66 break;
67 }
68
69 if (i == ARRAY_SIZE(rcdu->planes.planes))
70 goto done;
71
72 rcdu->planes.free &= ~(1 << i);
73 if (format->planes == 2)
74 rcdu->planes.free &= ~(1 << ((i + 1) % 8));
75
76 plane->hwindex = i;
77
78 ret = 0;
79
80done:
81 mutex_unlock(&rcdu->planes.lock);
82 return ret;
83}
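rcar_du_plane_reserve() treats planes.free as an 8-bit allocation bitmap: a single-plane format takes any free slot, while a two-plane format also requires the wrapping neighbour (i + 1) % 8 to be free for the chroma plane. A standalone trace of the search with a hypothetical bitmap:

	#include <stdio.h>

	int main(void)
	{
		unsigned int free = 0xb5;	/* hypothetical: slots 0,2,4,5,7 free */
		unsigned int nplanes = 2;	/* e.g. NV12 needs two hw planes */
		unsigned int i;

		for (i = 0; i < 8; i++) {
			if (!(free & (1 << i)))
				continue;
			if (nplanes == 1 || (free & (1 << ((i + 1) % 8))))
				break;
		}

		/* Prints 4: slots 4 and 5 are the first free adjacent pair. */
		printf("reserved hw plane %u\n", i);
		return 0;
	}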
84
85void rcar_du_plane_release(struct rcar_du_plane *plane)
86{
87 struct rcar_du_device *rcdu = plane->dev;
88
89 if (plane->hwindex == -1)
90 return;
91
92 mutex_lock(&rcdu->planes.lock);
93 rcdu->planes.free |= 1 << plane->hwindex;
94 if (plane->format->planes == 2)
95 rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8);
96 mutex_unlock(&rcdu->planes.lock);
97
98 plane->hwindex = -1;
99}
100
101void rcar_du_plane_update_base(struct rcar_du_plane *plane)
102{
103 struct rcar_du_device *rcdu = plane->dev;
104 unsigned int index = plane->hwindex;
105
106 /* According to the datasheet the Y position is expressed in raster line
107 * units. However, 32bpp formats seem to require a doubled Y position
108 * value. Similarly, for the second plane, NV12 and NV21 formats seem to
109 * require a halved Y position value.
110 */
111 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
112 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
113 (plane->format->bpp == 32 ? 2 : 1));
114 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]);
115
116 if (plane->format->planes == 2) {
117 index = (index + 1) % 8;
118
119 rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
120 rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
121 (plane->format->bpp == 16 ? 2 : 1) / 2);
122 rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]);
123 }
124}
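Worked numbers for the comment above, assuming the format table (not visible in this excerpt) gives NV12 a bpp of 12: with src_y = 10, a 32bpp primary plane programs PnSPYR = 20, while the NV12 chroma plane programs PnSPYR = 10 * 1 / 2 = 5. A trivial check:

	#include <stdio.h>

	int main(void)
	{
		unsigned int src_y = 10;

		printf("XRGB8888 PnSPYR = %u\n", src_y * 2);	/* 20 */
		printf("NV12 chroma PnSPYR = %u\n", src_y / 2);	/* 5 */
		return 0;
	}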
125
126void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
127 struct drm_framebuffer *fb)
128{
129 struct drm_gem_cma_object *gem;
130
131 gem = drm_fb_cma_get_gem_obj(fb, 0);
132 plane->dma[0] = gem->paddr + fb->offsets[0];
133
134 if (plane->format->planes == 2) {
135 gem = drm_fb_cma_get_gem_obj(fb, 1);
136 plane->dma[1] = gem->paddr + fb->offsets[1];
137 }
138}
139
140static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
141 unsigned int index)
142{
143 struct rcar_du_device *rcdu = plane->dev;
144 u32 colorkey;
145 u32 pnmr;
146
147 /* The PnALPHAR register controls alpha-blending in 16bpp formats
148 * (ARGB1555 and XRGB1555).
149 *
150 * For ARGB, set the alpha value to 0, and enable alpha-blending when
151 * the A bit is 0. This maps A=0 to alpha=0 and A=1 to alpha=255.
152 *
153 * For XRGB, set the alpha value to the plane-wide alpha value and
154 * enable alpha-blending regardless of the X bit value.
155 */
156 if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
157 rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0);
158 else
159 rcar_du_plane_write(rcdu, index, PnALPHAR,
160 PnALPHAR_ABIT_X | plane->alpha);
161
162 pnmr = PnMR_BM_MD | plane->format->pnmr;
163
164 /* Disable color keying when requested. YUV formats have the
165 * PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
166 * automatically.
167 */
168 if ((plane->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
169 pnmr |= PnMR_SPIM_TP_OFF;
170
171 /* For packed YUV formats we need to select the U/V order. */
172 if (plane->format->fourcc == DRM_FORMAT_YUYV)
173 pnmr |= PnMR_YCDF_YUYV;
174
175 rcar_du_plane_write(rcdu, index, PnMR, pnmr);
176
177 switch (plane->format->fourcc) {
178 case DRM_FORMAT_RGB565:
179 colorkey = ((plane->colorkey & 0xf80000) >> 8)
180 | ((plane->colorkey & 0x00fc00) >> 5)
181 | ((plane->colorkey & 0x0000f8) >> 3);
182 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
183 break;
184
185 case DRM_FORMAT_ARGB1555:
186 case DRM_FORMAT_XRGB1555:
187 colorkey = ((plane->colorkey & 0xf80000) >> 9)
188 | ((plane->colorkey & 0x00f800) >> 6)
189 | ((plane->colorkey & 0x0000f8) >> 3);
190 rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
191 break;
192
193 case DRM_FORMAT_XRGB8888:
194 case DRM_FORMAT_ARGB8888:
195 rcar_du_plane_write(rcdu, index, PnTC3R,
196 PnTC3R_CODE | (plane->colorkey & 0xffffff));
197 break;
198 }
199}
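The RGB565 shifts above move the top 5/6/5 bits of each 8-bit channel into place: bits 23:19 to 15:11, 15:10 to 10:5 and 7:3 to 4:0. A standalone check with a hypothetical key:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t key = 0x00ff8040;	/* hypothetical RGB888 color key */
		uint32_t rgb565 = ((key & 0xf80000) >> 8)
				| ((key & 0x00fc00) >> 5)
				| ((key & 0x0000f8) >> 3);

		printf("RGB565 key = 0x%04x\n", rgb565);	/* 0xfc08 */
		return 0;
	}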
200
201static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
202 unsigned int index)
203{
204 struct rcar_du_device *rcdu = plane->dev;
205 u32 ddcr2 = PnDDCR2_CODE;
206 u32 ddcr4;
207 u32 mwr;
208
209 /* Data format
210 *
211 * The data format is selected by the DDDF field in PnMR and the EDF
212 * field in DDCR4.
213 */
214 ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4);
215 ddcr4 &= ~PnDDCR4_EDF_MASK;
216 ddcr4 |= plane->format->edf | PnDDCR4_CODE;
217
218 rcar_du_plane_setup_mode(plane, index);
219
220 if (plane->format->planes == 2) {
221 if (plane->hwindex != index) {
222 if (plane->format->fourcc == DRM_FORMAT_NV12 ||
223 plane->format->fourcc == DRM_FORMAT_NV21)
224 ddcr2 |= PnDDCR2_Y420;
225
226 if (plane->format->fourcc == DRM_FORMAT_NV21)
227 ddcr2 |= PnDDCR2_NV21;
228
229 ddcr2 |= PnDDCR2_DIVU;
230 } else {
231 ddcr2 |= PnDDCR2_DIVY;
232 }
233 }
234
235 rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2);
236 rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4);
237
238 /* Memory pitch (expressed in pixels) */
239 if (plane->format->planes == 2)
240 mwr = plane->pitch;
241 else
242 mwr = plane->pitch * 8 / plane->format->bpp;
243
244 rcar_du_plane_write(rcdu, index, PnMWR, mwr);
245
246 /* Destination position and size */
247 rcar_du_plane_write(rcdu, index, PnDSXR, plane->width);
248 rcar_du_plane_write(rcdu, index, PnDSYR, plane->height);
249 rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x);
250 rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y);
251
252 /* Wrap-around and blinking, disabled */
253 rcar_du_plane_write(rcdu, index, PnWASPR, 0);
254 rcar_du_plane_write(rcdu, index, PnWAMWR, 4095);
255 rcar_du_plane_write(rcdu, index, PnBTR, 0);
256 rcar_du_plane_write(rcdu, index, PnMLR, 0);
257}
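On the pitch conversion above: single-plane formats program PnMWR in pixels, so the byte pitch is scaled by 8 / bpp; a 1920-pixel XRGB8888 line has a 7680-byte pitch and yields 7680 * 8 / 32 = 1920. Two-plane formats pass the byte pitch through unchanged. In code:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pitch = 7680;	/* bytes per line, 1920 x XRGB8888 */
		unsigned int bpp = 32;

		printf("PnMWR = %u pixels\n", pitch * 8 / bpp);	/* 1920 */
		return 0;
	}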
258
259void rcar_du_plane_setup(struct rcar_du_plane *plane)
260{
261 __rcar_du_plane_setup(plane, plane->hwindex);
262 if (plane->format->planes == 2)
263 __rcar_du_plane_setup(plane, (plane->hwindex + 1) % 8);
264
265 rcar_du_plane_update_base(plane);
266}
267
268static int
269rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
270 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
271 unsigned int crtc_w, unsigned int crtc_h,
272 uint32_t src_x, uint32_t src_y,
273 uint32_t src_w, uint32_t src_h)
274{
275 struct rcar_du_plane *rplane = to_rcar_plane(plane);
276 struct rcar_du_device *rcdu = plane->dev->dev_private;
277 const struct rcar_du_format_info *format;
278 unsigned int nplanes;
279 int ret;
280
281 format = rcar_du_format_info(fb->pixel_format);
282 if (format == NULL) {
283 dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
284 fb->pixel_format);
285 return -EINVAL;
286 }
287
288 if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
289 dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
290 return -EINVAL;
291 }
292
293 nplanes = rplane->format ? rplane->format->planes : 0;
294
295 /* Reallocate hardware planes if the number of required planes has
296 * changed.
297 */
298 if (format->planes != nplanes) {
299 rcar_du_plane_release(rplane);
300 ret = rcar_du_plane_reserve(rplane, format);
301 if (ret < 0)
302 return ret;
303 }
304
305 rplane->crtc = crtc;
306 rplane->format = format;
307 rplane->pitch = fb->pitches[0];
308
309 rplane->src_x = src_x >> 16;
310 rplane->src_y = src_y >> 16;
311 rplane->dst_x = crtc_x;
312 rplane->dst_y = crtc_y;
313 rplane->width = crtc_w;
314 rplane->height = crtc_h;
315
316 rcar_du_plane_compute_base(rplane, fb);
317 rcar_du_plane_setup(rplane);
318
319 mutex_lock(&rcdu->planes.lock);
320 rplane->enabled = true;
321 rcar_du_crtc_update_planes(rplane->crtc);
322 mutex_unlock(&rcdu->planes.lock);
323
324 return 0;
325}
326
327static int rcar_du_plane_disable(struct drm_plane *plane)
328{
329 struct rcar_du_device *rcdu = plane->dev->dev_private;
330 struct rcar_du_plane *rplane = to_rcar_plane(plane);
331
332 if (!rplane->enabled)
333 return 0;
334
335 mutex_lock(&rcdu->planes.lock);
336 rplane->enabled = false;
337 rcar_du_crtc_update_planes(rplane->crtc);
338 mutex_unlock(&rcdu->planes.lock);
339
340 rcar_du_plane_release(rplane);
341
342 rplane->crtc = NULL;
343 rplane->format = NULL;
344
345 return 0;
346}
347
348/* Both the .set_property and the .update_plane operations are called with the
349 * mode_config lock held. There is thus no need to explicitly protect access to
350 * the alpha and colorkey fields and the mode register.
351 */
352static void rcar_du_plane_set_alpha(struct rcar_du_plane *plane, u32 alpha)
353{
354 if (plane->alpha == alpha)
355 return;
356
357 plane->alpha = alpha;
358 if (!plane->enabled || plane->format->fourcc != DRM_FORMAT_XRGB1555)
359 return;
360
361 rcar_du_plane_setup_mode(plane, plane->hwindex);
362}
363
364static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
365 u32 colorkey)
366{
367 if (plane->colorkey == colorkey)
368 return;
369
370 plane->colorkey = colorkey;
371 if (!plane->enabled)
372 return;
373
374 rcar_du_plane_setup_mode(plane, plane->hwindex);
375}
376
377static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
378 unsigned int zpos)
379{
380 struct rcar_du_device *rcdu = plane->dev;
381
382 mutex_lock(&rcdu->planes.lock);
383 if (plane->zpos == zpos)
384 goto done;
385
386 plane->zpos = zpos;
387 if (!plane->enabled)
388 goto done;
389
390 rcar_du_crtc_update_planes(plane->crtc);
391
392done:
393 mutex_unlock(&rcdu->planes.lock);
394}
395
396static int rcar_du_plane_set_property(struct drm_plane *plane,
397 struct drm_property *property,
398 uint64_t value)
399{
400 struct rcar_du_device *rcdu = plane->dev->dev_private;
401 struct rcar_du_plane *rplane = to_rcar_plane(plane);
402
403 if (property == rcdu->planes.alpha)
404 rcar_du_plane_set_alpha(rplane, value);
405 else if (property == rcdu->planes.colorkey)
406 rcar_du_plane_set_colorkey(rplane, value);
407 else if (property == rcdu->planes.zpos)
408 rcar_du_plane_set_zpos(rplane, value);
409 else
410 return -EINVAL;
411
412 return 0;
413}
414
415static const struct drm_plane_funcs rcar_du_plane_funcs = {
416 .update_plane = rcar_du_plane_update,
417 .disable_plane = rcar_du_plane_disable,
418 .set_property = rcar_du_plane_set_property,
419 .destroy = drm_plane_cleanup,
420};
421
422static const uint32_t formats[] = {
423 DRM_FORMAT_RGB565,
424 DRM_FORMAT_ARGB1555,
425 DRM_FORMAT_XRGB1555,
426 DRM_FORMAT_XRGB8888,
427 DRM_FORMAT_ARGB8888,
428 DRM_FORMAT_UYVY,
429 DRM_FORMAT_YUYV,
430 DRM_FORMAT_NV12,
431 DRM_FORMAT_NV21,
432 DRM_FORMAT_NV16,
433};
434
435int rcar_du_plane_init(struct rcar_du_device *rcdu)
436{
437 unsigned int i;
438
439 mutex_init(&rcdu->planes.lock);
440 rcdu->planes.free = 0xff;
441
442 rcdu->planes.alpha =
443 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
444 if (rcdu->planes.alpha == NULL)
445 return -ENOMEM;
446
447 /* The color key is expressed as an RGB888 triplet stored in a 32-bit
448 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
449 * or enable (1) source color keying.
450 */
451 rcdu->planes.colorkey =
452 drm_property_create_range(rcdu->ddev, 0, "colorkey",
453 0, 0x01ffffff);
454 if (rcdu->planes.colorkey == NULL)
455 return -ENOMEM;
456
457 rcdu->planes.zpos =
458 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
459 if (rcdu->planes.zpos == NULL)
460 return -ENOMEM;
461
462 for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
463 struct rcar_du_plane *plane = &rcdu->planes.planes[i];
464
465 plane->dev = rcdu;
466 plane->hwindex = -1;
467 plane->alpha = 255;
468 plane->colorkey = RCAR_DU_COLORKEY_NONE;
469 plane->zpos = 0;
470 }
471
472 return 0;
473}
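Following the comment above, user space packs the colorkey property as an XRGB8888 triplet with bit 24 enabling source color keying. A hypothetical value that keys on pure red:

	#include <stdint.h>
	#include <stdio.h>

	#define RCAR_DU_COLORKEY_SOURCE	(1 << 24)

	int main(void)
	{
		/* enable source color keying, key on RGB888 pure red */
		uint64_t value = RCAR_DU_COLORKEY_SOURCE | 0x00ff0000;

		printf("colorkey = 0x%08llx\n",
		       (unsigned long long)value);	/* 0x01ff0000 */
		return 0;
	}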
474
475int rcar_du_plane_register(struct rcar_du_device *rcdu)
476{
477 unsigned int i;
478 int ret;
479
480 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
481 struct rcar_du_kms_plane *plane;
482
483 plane = devm_kzalloc(rcdu->dev, sizeof(*plane), GFP_KERNEL);
484 if (plane == NULL)
485 return -ENOMEM;
486
487 plane->hwplane = &rcdu->planes.planes[i + 2];
488 plane->hwplane->zpos = 1;
489
490 ret = drm_plane_init(rcdu->ddev, &plane->plane,
491 (1 << rcdu->num_crtcs) - 1,
492 &rcar_du_plane_funcs, formats,
493 ARRAY_SIZE(formats), false);
494 if (ret < 0)
495 return ret;
496
497 drm_object_attach_property(&plane->plane.base,
498 rcdu->planes.alpha, 255);
499 drm_object_attach_property(&plane->plane.base,
500 rcdu->planes.colorkey,
501 RCAR_DU_COLORKEY_NONE);
502 drm_object_attach_property(&plane->plane.base,
503 rcdu->planes.zpos, 1);
504 }
505
506 return 0;
507}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
new file mode 100644
index 000000000000..5397dba2fe57
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -0,0 +1,67 @@
1/*
2 * rcar_du_plane.h -- R-Car Display Unit Planes
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_PLANE_H__
15#define __RCAR_DU_PLANE_H__
16
17struct drm_crtc;
18struct drm_framebuffer;
19struct rcar_du_device;
20struct rcar_du_format_info;
21
22/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
23 * using KMS planes requires at least one of the CRTCs to be enabled, no more
24 * than 7 KMS planes can be available. We thus create 7 KMS planes and
25 * 9 software planes (one for each KMS plane and one for each CRTC).
26 */
27
28#define RCAR_DU_NUM_KMS_PLANES 7
29#define RCAR_DU_NUM_HW_PLANES 8
30#define RCAR_DU_NUM_SW_PLANES 9
31
32struct rcar_du_plane {
33 struct rcar_du_device *dev;
34 struct drm_crtc *crtc;
35
36 bool enabled;
37
38 int hwindex; /* 0-based, -1 means unused */
39 unsigned int alpha;
40 unsigned int colorkey;
41 unsigned int zpos;
42
43 const struct rcar_du_format_info *format;
44
45 unsigned long dma[2];
46 unsigned int pitch;
47
48 unsigned int width;
49 unsigned int height;
50
51 unsigned int src_x;
52 unsigned int src_y;
53 unsigned int dst_x;
54 unsigned int dst_y;
55};
56
57int rcar_du_plane_init(struct rcar_du_device *rcdu);
58int rcar_du_plane_register(struct rcar_du_device *rcdu);
59void rcar_du_plane_setup(struct rcar_du_plane *plane);
60void rcar_du_plane_update_base(struct rcar_du_plane *plane);
61void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
62 struct drm_framebuffer *fb);
63int rcar_du_plane_reserve(struct rcar_du_plane *plane,
64 const struct rcar_du_format_info *format);
65void rcar_du_plane_release(struct rcar_du_plane *plane);
66
67#endif /* __RCAR_DU_PLANE_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
new file mode 100644
index 000000000000..69f21f19b51c
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -0,0 +1,445 @@
1/*
2 * rcar_du_regs.h -- R-Car Display Unit Registers Definitions
3 *
4 * Copyright (C) 2013 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation.
11 */
12
13#ifndef __RCAR_DU_REGS_H__
14#define __RCAR_DU_REGS_H__
15
16#define DISP2_REG_OFFSET 0x30000
17
18/* -----------------------------------------------------------------------------
19 * Display Control Registers
20 */
21
22#define DSYSR 0x00000 /* display 1 */
23#define D2SYSR 0x30000 /* display 2 */
24#define DSYSR_ILTS (1 << 29)
25#define DSYSR_DSEC (1 << 20)
26#define DSYSR_IUPD (1 << 16)
27#define DSYSR_DRES (1 << 9)
28#define DSYSR_DEN (1 << 8)
29#define DSYSR_TVM_MASTER (0 << 6)
30#define DSYSR_TVM_SWITCH (1 << 6)
31#define DSYSR_TVM_TVSYNC (2 << 6)
32#define DSYSR_TVM_MASK (3 << 6)
33#define DSYSR_SCM_INT_NONE (0 << 4)
34#define DSYSR_SCM_INT_SYNC (2 << 4)
35#define DSYSR_SCM_INT_VIDEO (3 << 4)
36
37#define DSMR 0x00004
38#define D2SMR 0x30004
39#define DSMR_VSPM (1 << 28)
40#define DSMR_ODPM (1 << 27)
41#define DSMR_DIPM_DISP (0 << 25)
42#define DSMR_DIPM_CSYNC (1 << 25)
43#define DSMR_DIPM_DE (3 << 25)
44#define DSMR_DIPM_MASK (3 << 25)
45#define DSMR_CSPM (1 << 24)
46#define DSMR_DIL (1 << 19)
47#define DSMR_VSL (1 << 18)
48#define DSMR_HSL (1 << 17)
49#define DSMR_DDIS (1 << 16)
50#define DSMR_CDEL (1 << 15)
51#define DSMR_CDEM_CDE (0 << 13)
52#define DSMR_CDEM_LOW (2 << 13)
53#define DSMR_CDEM_HIGH (3 << 13)
54#define DSMR_CDEM_MASK (3 << 13)
55#define DSMR_CDED (1 << 12)
56#define DSMR_ODEV (1 << 8)
57#define DSMR_CSY_VH_OR (0 << 6)
58#define DSMR_CSY_333 (2 << 6)
59#define DSMR_CSY_222 (3 << 6)
60#define DSMR_CSY_MASK (3 << 6)
61
62#define DSSR 0x00008
63#define D2SSR 0x30008
64#define DSSR_VC1FB_DSA0 (0 << 30)
65#define DSSR_VC1FB_DSA1 (1 << 30)
66#define DSSR_VC1FB_DSA2 (2 << 30)
67#define DSSR_VC1FB_INIT (3 << 30)
68#define DSSR_VC1FB_MASK (3 << 30)
69#define DSSR_VC0FB_DSA0 (0 << 28)
70#define DSSR_VC0FB_DSA1 (1 << 28)
71#define DSSR_VC0FB_DSA2 (2 << 28)
72#define DSSR_VC0FB_INIT (3 << 28)
73#define DSSR_VC0FB_MASK (3 << 28)
74#define DSSR_DFB(n) (1 << ((n)+15))
75#define DSSR_TVR (1 << 15)
76#define DSSR_FRM (1 << 14)
77#define DSSR_VBK (1 << 11)
78#define DSSR_RINT (1 << 9)
79#define DSSR_HBK (1 << 8)
80#define DSSR_ADC(n) (1 << ((n)-1))
81
82#define DSRCR 0x0000c
83#define D2SRCR 0x3000c
84#define DSRCR_TVCL (1 << 15)
85#define DSRCR_FRCL (1 << 14)
86#define DSRCR_VBCL (1 << 11)
87#define DSRCR_RICL (1 << 9)
88#define DSRCR_HBCL (1 << 8)
89#define DSRCR_ADCL(n) (1 << ((n)-1))
90#define DSRCR_MASK 0x0000cbff
91
92#define DIER 0x00010
93#define D2IER 0x30010
94#define DIER_TVE (1 << 15)
95#define DIER_FRE (1 << 14)
96#define DIER_VBE (1 << 11)
97#define DIER_RIE (1 << 9)
98#define DIER_HBE (1 << 8)
99#define DIER_ADCE(n) (1 << ((n)-1))
100
101#define CPCR 0x00014
102#define CPCR_CP4CE (1 << 19)
103#define CPCR_CP3CE (1 << 18)
104#define CPCR_CP2CE (1 << 17)
105#define CPCR_CP1CE (1 << 16)
106
107#define DPPR 0x00018
108#define DPPR_DPE(n) (1 << ((n)*4-1))
109#define DPPR_DPS(n, p) (((p)-1) << DPPR_DPS_SHIFT(n))
110#define DPPR_DPS_SHIFT(n) (((n)-1)*4)
111#define DPPR_BPP16 (DPPR_DPE(8) | DPPR_DPS(8, 1)) /* plane1 */
112#define DPPR_BPP32_P1 (DPPR_DPE(7) | DPPR_DPS(7, 1))
113#define DPPR_BPP32_P2 (DPPR_DPE(8) | DPPR_DPS(8, 2))
114#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
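For display plane n (1-based), DPPR_DPE(n) sets an enable bit at position n * 4 - 1 and DPPR_DPS(n, p) places priority slot p in the 4-bit group at (n - 1) * 4. Expanding the composite values as a standalone check (1u instead of 1 to keep the shift out of the sign bit):

	#include <stdio.h>

	#define DPPR_DPS_SHIFT(n)	(((n)-1)*4)
	#define DPPR_DPE(n)		(1u << ((n)*4-1))
	#define DPPR_DPS(n, p)		((unsigned)((p)-1) << DPPR_DPS_SHIFT(n))

	int main(void)
	{
		/* plane 8, slot 1: 0x80000000 */
		printf("DPPR_BPP16 = 0x%08x\n", DPPR_DPE(8) | DPPR_DPS(8, 1));
		/* planes 7 and 8 in slots 1 and 2: 0x98000000 */
		printf("DPPR_BPP32 = 0x%08x\n",
		       DPPR_DPE(7) | DPPR_DPS(7, 1) |
		       DPPR_DPE(8) | DPPR_DPS(8, 2));
		return 0;
	}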
115
116#define DEFR 0x00020
117#define D2EFR 0x30020
118#define DEFR_CODE (0x7773 << 16)
119#define DEFR_EXSL (1 << 12)
120#define DEFR_EXVL (1 << 11)
121#define DEFR_EXUP (1 << 5)
122#define DEFR_VCUP (1 << 4)
123#define DEFR_DEFE (1 << 0)
124
125#define DAPCR 0x00024
126#define DAPCR_CODE (0x7773 << 16)
127#define DAPCR_AP2E (1 << 4)
128#define DAPCR_AP1E (1 << 0)
129
130#define DCPCR 0x00028
131#define DCPCR_CODE (0x7773 << 16)
132#define DCPCR_CA2B (1 << 13)
133#define DCPCR_CD2F (1 << 12)
134#define DCPCR_DC2E (1 << 8)
135#define DCPCR_CAB (1 << 5)
136#define DCPCR_CDF (1 << 4)
137#define DCPCR_DCE (1 << 0)
138
139#define DEFR2 0x00034
140#define D2EFR2 0x30034
141#define DEFR2_CODE (0x7775 << 16)
142#define DEFR2_DEFE2G (1 << 0)
143
144#define DEFR3 0x00038
145#define D2EFR3 0x30038
146#define DEFR3_CODE (0x7776 << 16)
147#define DEFR3_EVDA (1 << 14)
148#define DEFR3_EVDM_1 (1 << 12)
149#define DEFR3_EVDM_2 (2 << 12)
150#define DEFR3_EVDM_3 (3 << 12)
151#define DEFR3_VMSM2_EMA (1 << 6)
152#define DEFR3_VMSM1_ENA (1 << 4)
153#define DEFR3_DEFE3 (1 << 0)
154
155#define DEFR4 0x0003c
156#define D2EFR4 0x3003c
157#define DEFR4_CODE (0x7777 << 16)
158#define DEFR4_LRUO (1 << 5)
159#define DEFR4_SPCE (1 << 4)
160
161#define DVCSR 0x000d0
162#define DVCSR_VCnFB2_DSA0(n) (0 << ((n)*2+16))
163#define DVCSR_VCnFB2_DSA1(n) (1 << ((n)*2+16))
164#define DVCSR_VCnFB2_DSA2(n) (2 << ((n)*2+16))
165#define DVCSR_VCnFB2_INIT(n) (3 << ((n)*2+16))
166#define DVCSR_VCnFB2_MASK(n) (3 << ((n)*2+16))
167#define DVCSR_VCnFB_DSA0(n) (0 << ((n)*2))
168#define DVCSR_VCnFB_DSA1(n) (1 << ((n)*2))
169#define DVCSR_VCnFB_DSA2(n) (2 << ((n)*2))
170#define DVCSR_VCnFB_INIT(n) (3 << ((n)*2))
171#define DVCSR_VCnFB_MASK(n) (3 << ((n)*2))
172
173#define DEFR5 0x000e0
174#define DEFR5_CODE (0x66 << 24)
175#define DEFR5_YCRGB2_DIS (0 << 14)
176#define DEFR5_YCRGB2_PRI1 (1 << 14)
177#define DEFR5_YCRGB2_PRI2 (2 << 14)
178#define DEFR5_YCRGB2_PRI3 (3 << 14)
179#define DEFR5_YCRGB2_MASK (3 << 14)
180#define DEFR5_YCRGB1_DIS (0 << 12)
181#define DEFR5_YCRGB1_PRI1 (1 << 12)
182#define DEFR5_YCRGB1_PRI2 (2 << 12)
183#define DEFR5_YCRGB1_PRI3 (3 << 12)
184#define DEFR5_YCRGB1_MASK (3 << 12)
185#define DEFR5_DEFE5 (1 << 0)
186
187#define DDLTR 0x000e4
188#define DDLTR_CODE (0x7766 << 16)
189#define DDLTR_DLAR2 (1 << 6)
190#define DDLTR_DLAY2 (1 << 5)
191#define DDLTR_DLAY1 (1 << 1)
192
193#define DEFR6 0x000e8
194#define DEFR6_CODE (0x7778 << 16)
195#define DEFR6_ODPM22_D2SMR (0 << 10)
196#define DEFR6_ODPM22_DISP (2 << 10)
197#define DEFR6_ODPM22_CDE (3 << 10)
198#define DEFR6_ODPM22_MASK (3 << 10)
199#define DEFR6_ODPM12_DSMR (0 << 8)
200#define DEFR6_ODPM12_DISP (2 << 8)
201#define DEFR6_ODPM12_CDE (3 << 8)
202#define DEFR6_ODPM12_MASK (3 << 8)
203#define DEFR6_TCNE2 (1 << 6)
204#define DEFR6_MLOS1 (1 << 2)
205#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
206
207/* -----------------------------------------------------------------------------
208 * Display Timing Generation Registers
209 */
210
211#define HDSR 0x00040
212#define HDER 0x00044
213#define VDSR 0x00048
214#define VDER 0x0004c
215#define HCR 0x00050
216#define HSWR 0x00054
217#define VCR 0x00058
218#define VSPR 0x0005c
219#define EQWR 0x00060
220#define SPWR 0x00064
221#define CLAMPSR 0x00070
222#define CLAMPWR 0x00074
223#define DESR 0x00078
224#define DEWR 0x0007c
225
226/* -----------------------------------------------------------------------------
227 * Display Attribute Registers
228 */
229
230#define CP1TR 0x00080
231#define CP2TR 0x00084
232#define CP3TR 0x00088
233#define CP4TR 0x0008c
234
235#define DOOR 0x00090
236#define DOOR_RGB(r, g, b) (((r) << 18) | ((g) << 10) | ((b) << 2))
237#define CDER 0x00094
238#define CDER_RGB(r, g, b) (((r) << 18) | ((g) << 10) | ((b) << 2))
239#define BPOR 0x00098
240#define BPOR_RGB(r, g, b) (((r) << 18) | ((g) << 10) | ((b) << 2))
241
242#define RINTOFSR 0x0009c
243
244#define DSHPR 0x000c8
245#define DSHPR_CODE (0x7776 << 16)
246#define DSHPR_PRIH (0xa << 4)
247#define DSHPR_PRIL_BPP16 (0x8 << 0)
248#define DSHPR_PRIL_BPP32 (0x9 << 0)
249
250/* -----------------------------------------------------------------------------
251 * Display Plane Registers
252 */
253
254#define PLANE_OFF 0x00100
255
256#define PnMR 0x00100 /* plane 1 */
257#define PnMR_VISL_VIN0 (0 << 26) /* use Video Input 0 */
258#define PnMR_VISL_VIN1 (1 << 26) /* use Video Input 1 */
259#define PnMR_VISL_VIN2 (2 << 26) /* use Video Input 2 */
260#define PnMR_VISL_VIN3 (3 << 26) /* use Video Input 3 */
261#define PnMR_YCDF_YUYV (1 << 20) /* YUYV format */
262#define PnMR_TC_R (0 << 17) /* Transparent color is PnTC1R */
263#define PnMR_TC_CP (1 << 17) /* Transparent color is color palette */
264#define PnMR_WAE (1 << 16) /* Wrap around Enable */
265#define PnMR_SPIM_TP (0 << 12) /* Transparent Color */
266#define PnMR_SPIM_ALP (1 << 12) /* Alpha Blending */
267#define PnMR_SPIM_EOR (2 << 12) /* EOR */
268#define PnMR_SPIM_TP_OFF (1 << 14) /* No Transparent Color */
269#define PnMR_CPSL_CP1 (0 << 8) /* Color Palette selected 1 */
270#define PnMR_CPSL_CP2 (1 << 8) /* Color Palette selected 2 */
271#define PnMR_CPSL_CP3 (2 << 8) /* Color Palette selected 3 */
272#define PnMR_CPSL_CP4 (3 << 8) /* Color Palette selected 4 */
273#define PnMR_DC (1 << 7) /* Display Area Change */
274#define PnMR_BM_MD (0 << 4) /* Manual Display Change Mode */
275#define PnMR_BM_AR (1 << 4) /* Auto Rendering Mode */
276#define PnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
277#define PnMR_BM_VC (3 << 4) /* Video Capture Mode */
278#define PnMR_DDDF_8BPP (0 << 0) /* 8bit */
279#define PnMR_DDDF_16BPP (1 << 0) /* 16bit or 32bit */
280#define PnMR_DDDF_ARGB (2 << 0) /* ARGB */
281#define PnMR_DDDF_YC (3 << 0) /* YC */
282#define PnMR_DDDF_MASK (3 << 0)
283
284#define PnMWR 0x00104
285
286#define PnALPHAR 0x00108
287#define PnALPHAR_ABIT_1 (0 << 12)
288#define PnALPHAR_ABIT_0 (1 << 12)
289#define PnALPHAR_ABIT_X (2 << 12)
290
291#define PnDSXR 0x00110
292#define PnDSYR 0x00114
293#define PnDPXR 0x00118
294#define PnDPYR 0x0011c
295
296#define PnDSA0R 0x00120
297#define PnDSA1R 0x00124
298#define PnDSA2R 0x00128
299#define PnDSA_MASK 0xfffffff0
300
301#define PnSPXR 0x00130
302#define PnSPYR 0x00134
303#define PnWASPR 0x00138
304#define PnWAMWR 0x0013c
305
306#define PnBTR 0x00140
307
308#define PnTC1R 0x00144
309#define PnTC2R 0x00148
310#define PnTC3R 0x0014c
311#define PnTC3R_CODE (0x66 << 24)
312
313#define PnMLR 0x00150
314
315#define PnSWAPR 0x00180
316#define PnSWAPR_DIGN (1 << 4)
317#define PnSWAPR_SPQW (1 << 3)
318#define PnSWAPR_SPLW (1 << 2)
319#define PnSWAPR_SPWD (1 << 1)
320#define PnSWAPR_SPBY (1 << 0)
321
322#define PnDDCR 0x00184
323#define PnDDCR_CODE (0x7775 << 16)
324#define PnDDCR_LRGB1 (1 << 11)
325#define PnDDCR_LRGB0 (1 << 10)
326
327#define PnDDCR2 0x00188
328#define PnDDCR2_CODE (0x7776 << 16)
329#define PnDDCR2_NV21 (1 << 5)
330#define PnDDCR2_Y420 (1 << 4)
331#define PnDDCR2_DIVU (1 << 1)
332#define PnDDCR2_DIVY (1 << 0)
333
334#define PnDDCR4 0x00190
335#define PnDDCR4_CODE (0x7766 << 16)
336#define PnDDCR4_SDFS_RGB (0 << 4)
337#define PnDDCR4_SDFS_YC (5 << 4)
338#define PnDDCR4_SDFS_MASK (7 << 4)
339#define PnDDCR4_EDF_NONE (0 << 0)
340#define PnDDCR4_EDF_ARGB8888 (1 << 0)
341#define PnDDCR4_EDF_RGB888 (2 << 0)
342#define PnDDCR4_EDF_RGB666 (3 << 0)
343#define PnDDCR4_EDF_MASK (7 << 0)
344
345#define APnMR 0x0a100
346#define APnMR_WAE (1 << 16) /* Wrap around Enable */
347#define APnMR_DC (1 << 7) /* Display Area Change */
348#define APnMR_BM_MD (0 << 4) /* Manual Display Change Mode */
349#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
350
351#define APnMWR 0x0a104
352#define APnDSA0R 0x0a120
353#define APnDSA1R 0x0a124
354#define APnDSA2R 0x0a128
355#define APnMLR 0x0a150
356
357/* -----------------------------------------------------------------------------
358 * Display Capture Registers
359 */
360
361#define DCMWR 0x0c104
362#define DC2MWR 0x0c204
363#define DCSAR 0x0c120
364#define DC2SAR 0x0c220
365#define DCMLR 0x0c150
366#define DC2MLR 0x0c250
367
368/* -----------------------------------------------------------------------------
369 * Color Palette Registers
370 */
371
372#define CP1_000R 0x01000
373#define CP1_255R 0x013fc
374#define CP2_000R 0x02000
375#define CP2_255R 0x023fc
376#define CP3_000R 0x03000
377#define CP3_255R 0x033fc
378#define CP4_000R 0x04000
379#define CP4_255R 0x043fc
380
381/* -----------------------------------------------------------------------------
382 * External Synchronization Control Registers
383 */
384
385#define ESCR 0x10000
386#define ESCR2 0x31000
387#define ESCR_DCLKOINV (1 << 25)
388#define ESCR_DCLKSEL_DCLKIN (0 << 20)
389#define ESCR_DCLKSEL_CLKS (1 << 20)
390#define ESCR_DCLKSEL_MASK (1 << 20)
391#define ESCR_DCLKDIS (1 << 16)
392#define ESCR_SYNCSEL_OFF (0 << 8)
393#define ESCR_SYNCSEL_EXVSYNC (2 << 8)
394#define ESCR_SYNCSEL_EXHSYNC (3 << 8)
395#define ESCR_FRQSEL_MASK (0x3f << 0)
396
397#define OTAR 0x10004
398#define OTAR2 0x31004
399
400/* -----------------------------------------------------------------------------
401 * Dual Display Output Control Registers
402 */
403
404#define DORCR 0x11000
405#define DORCR_PG2T (1 << 30)
406#define DORCR_DK2S (1 << 28)
407#define DORCR_PG2D_DS1 (0 << 24)
408#define DORCR_PG2D_DS2 (1 << 24)
409#define DORCR_PG2D_FIX0 (2 << 24)
410#define DORCR_PG2D_DOOR (3 << 24)
411#define DORCR_PG2D_MASK (3 << 24)
412#define DORCR_DR1D (1 << 21)
413#define DORCR_PG1D_DS1 (0 << 16)
414#define DORCR_PG1D_DS2 (1 << 16)
415#define DORCR_PG1D_FIX0 (2 << 16)
416#define DORCR_PG1D_DOOR (3 << 16)
417#define DORCR_PG1D_MASK (3 << 16)
418#define DORCR_RGPV (1 << 4)
419#define DORCR_DPRS (1 << 0)
420
421#define DPTSR 0x11004
422#define DPTSR_PnDK(n) (1 << ((n) + 16))
423#define DPTSR_PnTS(n) (1 << (n))
424
425#define DAPTSR 0x11008
426#define DAPTSR_APnDK(n) (1 << ((n) + 16))
427#define DAPTSR_APnTS(n) (1 << (n))
428
429#define DS1PR 0x11020
430#define DS2PR 0x11024
431
432/* -----------------------------------------------------------------------------
433 * YC-RGB Conversion Coefficient Registers
434 */
435
436#define YNCR 0x11080
437#define YNOR 0x11084
438#define CRNOR 0x11088
439#define CBNOR 0x1108c
440#define RCRCR 0x11090
441#define GCRCR 0x11094
442#define GCBCR 0x11098
443#define BCBCR 0x1109c
444
445#endif /* __RCAR_DU_REGS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vga.c
new file mode 100644
index 000000000000..327289ec380d
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vga.c
@@ -0,0 +1,149 @@
1/*
2 * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17
18#include "rcar_du_drv.h"
19#include "rcar_du_kms.h"
20#include "rcar_du_vga.h"
21
22/* -----------------------------------------------------------------------------
23 * Connector
24 */
25
26static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
27{
28 return 0;
29}
30
31static int rcar_du_vga_connector_mode_valid(struct drm_connector *connector,
32 struct drm_display_mode *mode)
33{
34 return MODE_OK;
35}
36
37static const struct drm_connector_helper_funcs connector_helper_funcs = {
38 .get_modes = rcar_du_vga_connector_get_modes,
39 .mode_valid = rcar_du_vga_connector_mode_valid,
40 .best_encoder = rcar_du_connector_best_encoder,
41};
42
43static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
44{
45 drm_sysfs_connector_remove(connector);
46 drm_connector_cleanup(connector);
47}
48
49static enum drm_connector_status
50rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
51{
52 return connector_status_unknown;
53}
54
55static const struct drm_connector_funcs connector_funcs = {
56 .dpms = drm_helper_connector_dpms,
57 .detect = rcar_du_vga_connector_detect,
58 .fill_modes = drm_helper_probe_single_connector_modes,
59 .destroy = rcar_du_vga_connector_destroy,
60};
61
62static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
63 struct rcar_du_encoder *renc)
64{
65 struct rcar_du_connector *rcon;
66 struct drm_connector *connector;
67 int ret;
68
69 rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
70 if (rcon == NULL)
71 return -ENOMEM;
72
73 connector = &rcon->connector;
74 connector->display_info.width_mm = 0;
75 connector->display_info.height_mm = 0;
76
77 ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
78 DRM_MODE_CONNECTOR_VGA);
79 if (ret < 0)
80 return ret;
81
82 drm_connector_helper_add(connector, &connector_helper_funcs);
83 ret = drm_sysfs_connector_add(connector);
84 if (ret < 0)
85 return ret;
86
87 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
88 drm_object_property_set_value(&connector->base,
89 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
90
91 ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
92 if (ret < 0)
93 return ret;
94
95 connector->encoder = &renc->encoder;
96 rcon->encoder = renc;
97
98 return 0;
99}
100
101/* -----------------------------------------------------------------------------
102 * Encoder
103 */
104
105static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
106{
107}
108
109static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
110 const struct drm_display_mode *mode,
111 struct drm_display_mode *adjusted_mode)
112{
113 return true;
114}
115
116static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
117 .dpms = rcar_du_vga_encoder_dpms,
118 .mode_fixup = rcar_du_vga_encoder_mode_fixup,
119 .prepare = rcar_du_encoder_mode_prepare,
120 .commit = rcar_du_encoder_mode_commit,
121 .mode_set = rcar_du_encoder_mode_set,
122};
123
124static const struct drm_encoder_funcs encoder_funcs = {
125 .destroy = drm_encoder_cleanup,
126};
127
128int rcar_du_vga_init(struct rcar_du_device *rcdu,
129 const struct rcar_du_encoder_vga_data *data,
130 unsigned int output)
131{
132 struct rcar_du_encoder *renc;
133 int ret;
134
135 renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
136 if (renc == NULL)
137 return -ENOMEM;
138
139 renc->output = output;
140
141 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
142 DRM_MODE_ENCODER_DAC);
143 if (ret < 0)
144 return ret;
145
146 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
147
148 return rcar_du_vga_connector_init(rcdu, renc);
149}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vga.h
new file mode 100644
index 000000000000..66b4d2d7190d
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vga.h
@@ -0,0 +1,24 @@
1/*
2 * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_VGA_H__
15#define __RCAR_DU_VGA_H__
16
17struct rcar_du_device;
18struct rcar_du_encoder_vga_data;
19
20int rcar_du_vga_init(struct rcar_du_device *rcdu,
21 const struct rcar_du_encoder_vga_data *data,
22 unsigned int output);
23
24#endif /* __RCAR_DU_VGA_H__ */
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b55c1d661147..bd6b2cf508d5 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -570,9 +570,6 @@ int savage_driver_firstopen(struct drm_device *dev)
570 unsigned int fb_rsrc, aper_rsrc; 570 unsigned int fb_rsrc, aper_rsrc;
571 int ret = 0; 571 int ret = 0;
572 572
573 dev_priv->mtrr[0].handle = -1;
574 dev_priv->mtrr[1].handle = -1;
575 dev_priv->mtrr[2].handle = -1;
576 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 573 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
577 fb_rsrc = 0; 574 fb_rsrc = 0;
578 fb_base = pci_resource_start(dev->pdev, 0); 575 fb_base = pci_resource_start(dev->pdev, 0);
@@ -584,21 +581,14 @@ int savage_driver_firstopen(struct drm_device *dev)
584 if (pci_resource_len(dev->pdev, 0) == 0x08000000) { 581 if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
585 /* Don't make MMIO write-combining! We need 3 582 /* Don't make MMIO write-combining! We need 3
586 * MTRRs. */ 583 * MTRRs. */
587 dev_priv->mtrr[0].base = fb_base; 584 dev_priv->mtrr_handles[0] =
588 dev_priv->mtrr[0].size = 0x01000000; 585 arch_phys_wc_add(fb_base, 0x01000000);
589 dev_priv->mtrr[0].handle = 586 dev_priv->mtrr_handles[1] =
590 drm_mtrr_add(dev_priv->mtrr[0].base, 587 arch_phys_wc_add(fb_base + 0x02000000,
591 dev_priv->mtrr[0].size, DRM_MTRR_WC); 588 0x02000000);
592 dev_priv->mtrr[1].base = fb_base + 0x02000000; 589 dev_priv->mtrr_handles[2] =
593 dev_priv->mtrr[1].size = 0x02000000; 590 arch_phys_wc_add(fb_base + 0x04000000,
594 dev_priv->mtrr[1].handle = 591 0x04000000);
595 drm_mtrr_add(dev_priv->mtrr[1].base,
596 dev_priv->mtrr[1].size, DRM_MTRR_WC);
597 dev_priv->mtrr[2].base = fb_base + 0x04000000;
598 dev_priv->mtrr[2].size = 0x04000000;
599 dev_priv->mtrr[2].handle =
600 drm_mtrr_add(dev_priv->mtrr[2].base,
601 dev_priv->mtrr[2].size, DRM_MTRR_WC);
602 } else { 592 } else {
603 DRM_ERROR("strange pci_resource_len %08llx\n", 593 DRM_ERROR("strange pci_resource_len %08llx\n",
604 (unsigned long long) 594 (unsigned long long)
@@ -616,11 +606,9 @@ int savage_driver_firstopen(struct drm_device *dev)
616 if (pci_resource_len(dev->pdev, 1) == 0x08000000) { 606 if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
617 /* Can use one MTRR to cover both fb and 607 /* Can use one MTRR to cover both fb and
618 * aperture. */ 608 * aperture. */
619 dev_priv->mtrr[0].base = fb_base; 609 dev_priv->mtrr_handles[0] =
620 dev_priv->mtrr[0].size = 0x08000000; 610 arch_phys_wc_add(fb_base,
621 dev_priv->mtrr[0].handle = 611 0x08000000);
622 drm_mtrr_add(dev_priv->mtrr[0].base,
623 dev_priv->mtrr[0].size, DRM_MTRR_WC);
624 } else { 612 } else {
625 DRM_ERROR("strange pci_resource_len %08llx\n", 613 DRM_ERROR("strange pci_resource_len %08llx\n",
626 (unsigned long long) 614 (unsigned long long)
@@ -660,11 +648,10 @@ void savage_driver_lastclose(struct drm_device *dev)
660 drm_savage_private_t *dev_priv = dev->dev_private; 648 drm_savage_private_t *dev_priv = dev->dev_private;
661 int i; 649 int i;
662 650
663 for (i = 0; i < 3; ++i) 651 for (i = 0; i < 3; ++i) {
664 if (dev_priv->mtrr[i].handle >= 0) 652 arch_phys_wc_del(dev_priv->mtrr_handles[i]);
665 drm_mtrr_del(dev_priv->mtrr[i].handle, 653 dev_priv->mtrr_handles[i] = 0;
666 dev_priv->mtrr[i].base, 654 }
667 dev_priv->mtrr[i].size, DRM_MTRR_WC);
668} 655}
669 656
670int savage_driver_unload(struct drm_device *dev) 657int savage_driver_unload(struct drm_device *dev)
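The savage hunks above replace manual drm_mtrr_add/drm_mtrr_del bookkeeping (base, size and handle per region) with the arch_phys_wc_add/arch_phys_wc_del pair, which only needs the returned handle and degrades to a no-op where write-combining MTRRs are unnecessary. A minimal sketch of the resulting pattern, mirroring the calls in the hunk (kernel code, not a standalone program):

	int mtrr_handles[3];

	static void example_wc_add(unsigned long fb_base)
	{
		mtrr_handles[0] = arch_phys_wc_add(fb_base, 0x01000000);
		mtrr_handles[1] = arch_phys_wc_add(fb_base + 0x02000000,
						   0x02000000);
		mtrr_handles[2] = arch_phys_wc_add(fb_base + 0x04000000,
						   0x04000000);
	}

	static void example_wc_del(void)
	{
		int i;

		/* The removed "handle >= 0" checks suggest arch_phys_wc_del()
		 * tolerates unused handles (assumption). */
		for (i = 0; i < 3; i++) {
			arch_phys_wc_del(mtrr_handles[i]);
			mtrr_handles[i] = 0;
		}
	}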
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index df2aac6636f7..c05082a59f6f 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -160,10 +160,7 @@ typedef struct drm_savage_private {
160 drm_local_map_t *cmd_dma; 160 drm_local_map_t *cmd_dma;
161 drm_local_map_t fake_dma; 161 drm_local_map_t fake_dma;
162 162
163 struct { 163 int mtrr_handles[3];
164 int handle;
165 unsigned long base, size;
166 } mtrr[3];
167 164
168 /* BCI and status-related stuff */ 165 /* BCI and status-related stuff */
169 volatile uint32_t *status_ptr, *bci_ptr; 166 volatile uint32_t *status_ptr, *bci_ptr;
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 7e7d52b2a2fc..ca498d151a76 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,6 @@
1config DRM_SHMOBILE 1config DRM_SHMOBILE
2 tristate "DRM Support for SH Mobile" 2 tristate "DRM Support for SH Mobile"
3 depends on DRM && (SUPERH || ARCH_SHMOBILE) 3 depends on DRM && (ARM || SUPERH)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER 5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index f6e0b5395051..edc10181f551 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -90,7 +90,7 @@ static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
90 return -EINVAL; 90 return -EINVAL;
91 } 91 }
92 92
93 clk = clk_get(sdev->dev, clkname); 93 clk = devm_clk_get(sdev->dev, clkname);
94 if (IS_ERR(clk)) { 94 if (IS_ERR(clk)) {
95 dev_err(sdev->dev, "cannot get dot clock %s\n", clkname); 95 dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
96 return PTR_ERR(clk); 96 return PTR_ERR(clk);
@@ -106,21 +106,12 @@ static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
106 106
107static int shmob_drm_unload(struct drm_device *dev) 107static int shmob_drm_unload(struct drm_device *dev)
108{ 108{
109 struct shmob_drm_device *sdev = dev->dev_private;
110
111 drm_kms_helper_poll_fini(dev); 109 drm_kms_helper_poll_fini(dev);
112 drm_mode_config_cleanup(dev); 110 drm_mode_config_cleanup(dev);
113 drm_vblank_cleanup(dev); 111 drm_vblank_cleanup(dev);
114 drm_irq_uninstall(dev); 112 drm_irq_uninstall(dev);
115 113
116 if (sdev->clock)
117 clk_put(sdev->clock);
118
119 if (sdev->mmio)
120 iounmap(sdev->mmio);
121
122 dev->dev_private = NULL; 114 dev->dev_private = NULL;
123 kfree(sdev);
124 115
125 return 0; 116 return 0;
126} 117}
@@ -139,7 +130,7 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
139 return -EINVAL; 130 return -EINVAL;
140 } 131 }
141 132
142 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); 133 sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
143 if (sdev == NULL) { 134 if (sdev == NULL) {
144 dev_err(dev->dev, "failed to allocate private data\n"); 135 dev_err(dev->dev, "failed to allocate private data\n");
145 return -ENOMEM; 136 return -ENOMEM;
@@ -156,29 +147,28 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
156 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 147 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
157 if (res == NULL) { 148 if (res == NULL) {
158 dev_err(&pdev->dev, "failed to get memory resource\n"); 149 dev_err(&pdev->dev, "failed to get memory resource\n");
159 ret = -EINVAL; 150 return -EINVAL;
160 goto done;
161 } 151 }
162 152
163 sdev->mmio = ioremap_nocache(res->start, resource_size(res)); 153 sdev->mmio = devm_ioremap_nocache(&pdev->dev, res->start,
154 resource_size(res));
164 if (sdev->mmio == NULL) { 155 if (sdev->mmio == NULL) {
165 dev_err(&pdev->dev, "failed to remap memory resource\n"); 156 dev_err(&pdev->dev, "failed to remap memory resource\n");
166 ret = -ENOMEM; 157 return -ENOMEM;
167 goto done;
168 } 158 }
169 159
170 ret = shmob_drm_setup_clocks(sdev, pdata->clk_source); 160 ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
171 if (ret < 0) 161 if (ret < 0)
172 goto done; 162 return ret;
173 163
174 ret = shmob_drm_init_interface(sdev); 164 ret = shmob_drm_init_interface(sdev);
175 if (ret < 0) 165 if (ret < 0)
176 goto done; 166 return ret;
177 167
178 ret = shmob_drm_modeset_init(sdev); 168 ret = shmob_drm_modeset_init(sdev);
179 if (ret < 0) { 169 if (ret < 0) {
180 dev_err(&pdev->dev, "failed to initialize mode setting\n"); 170 dev_err(&pdev->dev, "failed to initialize mode setting\n");
181 goto done; 171 return ret;
182 } 172 }
183 173
184 for (i = 0; i < 4; ++i) { 174 for (i = 0; i < 4; ++i) {
@@ -273,7 +263,8 @@ static const struct file_operations shmob_drm_fops = {
273}; 263};
274 264
275static struct drm_driver shmob_drm_driver = { 265static struct drm_driver shmob_drm_driver = {
276 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, 266 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
267 | DRIVER_PRIME,
277 .load = shmob_drm_load, 268 .load = shmob_drm_load,
278 .unload = shmob_drm_unload, 269 .unload = shmob_drm_unload,
279 .preclose = shmob_drm_preclose, 270 .preclose = shmob_drm_preclose,
@@ -283,6 +274,10 @@ static struct drm_driver shmob_drm_driver = {
283 .disable_vblank = shmob_drm_disable_vblank, 274 .disable_vblank = shmob_drm_disable_vblank,
284 .gem_free_object = drm_gem_cma_free_object, 275 .gem_free_object = drm_gem_cma_free_object,
285 .gem_vm_ops = &drm_gem_cma_vm_ops, 276 .gem_vm_ops = &drm_gem_cma_vm_ops,
277 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
278 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
279 .gem_prime_import = drm_gem_cma_dmabuf_import,
280 .gem_prime_export = drm_gem_cma_dmabuf_export,
286 .dumb_create = drm_gem_cma_dumb_create, 281 .dumb_create = drm_gem_cma_dumb_create,
287 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 282 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
288 .dumb_destroy = drm_gem_cma_dumb_destroy, 283 .dumb_destroy = drm_gem_cma_dumb_destroy,
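The shmobile changes above are a straight conversion to managed (devm_*) resources: the private data, MMIO mapping and clock are now released automatically when the device is unbound, so every error path becomes a plain return and the clk_put/iounmap/kfree calls in unload go away. A minimal sketch of the pattern with a hypothetical driver (same APIs as the hunk):

	#include <linux/clk.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *mmio;
		struct clk *clk;
		void *priv;

		/* Freed automatically on unbind -- no kfree() needed. */
		priv = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
		if (priv == NULL)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (res == NULL)
			return -EINVAL;

		/* Unmapped automatically -- no iounmap() needed. */
		mmio = devm_ioremap_nocache(&pdev->dev, res->start,
					    resource_size(res));
		if (mmio == NULL)
			return -ENOMEM;

		/* Put automatically -- no clk_put() needed. */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return 0;
	}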
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index c291ee385b4f..fc0ef0ca7d04 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -116,7 +116,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
116 } 116 }
117 117
118 if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) { 118 if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
119 dev_dbg(dev->dev, "valid pitch value %u\n", 119 dev_dbg(dev->dev, "invalid pitch value %u\n",
120 mode_cmd->pitches[0]); 120 mode_cmd->pitches[0]);
121 return ERR_PTR(-EINVAL); 121 return ERR_PTR(-EINVAL);
122 } 122 }
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index e1eb899b0288..060ae03e5f9b 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -166,7 +166,7 @@ void shmob_drm_plane_setup(struct drm_plane *plane)
166{ 166{
167 struct shmob_drm_plane *splane = to_shmob_plane(plane); 167 struct shmob_drm_plane *splane = to_shmob_plane(plane);
168 168
169 if (plane->fb == NULL || !plane->enabled) 169 if (plane->fb == NULL)
170 return; 170 return;
171 171
172 __shmob_drm_plane_setup(splane, plane->fb); 172 __shmob_drm_plane_setup(splane, plane->fb);
@@ -221,11 +221,8 @@ static int shmob_drm_plane_disable(struct drm_plane *plane)
221 221
222static void shmob_drm_plane_destroy(struct drm_plane *plane) 222static void shmob_drm_plane_destroy(struct drm_plane *plane)
223{ 223{
224 struct shmob_drm_plane *splane = to_shmob_plane(plane);
225
226 shmob_drm_plane_disable(plane); 224 shmob_drm_plane_disable(plane);
227 drm_plane_cleanup(plane); 225 drm_plane_cleanup(plane);
228 kfree(splane);
229} 226}
230 227
231static const struct drm_plane_funcs shmob_drm_plane_funcs = { 228static const struct drm_plane_funcs shmob_drm_plane_funcs = {
@@ -251,7 +248,7 @@ int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
251 struct shmob_drm_plane *splane; 248 struct shmob_drm_plane *splane;
252 int ret; 249 int ret;
253 250
254 splane = kzalloc(sizeof(*splane), GFP_KERNEL); 251 splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL);
255 if (splane == NULL) 252 if (splane == NULL)
256 return -ENOMEM; 253 return -ENOMEM;
257 254
@@ -261,8 +258,6 @@ int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
261 ret = drm_plane_init(sdev->ddev, &splane->plane, 1, 258 ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
262 &shmob_drm_plane_funcs, formats, 259 &shmob_drm_plane_funcs, formats,
263 ARRAY_SIZE(formats), false); 260 ARRAY_SIZE(formats), false);
264 if (ret < 0)
265 kfree(splane);
266 261
267 return ret; 262 return ret;
268} 263}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 5dd3c7d031d5..7418dcd986d3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -42,7 +42,8 @@ struct tilcdc_crtc {
42 42
43static void unref_worker(struct work_struct *work) 43static void unref_worker(struct work_struct *work)
44{ 44{
45 struct tilcdc_crtc *tilcdc_crtc = container_of(work, struct tilcdc_crtc, work); 45 struct tilcdc_crtc *tilcdc_crtc =
46 container_of(work, struct tilcdc_crtc, work);
46 struct drm_device *dev = tilcdc_crtc->base.dev; 47 struct drm_device *dev = tilcdc_crtc->base.dev;
47 struct drm_framebuffer *fb; 48 struct drm_framebuffer *fb;
48 49
@@ -55,10 +56,12 @@ static void unref_worker(struct work_struct *work)
55static void set_scanout(struct drm_crtc *crtc, int n) 56static void set_scanout(struct drm_crtc *crtc, int n)
56{ 57{
57 static const uint32_t base_reg[] = { 58 static const uint32_t base_reg[] = {
58 LCDC_DMA_FB_BASE_ADDR_0_REG, LCDC_DMA_FB_BASE_ADDR_1_REG, 59 LCDC_DMA_FB_BASE_ADDR_0_REG,
60 LCDC_DMA_FB_BASE_ADDR_1_REG,
59 }; 61 };
60 static const uint32_t ceil_reg[] = { 62 static const uint32_t ceil_reg[] = {
61 LCDC_DMA_FB_CEILING_ADDR_0_REG, LCDC_DMA_FB_CEILING_ADDR_1_REG, 63 LCDC_DMA_FB_CEILING_ADDR_0_REG,
64 LCDC_DMA_FB_CEILING_ADDR_1_REG,
62 }; 65 };
63 static const uint32_t stat[] = { 66 static const uint32_t stat[] = {
64 LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1, 67 LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
@@ -194,7 +197,8 @@ static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
194 tilcdc_crtc->frame_done = false; 197 tilcdc_crtc->frame_done = false;
195 stop(crtc); 198 stop(crtc);
196 199
197 /* if necessary wait for framedone irq which will still come 200 /*
201 * if necessary wait for framedone irq which will still come
198 * before putting things to sleep.. 202 * before putting things to sleep..
199 */ 203 */
200 if (priv->rev == 2) { 204 if (priv->rev == 2) {
@@ -289,17 +293,24 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
289 reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00; 293 reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
290 reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) | 294 reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
291 LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt); 295 LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
296
297 /*
 298 * subtract one from hfp, hbp, hsw because the hardware
 299 * interprets a programmed value of 0 as 1
300 */
292 if (priv->rev == 2) { 301 if (priv->rev == 2) {
293 reg |= (hfp & 0x300) >> 8; 302 /* clear bits we're going to set */
294 reg |= (hbp & 0x300) >> 4; 303 reg &= ~0x78000033;
295 reg |= (hsw & 0x3c0) << 21; 304 reg |= ((hfp-1) & 0x300) >> 8;
305 reg |= ((hbp-1) & 0x300) >> 4;
306 reg |= ((hsw-1) & 0x3c0) << 21;
296 } 307 }
297 tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg); 308 tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
298 309
299 reg = (((mode->hdisplay >> 4) - 1) << 4) | 310 reg = (((mode->hdisplay >> 4) - 1) << 4) |
300 ((hbp & 0xff) << 24) | 311 (((hbp-1) & 0xff) << 24) |
301 ((hfp & 0xff) << 16) | 312 (((hfp-1) & 0xff) << 16) |
302 ((hsw & 0x3f) << 10); 313 (((hsw-1) & 0x3f) << 10);
303 if (priv->rev == 2) 314 if (priv->rev == 2)
304 reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3; 315 reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
305 tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg); 316 tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
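
The (hfp-1)/(hbp-1)/(hsw-1) adjustments above exist because the LCDC counts these fields from zero: a programmed value of N produces N+1 clocks. A standalone sketch of the LCDC_RASTER_TIMING_0 packing follows, using illustrative panel timings rather than values from the patch:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_timing0(uint32_t hdisplay, uint32_t hbp,
			     uint32_t hfp, uint32_t hsw)
{
	/* each field holds N - 1; hdisplay is programmed in 16-pixel units */
	return (((hdisplay >> 4) - 1) << 4) |
	       (((hbp - 1) & 0xff) << 24) |
	       (((hfp - 1) & 0xff) << 16) |
	       (((hsw - 1) & 0x3f) << 10);
}

int main(void)
{
	/* e.g. an 800-pixel-wide panel with hfp = 40, hbp = 40, hsw = 48 */
	printf("TIMING_0 = 0x%08x\n", pack_timing0(800, 40, 40, 48));
	return 0;
}
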
@@ -307,9 +318,24 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
307 reg = ((mode->vdisplay - 1) & 0x3ff) | 318 reg = ((mode->vdisplay - 1) & 0x3ff) |
308 ((vbp & 0xff) << 24) | 319 ((vbp & 0xff) << 24) |
309 ((vfp & 0xff) << 16) | 320 ((vfp & 0xff) << 16) |
310 ((vsw & 0x3f) << 10); 321 (((vsw-1) & 0x3f) << 10);
311 tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg); 322 tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
312 323
324 /*
325 * be sure to set Bit 10 for the V2 LCDC controller,
 326 * otherwise the width is limited to 1024 pixels, preventing
 327 * 1920x1080 from being supported.
328 */
329 if (priv->rev == 2) {
330 if ((mode->vdisplay - 1) & 0x400) {
331 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
332 LCDC_LPP_B10);
333 } else {
334 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
335 LCDC_LPP_B10);
336 }
337 }
338
313 /* Configure display type: */ 339 /* Configure display type: */
314 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & 340 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
315 ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE | 341 ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
@@ -384,10 +410,6 @@ static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
384 return 0; 410 return 0;
385} 411}
386 412
387static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
388{
389}
390
391static const struct drm_crtc_funcs tilcdc_crtc_funcs = { 413static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
392 .destroy = tilcdc_crtc_destroy, 414 .destroy = tilcdc_crtc_destroy,
393 .set_config = drm_crtc_helper_set_config, 415 .set_config = drm_crtc_helper_set_config,
@@ -401,7 +423,6 @@ static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
401 .commit = tilcdc_crtc_commit, 423 .commit = tilcdc_crtc_commit,
402 .mode_set = tilcdc_crtc_mode_set, 424 .mode_set = tilcdc_crtc_mode_set,
403 .mode_set_base = tilcdc_crtc_mode_set_base, 425 .mode_set_base = tilcdc_crtc_mode_set_base,
404 .load_lut = tilcdc_crtc_load_lut,
405}; 426};
406 427
407int tilcdc_crtc_max_width(struct drm_crtc *crtc) 428int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -422,7 +443,12 @@ int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
422{ 443{
423 struct tilcdc_drm_private *priv = crtc->dev->dev_private; 444 struct tilcdc_drm_private *priv = crtc->dev->dev_private;
424 unsigned int bandwidth; 445 unsigned int bandwidth;
446 uint32_t hbp, hfp, hsw, vbp, vfp, vsw;
425 447
448 /*
 449 * check that the width is within the range the
 450 * LCD controller physically supports
451 */
426 if (mode->hdisplay > tilcdc_crtc_max_width(crtc)) 452 if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
427 return MODE_VIRTUAL_X; 453 return MODE_VIRTUAL_X;
428 454
@@ -433,10 +459,70 @@ int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
433 if (mode->vdisplay > 2048) 459 if (mode->vdisplay > 2048)
434 return MODE_VIRTUAL_Y; 460 return MODE_VIRTUAL_Y;
435 461
462 DBG("Processing mode %dx%d@%d with pixel clock %d",
463 mode->hdisplay, mode->vdisplay,
464 drm_mode_vrefresh(mode), mode->clock);
465
466 hbp = mode->htotal - mode->hsync_end;
467 hfp = mode->hsync_start - mode->hdisplay;
468 hsw = mode->hsync_end - mode->hsync_start;
469 vbp = mode->vtotal - mode->vsync_end;
470 vfp = mode->vsync_start - mode->vdisplay;
471 vsw = mode->vsync_end - mode->vsync_start;
472
473 if ((hbp-1) & ~0x3ff) {
474 DBG("Pruning mode: Horizontal Back Porch out of range");
475 return MODE_HBLANK_WIDE;
476 }
477
478 if ((hfp-1) & ~0x3ff) {
479 DBG("Pruning mode: Horizontal Front Porch out of range");
480 return MODE_HBLANK_WIDE;
481 }
482
483 if ((hsw-1) & ~0x3ff) {
484 DBG("Pruning mode: Horizontal Sync Width out of range");
485 return MODE_HSYNC_WIDE;
486 }
487
488 if (vbp & ~0xff) {
489 DBG("Pruning mode: Vertical Back Porch out of range");
490 return MODE_VBLANK_WIDE;
491 }
492
493 if (vfp & ~0xff) {
494 DBG("Pruning mode: Vertical Front Porch out of range");
495 return MODE_VBLANK_WIDE;
496 }
497
498 if ((vsw-1) & ~0x3f) {
499 DBG("Pruning mode: Vertical Sync Width out of range");
500 return MODE_VSYNC_WIDE;
501 }
502
503 /*
504 * some devices have a maximum allowed pixel clock
505 * configured from the DT
506 */
507 if (mode->clock > priv->max_pixelclock) {
508 DBG("Pruning mode: pixel clock too high");
509 return MODE_CLOCK_HIGH;
510 }
511
512 /*
513 * some devices further limit the max horizontal resolution
514 * configured from the DT
515 */
516 if (mode->hdisplay > priv->max_width)
517 return MODE_BAD_WIDTH;
518
436 /* filter out modes that would require too much memory bandwidth: */ 519 /* filter out modes that would require too much memory bandwidth: */
437 bandwidth = mode->hdisplay * mode->vdisplay * drm_mode_vrefresh(mode); 520 bandwidth = mode->hdisplay * mode->vdisplay *
438 if (bandwidth > priv->max_bandwidth) 521 drm_mode_vrefresh(mode);
522 if (bandwidth > priv->max_bandwidth) {
523 DBG("Pruning mode: exceeds defined bandwidth limit");
439 return MODE_BAD; 524 return MODE_BAD;
525 }
440 526
441 return MODE_OK; 527 return MODE_OK;
442} 528}
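
The new checks in tilcdc_crtc_mode_valid() derive the porches and sync widths from the mode's absolute timings and prune any mode whose N-1 encoding would overflow its register field. A standalone sketch of that derivation (the struct is a pared-down stand-in for struct drm_display_mode, not the real one):

#include <stdio.h>

struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
};

int main(void)
{
	struct mode m = { 1920, 2008, 2052, 2200 };	/* 1920x1080 CEA timing */
	int hfp = m.hsync_start - m.hdisplay;	/* front porch: 88  */
	int hsw = m.hsync_end - m.hsync_start;	/* sync width:  44  */
	int hbp = m.htotal - m.hsync_end;	/* back porch:  148 */

	/* the registers hold N - 1, hence the (x - 1) range tests */
	if (((hfp - 1) & ~0x3ff) || ((hbp - 1) & ~0x3ff) || ((hsw - 1) & ~0x3ff))
		printf("mode pruned: horizontal blanking out of range\n");
	else
		printf("hfp=%d hsw=%d hbp=%d ok\n", hfp, hsw, hbp);
	return 0;
}
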
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2b5461bcd9fb..40b71da5a214 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -26,6 +26,7 @@
26#include "drm_fb_helper.h" 26#include "drm_fb_helper.h"
27 27
28static LIST_HEAD(module_list); 28static LIST_HEAD(module_list);
29static bool slave_probing;
29 30
30void tilcdc_module_init(struct tilcdc_module *mod, const char *name, 31void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
31 const struct tilcdc_module_ops *funcs) 32 const struct tilcdc_module_ops *funcs)
@@ -41,6 +42,11 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
41 list_del(&mod->list); 42 list_del(&mod->list);
42} 43}
43 44
45void tilcdc_slave_probedefer(bool deferred)
46{
47 slave_probing = deferred;
48}
49
44static struct of_device_id tilcdc_of_match[]; 50static struct of_device_id tilcdc_of_match[];
45 51
46static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev, 52static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
@@ -157,7 +163,9 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
157 struct platform_device *pdev = dev->platformdev; 163 struct platform_device *pdev = dev->platformdev;
158 struct device_node *node = pdev->dev.of_node; 164 struct device_node *node = pdev->dev.of_node;
159 struct tilcdc_drm_private *priv; 165 struct tilcdc_drm_private *priv;
166 struct tilcdc_module *mod;
160 struct resource *res; 167 struct resource *res;
168 u32 bpp = 0;
161 int ret; 169 int ret;
162 170
163 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 171 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -210,7 +218,20 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
210#endif 218#endif
211 219
212 if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth)) 220 if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
213 priv->max_bandwidth = 1280 * 1024 * 60; 221 priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
222
223 DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
224
225 if (of_property_read_u32(node, "ti,max-width", &priv->max_width))
226 priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
227
 228 DBG("Maximum Horizontal Pixel Width Value %d pixels", priv->max_width);
229
230 if (of_property_read_u32(node, "ti,max-pixelclock",
231 &priv->max_pixelclock))
232 priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
233
 234 DBG("Maximum Pixel Clock Value %d kHz", priv->max_pixelclock);
214 235
215 pm_runtime_enable(dev->dev); 236 pm_runtime_enable(dev->dev);
216 237
@@ -256,7 +277,15 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
256 277
257 platform_set_drvdata(pdev, dev); 278 platform_set_drvdata(pdev, dev);
258 279
259 priv->fbdev = drm_fbdev_cma_init(dev, 16, 280
281 list_for_each_entry(mod, &module_list, list) {
282 DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp);
283 bpp = mod->preferred_bpp;
284 if (bpp > 0)
285 break;
286 }
287
288 priv->fbdev = drm_fbdev_cma_init(dev, bpp,
260 dev->mode_config.num_crtc, 289 dev->mode_config.num_crtc,
261 dev->mode_config.num_connector); 290 dev->mode_config.num_connector);
262 291
@@ -557,6 +586,10 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
557 return -ENXIO; 586 return -ENXIO;
558 } 587 }
559 588
 589 /* defer probing while the slave's own probe is deferred */
 590 if (slave_probing)
591 return -EPROBE_DEFER;
592
560 return drm_platform_init(&tilcdc_driver, pdev); 593 return drm_platform_init(&tilcdc_driver, pdev);
561} 594}
562 595
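
The max-bandwidth, ti,max-width, and ti,max-pixelclock reads above all follow the same idiom: of_property_read_u32() returns non-zero and leaves the output untouched when the property is absent, so the silicon default goes in the error branch. Roughly, for one of the properties from the patch:

#include <linux/of.h>

/* sketch of the optional-property-with-default idiom used above */
static u32 tilcdc_get_max_width(struct device_node *node)
{
	u32 max_width;

	/* fall back to the AM335x limit when the DT says nothing */
	if (of_property_read_u32(node, "ti,max-width", &max_width))
		max_width = 2048;

	return max_width;
}
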
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 8242b5a4307b..093803683b25 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -34,6 +34,18 @@
34#include <drm/drm_gem_cma_helper.h> 34#include <drm/drm_gem_cma_helper.h>
35#include <drm/drm_fb_cma_helper.h> 35#include <drm/drm_fb_cma_helper.h>
36 36
37/* Default to the maximum pixel clock defined for the AM335x */
38#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
39/* Default to the maximum width defined for the AM335x */
40#define TILCDC_DEFAULT_MAX_WIDTH 2048
41/*
 42 * This may need some tweaking, but we want to allow at least
 43 * 1280x1024@60; with optimized DDR & EMIF settings, 1920x1080@24
 44 * appears to be supportable
45 */
46#define TILCDC_DEFAULT_MAX_BANDWIDTH (1280*1024*60)
47
48
37struct tilcdc_drm_private { 49struct tilcdc_drm_private {
38 void __iomem *mmio; 50 void __iomem *mmio;
39 51
@@ -43,6 +55,16 @@ struct tilcdc_drm_private {
43 55
44 /* don't attempt resolutions w/ higher W * H * Hz: */ 56 /* don't attempt resolutions w/ higher W * H * Hz: */
45 uint32_t max_bandwidth; 57 uint32_t max_bandwidth;
58 /*
 59 * The pixel clock is restricted to the maximum value
 60 * given in the device datasheet, measured in kHz
61 */
62 uint32_t max_pixelclock;
63 /*
 64 * The maximum allowable width is limited on a per-device
 65 * basis, measured in pixels
66 */
67 uint32_t max_width;
46 68
47 /* register contents saved across suspend/resume: */ 69 /* register contents saved across suspend/resume: */
48 u32 saved_register[12]; 70 u32 saved_register[12];
@@ -89,12 +111,13 @@ struct tilcdc_module {
89 const char *name; 111 const char *name;
90 struct list_head list; 112 struct list_head list;
91 const struct tilcdc_module_ops *funcs; 113 const struct tilcdc_module_ops *funcs;
114 unsigned int preferred_bpp;
92}; 115};
93 116
94void tilcdc_module_init(struct tilcdc_module *mod, const char *name, 117void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
95 const struct tilcdc_module_ops *funcs); 118 const struct tilcdc_module_ops *funcs);
96void tilcdc_module_cleanup(struct tilcdc_module *mod); 119void tilcdc_module_cleanup(struct tilcdc_module *mod);
97 120void tilcdc_slave_probedefer(bool deferred);
98 121
99/* Panel config that needs to be set in the crtc, but is not coming from 122/* Panel config that needs to be set in the crtc, but is not coming from
100 * the mode timings. The display module is expected to call 123 * the mode timings. The display module is expected to call
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 09176654fddb..86c67329b605 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -393,6 +393,8 @@ static int panel_probe(struct platform_device *pdev)
393 goto fail; 393 goto fail;
394 } 394 }
395 395
396 mod->preferred_bpp = panel_mod->info->bpp;
397
396 panel_mod->backlight = of_find_backlight_by_node(node); 398 panel_mod->backlight = of_find_backlight_by_node(node);
397 if (panel_mod->backlight) 399 if (panel_mod->backlight)
398 dev_info(&pdev->dev, "found backlight\n"); 400 dev_info(&pdev->dev, "found backlight\n");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index 17fd1b45428a..1bf5e2553acc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -80,6 +80,7 @@
80#define LCDC_INVERT_PIXEL_CLOCK BIT(22) 80#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
81#define LCDC_INVERT_HSYNC BIT(21) 81#define LCDC_INVERT_HSYNC BIT(21)
82#define LCDC_INVERT_VSYNC BIT(20) 82#define LCDC_INVERT_VSYNC BIT(20)
83#define LCDC_LPP_B10 BIT(26)
83 84
84/* LCDC Block */ 85/* LCDC Block */
85#define LCDC_PID_REG 0x0 86#define LCDC_PID_REG 0x0
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index db1d2fc9dfb5..dfffaf014022 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -298,6 +298,7 @@ static int slave_probe(struct platform_device *pdev)
298 struct tilcdc_module *mod; 298 struct tilcdc_module *mod;
299 struct pinctrl *pinctrl; 299 struct pinctrl *pinctrl;
300 uint32_t i2c_phandle; 300 uint32_t i2c_phandle;
301 struct i2c_adapter *slavei2c;
301 int ret = -EINVAL; 302 int ret = -EINVAL;
302 303
303 /* bail out early if no DT data: */ 304 /* bail out early if no DT data: */
@@ -306,42 +307,48 @@ static int slave_probe(struct platform_device *pdev)
306 return -ENXIO; 307 return -ENXIO;
307 } 308 }
308 309
309 slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL); 310 /* Bail out early if i2c not specified */
310 if (!slave_mod)
311 return -ENOMEM;
312
313 mod = &slave_mod->base;
314
315 tilcdc_module_init(mod, "slave", &slave_module_ops);
316
317 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
318 if (IS_ERR(pinctrl))
319 dev_warn(&pdev->dev, "pins are not configured\n");
320
321 if (of_property_read_u32(node, "i2c", &i2c_phandle)) { 311 if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
322 dev_err(&pdev->dev, "could not get i2c bus phandle\n"); 312 dev_err(&pdev->dev, "could not get i2c bus phandle\n");
323 goto fail; 313 return ret;
324 } 314 }
325 315
326 i2c_node = of_find_node_by_phandle(i2c_phandle); 316 i2c_node = of_find_node_by_phandle(i2c_phandle);
327 if (!i2c_node) { 317 if (!i2c_node) {
328 dev_err(&pdev->dev, "could not get i2c bus node\n"); 318 dev_err(&pdev->dev, "could not get i2c bus node\n");
329 goto fail; 319 return ret;
330 } 320 }
331 321
 332 slave_mod->i2c = of_find_i2c_adapter_by_node(i2c_node); 322 /* defer the probe if the adapter isn't ready yet; it may turn up later */
333 if (!slave_mod->i2c) { 323 slavei2c = of_find_i2c_adapter_by_node(i2c_node);
324 of_node_put(i2c_node);
325
326 if (!slavei2c) {
327 ret = -EPROBE_DEFER;
328 tilcdc_slave_probedefer(true);
334 dev_err(&pdev->dev, "could not get i2c\n"); 329 dev_err(&pdev->dev, "could not get i2c\n");
335 goto fail; 330 return ret;
336 } 331 }
337 332
338 of_node_put(i2c_node); 333 slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
334 if (!slave_mod)
335 return -ENOMEM;
339 336
340 return 0; 337 mod = &slave_mod->base;
341 338
342fail: 339 mod->preferred_bpp = slave_info.bpp;
343 slave_destroy(mod); 340
344 return ret; 341 slave_mod->i2c = slavei2c;
342
343 tilcdc_module_init(mod, "slave", &slave_module_ops);
344
345 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
346 if (IS_ERR(pinctrl))
347 dev_warn(&pdev->dev, "pins are not configured\n");
348
349 tilcdc_slave_probedefer(false);
350
351 return 0;
345} 352}
346 353
347static int slave_remove(struct platform_device *pdev) 354static int slave_remove(struct platform_device *pdev)
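
The slave_probing flag above implements a small probe-ordering handshake: when the slave cannot find its i2c adapter yet it returns -EPROBE_DEFER and records that fact, and the main tilcdc probe declines to bind until the flag is cleared, so the whole device retries together. A stripped-down sketch of the handshake (function names illustrative, error handling trimmed):

#include <linux/errno.h>
#include <linux/i2c.h>

static bool slave_probing;

static int slave_probe_step(struct i2c_adapter *adap)
{
	if (!adap) {
		slave_probing = true;	/* retry on a later probe pass */
		return -EPROBE_DEFER;
	}
	slave_probing = false;
	return 0;
}

static int master_probe_step(void)
{
	/* don't bind the master while the slave is still deferring */
	if (slave_probing)
		return -EPROBE_DEFER;
	return 0;
}
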
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index a36788fbcd98..925c7cddeff9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -354,6 +354,8 @@ static int tfp410_probe(struct platform_device *pdev)
354 goto fail; 354 goto fail;
355 } 355 }
356 356
357 mod->preferred_bpp = dvi_info.bpp;
358
357 i2c_node = of_find_node_by_phandle(i2c_phandle); 359 i2c_node = of_find_node_by_phandle(i2c_phandle);
358 if (!i2c_node) { 360 if (!i2c_node) {
359 dev_err(&pdev->dev, "could not get i2c bus node\n"); 361 dev_err(&pdev->dev, "could not get i2c bus node\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d44a58..cb9dd674670c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
150 if (bo->ttm) 150 if (bo->ttm)
151 ttm_tt_destroy(bo->ttm); 151 ttm_tt_destroy(bo->ttm);
152 atomic_dec(&bo->glob->bo_count); 152 atomic_dec(&bo->glob->bo_count);
153 if (bo->resv == &bo->ttm_resv)
154 reservation_object_fini(&bo->ttm_resv);
155
153 if (bo->destroy) 156 if (bo->destroy)
154 bo->destroy(bo); 157 bo->destroy(bo);
155 else { 158 else {
@@ -158,24 +161,12 @@ static void ttm_bo_release_list(struct kref *list_kref)
158 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); 161 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
159} 162}
160 163
161static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
162 bool interruptible)
163{
164 if (interruptible) {
165 return wait_event_interruptible(bo->event_queue,
166 !ttm_bo_is_reserved(bo));
167 } else {
168 wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
169 return 0;
170 }
171}
172
173void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) 164void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
174{ 165{
175 struct ttm_bo_device *bdev = bo->bdev; 166 struct ttm_bo_device *bdev = bo->bdev;
176 struct ttm_mem_type_manager *man; 167 struct ttm_mem_type_manager *man;
177 168
178 BUG_ON(!ttm_bo_is_reserved(bo)); 169 lockdep_assert_held(&bo->resv->lock.base);
179 170
180 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { 171 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
181 172
@@ -191,6 +182,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
191 } 182 }
192 } 183 }
193} 184}
185EXPORT_SYMBOL(ttm_bo_add_to_lru);
194 186
195int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) 187int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
196{ 188{
@@ -213,71 +205,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
213 return put_count; 205 return put_count;
214} 206}
215 207
216int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
217 bool interruptible,
218 bool no_wait, bool use_sequence, uint32_t sequence)
219{
220 int ret;
221
222 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
223 /**
224 * Deadlock avoidance for multi-bo reserving.
225 */
226 if (use_sequence && bo->seq_valid) {
227 /**
228 * We've already reserved this one.
229 */
230 if (unlikely(sequence == bo->val_seq))
231 return -EDEADLK;
232 /**
233 * Already reserved by a thread that will not back
234 * off for us. We need to back off.
235 */
236 if (unlikely(sequence - bo->val_seq < (1 << 31)))
237 return -EAGAIN;
238 }
239
240 if (no_wait)
241 return -EBUSY;
242
243 ret = ttm_bo_wait_unreserved(bo, interruptible);
244
245 if (unlikely(ret))
246 return ret;
247 }
248
249 if (use_sequence) {
250 bool wake_up = false;
251 /**
252 * Wake up waiters that may need to recheck for deadlock,
253 * if we decreased the sequence number.
254 */
255 if (unlikely((bo->val_seq - sequence < (1 << 31))
256 || !bo->seq_valid))
257 wake_up = true;
258
259 /*
260 * In the worst case with memory ordering these values can be
261 * seen in the wrong order. However since we call wake_up_all
262 * in that case, this will hopefully not pose a problem,
263 * and the worst case would only cause someone to accidentally
264 * hit -EAGAIN in ttm_bo_reserve when they see old value of
265 * val_seq. However this would only happen if seq_valid was
266 * written before val_seq was, and just means some slightly
267 * increased cpu usage
268 */
269 bo->val_seq = sequence;
270 bo->seq_valid = true;
271 if (wake_up)
272 wake_up_all(&bo->event_queue);
273 } else {
274 bo->seq_valid = false;
275 }
276
277 return 0;
278}
279EXPORT_SYMBOL(ttm_bo_reserve);
280
281static void ttm_bo_ref_bug(struct kref *list_kref) 208static void ttm_bo_ref_bug(struct kref *list_kref)
282{ 209{
283 BUG(); 210 BUG();
@@ -290,89 +217,16 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
290 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list); 217 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
291} 218}
292 219
293int ttm_bo_reserve(struct ttm_buffer_object *bo, 220void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
294 bool interruptible,
295 bool no_wait, bool use_sequence, uint32_t sequence)
296{
297 struct ttm_bo_global *glob = bo->glob;
298 int put_count = 0;
299 int ret;
300
301 ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
302 sequence);
303 if (likely(ret == 0)) {
304 spin_lock(&glob->lru_lock);
305 put_count = ttm_bo_del_from_lru(bo);
306 spin_unlock(&glob->lru_lock);
307 ttm_bo_list_ref_sub(bo, put_count, true);
308 }
309
310 return ret;
311}
312
313int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
314 bool interruptible, uint32_t sequence)
315{
316 bool wake_up = false;
317 int ret;
318
319 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
320 WARN_ON(bo->seq_valid && sequence == bo->val_seq);
321
322 ret = ttm_bo_wait_unreserved(bo, interruptible);
323
324 if (unlikely(ret))
325 return ret;
326 }
327
328 if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
329 wake_up = true;
330
331 /**
332 * Wake up waiters that may need to recheck for deadlock,
333 * if we decreased the sequence number.
334 */
335 bo->val_seq = sequence;
336 bo->seq_valid = true;
337 if (wake_up)
338 wake_up_all(&bo->event_queue);
339
340 return 0;
341}
342
343int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
344 bool interruptible, uint32_t sequence)
345{
346 struct ttm_bo_global *glob = bo->glob;
347 int put_count, ret;
348
349 ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
350 if (likely(!ret)) {
351 spin_lock(&glob->lru_lock);
352 put_count = ttm_bo_del_from_lru(bo);
353 spin_unlock(&glob->lru_lock);
354 ttm_bo_list_ref_sub(bo, put_count, true);
355 }
356 return ret;
357}
358EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
359
360void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
361{
362 ttm_bo_add_to_lru(bo);
363 atomic_set(&bo->reserved, 0);
364 wake_up_all(&bo->event_queue);
365}
366
367void ttm_bo_unreserve(struct ttm_buffer_object *bo)
368{ 221{
369 struct ttm_bo_global *glob = bo->glob; 222 int put_count;
370 223
371 spin_lock(&glob->lru_lock); 224 spin_lock(&bo->glob->lru_lock);
372 ttm_bo_unreserve_locked(bo); 225 put_count = ttm_bo_del_from_lru(bo);
373 spin_unlock(&glob->lru_lock); 226 spin_unlock(&bo->glob->lru_lock);
227 ttm_bo_list_ref_sub(bo, put_count, true);
374} 228}
375EXPORT_SYMBOL(ttm_bo_unreserve); 229EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
376 230
377/* 231/*
378 * Call bo->mutex locked. 232 * Call bo->mutex locked.
@@ -544,17 +398,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
544 } 398 }
545 ttm_bo_mem_put(bo, &bo->mem); 399 ttm_bo_mem_put(bo, &bo->mem);
546 400
 547 atomic_set(&bo->reserved, 0); 401 ww_mutex_unlock(&bo->resv->lock);
548 wake_up_all(&bo->event_queue);
549
550 /*
551 * Since the final reference to this bo may not be dropped by
552 * the current task we have to put a memory barrier here to make
553 * sure the changes done in this function are always visible.
554 *
555 * This function only needs protection against the final kref_put.
556 */
557 smp_mb__before_atomic_dec();
558} 402}
559 403
560static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) 404static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -586,10 +430,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
586 sync_obj = driver->sync_obj_ref(bo->sync_obj); 430 sync_obj = driver->sync_obj_ref(bo->sync_obj);
587 spin_unlock(&bdev->fence_lock); 431 spin_unlock(&bdev->fence_lock);
588 432
589 if (!ret) { 433 if (!ret)
590 atomic_set(&bo->reserved, 0); 434 ww_mutex_unlock(&bo->resv->lock);
591 wake_up_all(&bo->event_queue);
592 }
593 435
594 kref_get(&bo->list_kref); 436 kref_get(&bo->list_kref);
595 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 437 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -639,8 +481,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
639 sync_obj = driver->sync_obj_ref(bo->sync_obj); 481 sync_obj = driver->sync_obj_ref(bo->sync_obj);
640 spin_unlock(&bdev->fence_lock); 482 spin_unlock(&bdev->fence_lock);
641 483
642 atomic_set(&bo->reserved, 0); 484 ww_mutex_unlock(&bo->resv->lock);
643 wake_up_all(&bo->event_queue);
644 spin_unlock(&glob->lru_lock); 485 spin_unlock(&glob->lru_lock);
645 486
646 ret = driver->sync_obj_wait(sync_obj, false, interruptible); 487 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -678,8 +519,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
678 spin_unlock(&bdev->fence_lock); 519 spin_unlock(&bdev->fence_lock);
679 520
680 if (ret || unlikely(list_empty(&bo->ddestroy))) { 521 if (ret || unlikely(list_empty(&bo->ddestroy))) {
681 atomic_set(&bo->reserved, 0); 522 ww_mutex_unlock(&bo->resv->lock);
682 wake_up_all(&bo->event_queue);
683 spin_unlock(&glob->lru_lock); 523 spin_unlock(&glob->lru_lock);
684 return ret; 524 return ret;
685 } 525 }
@@ -831,7 +671,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
831 goto out; 671 goto out;
832 } 672 }
833 673
834 BUG_ON(!ttm_bo_is_reserved(bo)); 674 lockdep_assert_held(&bo->resv->lock.base);
835 675
836 evict_mem = bo->mem; 676 evict_mem = bo->mem;
837 evict_mem.mm_node = NULL; 677 evict_mem.mm_node = NULL;
@@ -1121,7 +961,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1121 struct ttm_mem_reg mem; 961 struct ttm_mem_reg mem;
1122 struct ttm_bo_device *bdev = bo->bdev; 962 struct ttm_bo_device *bdev = bo->bdev;
1123 963
1124 BUG_ON(!ttm_bo_is_reserved(bo)); 964 lockdep_assert_held(&bo->resv->lock.base);
1125 965
1126 /* 966 /*
1127 * FIXME: It's possible to pipeline buffer moves. 967 * FIXME: It's possible to pipeline buffer moves.
@@ -1180,7 +1020,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1180{ 1020{
1181 int ret; 1021 int ret;
1182 1022
1183 BUG_ON(!ttm_bo_is_reserved(bo)); 1023 lockdep_assert_held(&bo->resv->lock.base);
1184 /* Check that range is valid */ 1024 /* Check that range is valid */
1185 if (placement->lpfn || placement->fpfn) 1025 if (placement->lpfn || placement->fpfn)
1186 if (placement->fpfn > placement->lpfn || 1026 if (placement->fpfn > placement->lpfn ||
@@ -1239,6 +1079,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1239 int ret = 0; 1079 int ret = 0;
1240 unsigned long num_pages; 1080 unsigned long num_pages;
1241 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1081 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1082 bool locked;
1242 1083
1243 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 1084 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1244 if (ret) { 1085 if (ret) {
@@ -1265,8 +1106,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1265 kref_init(&bo->kref); 1106 kref_init(&bo->kref);
1266 kref_init(&bo->list_kref); 1107 kref_init(&bo->list_kref);
1267 atomic_set(&bo->cpu_writers, 0); 1108 atomic_set(&bo->cpu_writers, 0);
1268 atomic_set(&bo->reserved, 1);
1269 init_waitqueue_head(&bo->event_queue);
1270 INIT_LIST_HEAD(&bo->lru); 1109 INIT_LIST_HEAD(&bo->lru);
1271 INIT_LIST_HEAD(&bo->ddestroy); 1110 INIT_LIST_HEAD(&bo->ddestroy);
1272 INIT_LIST_HEAD(&bo->swap); 1111 INIT_LIST_HEAD(&bo->swap);
@@ -1284,37 +1123,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1284 bo->mem.bus.io_reserved_count = 0; 1123 bo->mem.bus.io_reserved_count = 0;
1285 bo->priv_flags = 0; 1124 bo->priv_flags = 0;
1286 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1125 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1287 bo->seq_valid = false;
1288 bo->persistent_swap_storage = persistent_swap_storage; 1126 bo->persistent_swap_storage = persistent_swap_storage;
1289 bo->acc_size = acc_size; 1127 bo->acc_size = acc_size;
1290 bo->sg = sg; 1128 bo->sg = sg;
1129 bo->resv = &bo->ttm_resv;
1130 reservation_object_init(bo->resv);
1291 atomic_inc(&bo->glob->bo_count); 1131 atomic_inc(&bo->glob->bo_count);
1292 1132
1293 ret = ttm_bo_check_placement(bo, placement); 1133 ret = ttm_bo_check_placement(bo, placement);
1294 if (unlikely(ret != 0))
1295 goto out_err;
1296 1134
1297 /* 1135 /*
1298 * For ttm_bo_type_device buffers, allocate 1136 * For ttm_bo_type_device buffers, allocate
1299 * address space from the device. 1137 * address space from the device.
1300 */ 1138 */
1301 if (bo->type == ttm_bo_type_device || 1139 if (likely(!ret) &&
1302 bo->type == ttm_bo_type_sg) { 1140 (bo->type == ttm_bo_type_device ||
1141 bo->type == ttm_bo_type_sg))
1303 ret = ttm_bo_setup_vm(bo); 1142 ret = ttm_bo_setup_vm(bo);
1304 if (ret)
1305 goto out_err;
1306 }
1307 1143
1308 ret = ttm_bo_validate(bo, placement, interruptible, false); 1144 locked = ww_mutex_trylock(&bo->resv->lock);
1309 if (ret) 1145 WARN_ON(!locked);
1310 goto out_err;
1311 1146
1312 ttm_bo_unreserve(bo); 1147 if (likely(!ret))
1313 return 0; 1148 ret = ttm_bo_validate(bo, placement, interruptible, false);
1314 1149
1315out_err:
1316 ttm_bo_unreserve(bo); 1150 ttm_bo_unreserve(bo);
1317 ttm_bo_unref(&bo); 1151
1152 if (unlikely(ret))
1153 ttm_bo_unref(&bo);
1318 1154
1319 return ret; 1155 return ret;
1320} 1156}
@@ -1619,9 +1455,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1619 goto out_no_sys; 1455 goto out_no_sys;
1620 1456
1621 bdev->addr_space_rb = RB_ROOT; 1457 bdev->addr_space_rb = RB_ROOT;
1622 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); 1458 drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1623 if (unlikely(ret != 0))
1624 goto out_no_addr_mm;
1625 1459
1626 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1460 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1627 INIT_LIST_HEAD(&bdev->ddestroy); 1461 INIT_LIST_HEAD(&bdev->ddestroy);
@@ -1635,8 +1469,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1635 mutex_unlock(&glob->device_list_mutex); 1469 mutex_unlock(&glob->device_list_mutex);
1636 1470
1637 return 0; 1471 return 0;
1638out_no_addr_mm:
1639 ttm_bo_clean_mm(bdev, 0);
1640out_no_sys: 1472out_no_sys:
1641 return ret; 1473 return ret;
1642} 1474}
@@ -1927,8 +1759,7 @@ out:
1927 * already swapped buffer. 1759 * already swapped buffer.
1928 */ 1760 */
1929 1761
1930 atomic_set(&bo->reserved, 0); 1762 ww_mutex_unlock(&bo->resv->lock);
1931 wake_up_all(&bo->event_queue);
1932 kref_put(&bo->list_kref, ttm_bo_release_list); 1763 kref_put(&bo->list_kref, ttm_bo_release_list);
1933 return ret; 1764 return ret;
1934} 1765}
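
The ttm_bo.c hunks above replace the hand-rolled reserve machinery (atomic_xchg() on bo->reserved, an event_queue, and sequence numbers for deadlock avoidance) with the reservation object's ww_mutex. For a single object no acquire context is needed; a sketch against the patched struct ttm_buffer_object, with illustrative helper names:

#include <linux/ww_mutex.h>

/* single-object reserve: just the ww_mutex, no ticket required */
static int reserve_single(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible)
		return ww_mutex_lock_interruptible(&bo->resv->lock, NULL);
	return ww_mutex_lock(&bo->resv->lock, NULL);
}

static void unreserve_single(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}
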
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9212494e9072..e4367f91472a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -103,18 +103,12 @@ static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
103 unsigned long p_size) 103 unsigned long p_size)
104{ 104{
105 struct ttm_range_manager *rman; 105 struct ttm_range_manager *rman;
106 int ret;
107 106
108 rman = kzalloc(sizeof(*rman), GFP_KERNEL); 107 rman = kzalloc(sizeof(*rman), GFP_KERNEL);
109 if (!rman) 108 if (!rman)
110 return -ENOMEM; 109 return -ENOMEM;
111 110
112 ret = drm_mm_init(&rman->mm, 0, p_size); 111 drm_mm_init(&rman->mm, 0, p_size);
113 if (ret) {
114 kfree(rman);
115 return ret;
116 }
117
118 spin_lock_init(&rman->lock); 112 spin_lock_init(&rman->lock);
119 man->priv = rman; 113 man->priv = rman;
120 return 0; 114 return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index af894584dd90..319cf4127c5b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -433,6 +433,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
433 struct ttm_buffer_object *fbo; 433 struct ttm_buffer_object *fbo;
434 struct ttm_bo_device *bdev = bo->bdev; 434 struct ttm_bo_device *bdev = bo->bdev;
435 struct ttm_bo_driver *driver = bdev->driver; 435 struct ttm_bo_driver *driver = bdev->driver;
436 int ret;
436 437
437 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); 438 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
438 if (!fbo) 439 if (!fbo)
@@ -445,7 +446,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
445 * TODO: Explicit member copy would probably be better here. 446 * TODO: Explicit member copy would probably be better here.
446 */ 447 */
447 448
448 init_waitqueue_head(&fbo->event_queue);
449 INIT_LIST_HEAD(&fbo->ddestroy); 449 INIT_LIST_HEAD(&fbo->ddestroy);
450 INIT_LIST_HEAD(&fbo->lru); 450 INIT_LIST_HEAD(&fbo->lru);
451 INIT_LIST_HEAD(&fbo->swap); 451 INIT_LIST_HEAD(&fbo->swap);
@@ -463,6 +463,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
463 kref_init(&fbo->kref); 463 kref_init(&fbo->kref);
464 fbo->destroy = &ttm_transfered_destroy; 464 fbo->destroy = &ttm_transfered_destroy;
465 fbo->acc_size = 0; 465 fbo->acc_size = 0;
466 fbo->resv = &fbo->ttm_resv;
467 reservation_object_init(fbo->resv);
468 ret = ww_mutex_trylock(&fbo->resv->lock);
469 WARN_ON(!ret);
466 470
467 *new_obj = fbo; 471 *new_obj = fbo;
468 return 0; 472 return 0;
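
The ttm_buffer_object_transfer() hunk relies on the same invariant ttm_bo_init() now uses: a freshly initialized reservation object cannot be contended, so ww_mutex_trylock() must succeed and the WARN_ON merely documents that. In sketch form (helper name illustrative):

#include <linux/reservation.h>

/* a brand-new bo starts out reserved by its creator */
static void init_reserved(struct ttm_buffer_object *bo)
{
	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);
	WARN_ON(!ww_mutex_trylock(&bo->resv->lock));	/* cannot fail here */
}
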
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7b90def15674..6c911789ae5c 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,8 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/module.h> 33#include <linux/module.h>
34 34
35static void ttm_eu_backoff_reservation_locked(struct list_head *list) 35static void ttm_eu_backoff_reservation_locked(struct list_head *list,
36 struct ww_acquire_ctx *ticket)
36{ 37{
37 struct ttm_validate_buffer *entry; 38 struct ttm_validate_buffer *entry;
38 39
@@ -41,14 +42,12 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
41 if (!entry->reserved) 42 if (!entry->reserved)
42 continue; 43 continue;
43 44
45 entry->reserved = false;
44 if (entry->removed) { 46 if (entry->removed) {
45 ttm_bo_add_to_lru(bo); 47 ttm_bo_add_to_lru(bo);
46 entry->removed = false; 48 entry->removed = false;
47
48 } 49 }
49 entry->reserved = false; 50 ww_mutex_unlock(&bo->resv->lock);
50 atomic_set(&bo->reserved, 0);
51 wake_up_all(&bo->event_queue);
52 } 51 }
53} 52}
54 53
@@ -82,7 +81,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
82 } 81 }
83} 82}
84 83
85void ttm_eu_backoff_reservation(struct list_head *list) 84void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
85 struct list_head *list)
86{ 86{
87 struct ttm_validate_buffer *entry; 87 struct ttm_validate_buffer *entry;
88 struct ttm_bo_global *glob; 88 struct ttm_bo_global *glob;
@@ -93,7 +93,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
93 entry = list_first_entry(list, struct ttm_validate_buffer, head); 93 entry = list_first_entry(list, struct ttm_validate_buffer, head);
94 glob = entry->bo->glob; 94 glob = entry->bo->glob;
95 spin_lock(&glob->lru_lock); 95 spin_lock(&glob->lru_lock);
96 ttm_eu_backoff_reservation_locked(list); 96 ttm_eu_backoff_reservation_locked(list, ticket);
97 ww_acquire_fini(ticket);
97 spin_unlock(&glob->lru_lock); 98 spin_unlock(&glob->lru_lock);
98} 99}
99EXPORT_SYMBOL(ttm_eu_backoff_reservation); 100EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -110,12 +111,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
110 * buffers in different orders. 111 * buffers in different orders.
111 */ 112 */
112 113
113int ttm_eu_reserve_buffers(struct list_head *list) 114int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
115 struct list_head *list)
114{ 116{
115 struct ttm_bo_global *glob; 117 struct ttm_bo_global *glob;
116 struct ttm_validate_buffer *entry; 118 struct ttm_validate_buffer *entry;
117 int ret; 119 int ret;
118 uint32_t val_seq;
119 120
120 if (list_empty(list)) 121 if (list_empty(list))
121 return 0; 122 return 0;
@@ -129,9 +130,7 @@ int ttm_eu_reserve_buffers(struct list_head *list)
129 entry = list_first_entry(list, struct ttm_validate_buffer, head); 130 entry = list_first_entry(list, struct ttm_validate_buffer, head);
130 glob = entry->bo->glob; 131 glob = entry->bo->glob;
131 132
132 spin_lock(&glob->lru_lock); 133 ww_acquire_init(ticket, &reservation_ww_class);
133 val_seq = entry->bo->bdev->val_seq++;
134
135retry: 134retry:
136 list_for_each_entry(entry, list, head) { 135 list_for_each_entry(entry, list, head) {
137 struct ttm_buffer_object *bo = entry->bo; 136 struct ttm_buffer_object *bo = entry->bo;
@@ -140,49 +139,34 @@ retry:
140 if (entry->reserved) 139 if (entry->reserved)
141 continue; 140 continue;
142 141
143 ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
144 switch (ret) {
145 case 0:
146 break;
147 case -EBUSY:
148 ttm_eu_del_from_lru_locked(list);
149 spin_unlock(&glob->lru_lock);
150 ret = ttm_bo_reserve_nolru(bo, true, false,
151 true, val_seq);
152 spin_lock(&glob->lru_lock);
153 if (!ret)
154 break;
155
156 if (unlikely(ret != -EAGAIN))
157 goto err;
158 142
159 /* fallthrough */ 143 ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
160 case -EAGAIN:
161 ttm_eu_backoff_reservation_locked(list);
162 144
163 /* 145 if (ret == -EDEADLK) {
164 * temporarily increase sequence number every retry, 146 /* uh oh, we lost out, drop every reservation and try
165 * to prevent us from seeing our old reservation 147 * to only reserve this buffer, then start over if
166 * sequence when someone else reserved the buffer, 148 * this succeeds.
167 * but hasn't updated the seq_valid/seqno members yet.
168 */ 149 */
169 val_seq = entry->bo->bdev->val_seq++; 150 spin_lock(&glob->lru_lock);
170 151 ttm_eu_backoff_reservation_locked(list, ticket);
171 spin_unlock(&glob->lru_lock); 152 spin_unlock(&glob->lru_lock);
172 ttm_eu_list_ref_sub(list); 153 ttm_eu_list_ref_sub(list);
173 ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq); 154 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
174 if (unlikely(ret != 0)) 155 ticket);
175 return ret; 156 if (unlikely(ret != 0)) {
176 spin_lock(&glob->lru_lock); 157 if (ret == -EINTR)
158 ret = -ERESTARTSYS;
159 goto err_fini;
160 }
161
177 entry->reserved = true; 162 entry->reserved = true;
178 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 163 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
179 ret = -EBUSY; 164 ret = -EBUSY;
180 goto err; 165 goto err;
181 } 166 }
182 goto retry; 167 goto retry;
183 default: 168 } else if (ret)
184 goto err; 169 goto err;
185 }
186 170
187 entry->reserved = true; 171 entry->reserved = true;
188 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 172 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -191,21 +175,27 @@ retry:
191 } 175 }
192 } 176 }
193 177
178 ww_acquire_done(ticket);
179 spin_lock(&glob->lru_lock);
194 ttm_eu_del_from_lru_locked(list); 180 ttm_eu_del_from_lru_locked(list);
195 spin_unlock(&glob->lru_lock); 181 spin_unlock(&glob->lru_lock);
196 ttm_eu_list_ref_sub(list); 182 ttm_eu_list_ref_sub(list);
197
198 return 0; 183 return 0;
199 184
200err: 185err:
201 ttm_eu_backoff_reservation_locked(list); 186 spin_lock(&glob->lru_lock);
187 ttm_eu_backoff_reservation_locked(list, ticket);
202 spin_unlock(&glob->lru_lock); 188 spin_unlock(&glob->lru_lock);
203 ttm_eu_list_ref_sub(list); 189 ttm_eu_list_ref_sub(list);
190err_fini:
191 ww_acquire_done(ticket);
192 ww_acquire_fini(ticket);
204 return ret; 193 return ret;
205} 194}
206EXPORT_SYMBOL(ttm_eu_reserve_buffers); 195EXPORT_SYMBOL(ttm_eu_reserve_buffers);
207 196
208void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) 197void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
198 struct list_head *list, void *sync_obj)
209{ 199{
210 struct ttm_validate_buffer *entry; 200 struct ttm_validate_buffer *entry;
211 struct ttm_buffer_object *bo; 201 struct ttm_buffer_object *bo;
@@ -228,11 +218,13 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
228 bo = entry->bo; 218 bo = entry->bo;
229 entry->old_sync_obj = bo->sync_obj; 219 entry->old_sync_obj = bo->sync_obj;
230 bo->sync_obj = driver->sync_obj_ref(sync_obj); 220 bo->sync_obj = driver->sync_obj_ref(sync_obj);
231 ttm_bo_unreserve_locked(bo); 221 ttm_bo_add_to_lru(bo);
222 ww_mutex_unlock(&bo->resv->lock);
232 entry->reserved = false; 223 entry->reserved = false;
233 } 224 }
234 spin_unlock(&bdev->fence_lock); 225 spin_unlock(&bdev->fence_lock);
235 spin_unlock(&glob->lru_lock); 226 spin_unlock(&glob->lru_lock);
227 ww_acquire_fini(ticket);
236 228
237 list_for_each_entry(entry, list, head) { 229 list_for_each_entry(entry, list, head) {
238 if (entry->old_sync_obj) 230 if (entry->old_sync_obj)
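
With the ticketed API above, one ww_acquire_ctx spans the whole reserve -> use -> fence cycle, and -EDEADLK from any lock attempt means "back everything off and take the slow path", which ttm_eu_reserve_buffers() now handles internally. A caller-side sketch with an illustrative two-buffer list (function name hypothetical):

static int reserve_pair(struct ttm_validate_buffer *a,
			struct ttm_validate_buffer *b,
			void *sync_obj)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	list_add_tail(&a->head, &list);
	list_add_tail(&b->head, &list);

	ret = ttm_eu_reserve_buffers(&ticket, &list);
	if (ret)
		return ret;	/* the ticket is already finalized on failure */

	/* ... validate and submit work against the reserved buffers ... */

	/* attaches fences, unreserves, and ends the acquire context */
	ttm_eu_fence_buffer_objects(&ticket, &list, sync_obj);
	return 0;
}
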
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dc0c065f8d39..97e9d614700f 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -393,19 +393,6 @@ static struct fb_ops udlfb_ops = {
393 .fb_release = udl_fb_release, 393 .fb_release = udl_fb_release,
394}; 394};
395 395
396static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
397 u16 blue, int regno)
398{
399}
400
401static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
402 u16 *blue, int regno)
403{
404 *red = 0;
405 *green = 0;
406 *blue = 0;
407}
408
409static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, 396static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
410 struct drm_file *file, 397 struct drm_file *file,
411 unsigned flags, unsigned color, 398 unsigned flags, unsigned color,
@@ -558,8 +545,6 @@ out:
558} 545}
559 546
560static struct drm_fb_helper_funcs udl_fb_helper_funcs = { 547static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
561 .gamma_set = udl_crtc_fb_gamma_set,
562 .gamma_get = udl_crtc_fb_gamma_get,
563 .fb_probe = udlfb_create, 548 .fb_probe = udlfb_create,
564}; 549};
565 550
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index e96d2349bd54..2ae1eb7d1635 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,10 +363,6 @@ static void udl_crtc_destroy(struct drm_crtc *crtc)
363 kfree(crtc); 363 kfree(crtc);
364} 364}
365 365
366static void udl_load_lut(struct drm_crtc *crtc)
367{
368}
369
370static void udl_crtc_prepare(struct drm_crtc *crtc) 366static void udl_crtc_prepare(struct drm_crtc *crtc)
371{ 367{
372} 368}
@@ -383,7 +379,6 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = {
383 .prepare = udl_crtc_prepare, 379 .prepare = udl_crtc_prepare,
384 .commit = udl_crtc_commit, 380 .commit = udl_crtc_commit,
385 .disable = udl_crtc_disable, 381 .disable = udl_crtc_disable,
386 .load_lut = udl_load_lut,
387}; 382};
388 383
389static const struct drm_crtc_funcs udl_crtc_funcs = { 384static const struct drm_crtc_funcs udl_crtc_funcs = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 5fae06ad7e25..d4e54fcc0acd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -302,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
302 uint32_t old_mem_type = bo->mem.mem_type; 302 uint32_t old_mem_type = bo->mem.mem_type;
303 int ret; 303 int ret;
304 304
305 BUG_ON(!ttm_bo_is_reserved(bo)); 305 lockdep_assert_held(&bo->resv->lock.base);
306 BUG_ON(old_mem_type != TTM_PL_VRAM && 306 BUG_ON(old_mem_type != TTM_PL_VRAM &&
307 old_mem_type != VMW_PL_GMR); 307 old_mem_type != VMW_PL_GMR);
308 308
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 07dfd823cc30..78e21649d48a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -565,8 +565,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
565 dev_priv->has_gmr = false; 565 dev_priv->has_gmr = false;
566 } 566 }
567 567
568 dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, 568 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
569 dev_priv->mmio_size, DRM_MTRR_WC); 569 dev_priv->mmio_size);
570 570
571 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, 571 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
572 dev_priv->mmio_size); 572 dev_priv->mmio_size);
@@ -664,8 +664,7 @@ out_no_device:
664out_err4: 664out_err4:
665 iounmap(dev_priv->mmio_virt); 665 iounmap(dev_priv->mmio_virt);
666out_err3: 666out_err3:
667 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, 667 arch_phys_wc_del(dev_priv->mmio_mtrr);
668 dev_priv->mmio_size, DRM_MTRR_WC);
669 if (dev_priv->has_gmr) 668 if (dev_priv->has_gmr)
670 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 669 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
671 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 670 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -709,8 +708,7 @@ static int vmw_driver_unload(struct drm_device *dev)
709 708
710 ttm_object_device_release(&dev_priv->tdev); 709 ttm_object_device_release(&dev_priv->tdev);
711 iounmap(dev_priv->mmio_virt); 710 iounmap(dev_priv->mmio_virt);
712 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, 711 arch_phys_wc_del(dev_priv->mmio_mtrr);
713 dev_priv->mmio_size, DRM_MTRR_WC);
714 if (dev_priv->has_gmr) 712 if (dev_priv->has_gmr)
715 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 713 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
716 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 714 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
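
The vmwgfx hunks above swap the DRM MTRR wrappers for arch_phys_wc_add(), which returns a handle that is harmless to pass back to arch_phys_wc_del() even when no MTRR was set up (e.g. on PAT systems), so the error paths need no special-casing. Paired with ioremap_wc(), and using the patched driver's field names in a hypothetical helper:

static int vmw_map_mmio(struct vmw_private *dev_priv)
{
	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);
	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);
	if (!dev_priv->mmio_virt) {
		arch_phys_wc_del(dev_priv->mmio_mtrr);
		return -ENOMEM;
	}
	return 0;
}
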
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 394e6476105b..599f6469a1eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1432,6 +1432,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1432 struct vmw_fence_obj *fence = NULL; 1432 struct vmw_fence_obj *fence = NULL;
1433 struct vmw_resource *error_resource; 1433 struct vmw_resource *error_resource;
1434 struct list_head resource_list; 1434 struct list_head resource_list;
1435 struct ww_acquire_ctx ticket;
1435 uint32_t handle; 1436 uint32_t handle;
1436 void *cmd; 1437 void *cmd;
1437 int ret; 1438 int ret;
@@ -1488,7 +1489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1488 if (unlikely(ret != 0)) 1489 if (unlikely(ret != 0))
1489 goto out_err; 1490 goto out_err;
1490 1491
1491 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); 1492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
1492 if (unlikely(ret != 0)) 1493 if (unlikely(ret != 0))
1493 goto out_err; 1494 goto out_err;
1494 1495
@@ -1537,7 +1538,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1537 DRM_ERROR("Fence submission error. Syncing.\n"); 1538 DRM_ERROR("Fence submission error. Syncing.\n");
1538 1539
1539 vmw_resource_list_unreserve(&sw_context->resource_list, false); 1540 vmw_resource_list_unreserve(&sw_context->resource_list, false);
1540 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, 1541 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
1541 (void *) fence); 1542 (void *) fence);
1542 1543
1543 if (unlikely(dev_priv->pinned_bo != NULL && 1544 if (unlikely(dev_priv->pinned_bo != NULL &&
@@ -1570,7 +1571,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1570out_err: 1571out_err:
1571 vmw_resource_relocations_free(&sw_context->res_relocations); 1572 vmw_resource_relocations_free(&sw_context->res_relocations);
1572 vmw_free_relocations(sw_context); 1573 vmw_free_relocations(sw_context);
1573 ttm_eu_backoff_reservation(&sw_context->validate_nodes); 1574 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
1574 vmw_resource_list_unreserve(&sw_context->resource_list, true); 1575 vmw_resource_list_unreserve(&sw_context->resource_list, true);
1575 vmw_clear_validations(sw_context); 1576 vmw_clear_validations(sw_context);
1576 if (unlikely(dev_priv->pinned_bo != NULL && 1577 if (unlikely(dev_priv->pinned_bo != NULL &&
@@ -1644,6 +1645,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1644 struct list_head validate_list; 1645 struct list_head validate_list;
1645 struct ttm_validate_buffer pinned_val, query_val; 1646 struct ttm_validate_buffer pinned_val, query_val;
1646 struct vmw_fence_obj *lfence = NULL; 1647 struct vmw_fence_obj *lfence = NULL;
1648 struct ww_acquire_ctx ticket;
1647 1649
1648 if (dev_priv->pinned_bo == NULL) 1650 if (dev_priv->pinned_bo == NULL)
1649 goto out_unlock; 1651 goto out_unlock;
@@ -1657,7 +1659,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1657 list_add_tail(&query_val.head, &validate_list); 1659 list_add_tail(&query_val.head, &validate_list);
1658 1660
1659 do { 1661 do {
1660 ret = ttm_eu_reserve_buffers(&validate_list); 1662 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
1661 } while (ret == -ERESTARTSYS); 1663 } while (ret == -ERESTARTSYS);
1662 1664
1663 if (unlikely(ret != 0)) { 1665 if (unlikely(ret != 0)) {
@@ -1684,7 +1686,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1684 NULL); 1686 NULL);
1685 fence = lfence; 1687 fence = lfence;
1686 } 1688 }
1687 ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); 1689 ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
1688 if (lfence != NULL) 1690 if (lfence != NULL)
1689 vmw_fence_obj_unreference(&lfence); 1691 vmw_fence_obj_unreference(&lfence);
1690 1692
@@ -1696,7 +1698,7 @@ out_unlock:
1696 return; 1698 return;
1697 1699
1698out_no_emit: 1700out_no_emit:
1699 ttm_eu_backoff_reservation(&validate_list); 1701 ttm_eu_backoff_reservation(&ticket, &validate_list);
1700out_no_reserve: 1702out_no_reserve:
1701 ttm_bo_unref(&query_val.bo); 1703 ttm_bo_unref(&query_val.bo);
1702 ttm_bo_unref(&pinned_val.bo); 1704 ttm_bo_unref(&pinned_val.bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3e3c7ab33ca2..d4607b2530d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -174,7 +174,6 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
174 uint32_t handle, uint32_t width, uint32_t height) 174 uint32_t handle, uint32_t width, uint32_t height)
175{ 175{
176 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 176 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
177 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
178 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 177 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
179 struct vmw_surface *surface = NULL; 178 struct vmw_surface *surface = NULL;
180 struct vmw_dma_buffer *dmabuf = NULL; 179 struct vmw_dma_buffer *dmabuf = NULL;
@@ -197,6 +196,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
197 } 196 }
198 197
199 if (handle) { 198 if (handle) {
199 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
200
200 ret = vmw_user_lookup_handle(dev_priv, tfile, 201 ret = vmw_user_lookup_handle(dev_priv, tfile,
201 handle, &surface, &dmabuf); 202 handle, &surface, &dmabuf);
202 if (ret) { 203 if (ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index bc784254e78e..7953d1f90b63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -958,13 +958,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (new_backup && new_backup != res->backup) {
 
 		if (res->backup) {
-			BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
+			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
 		res->backup = vmw_dmabuf_reference(new_backup);
-		BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
+		lockdep_assert_held(&new_backup->base.resv->lock.base);
 		list_add_tail(&res->mob_head, &new_backup->res_list);
 	}
 	if (new_backup)
@@ -990,9 +990,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  * @val_buf:        On successful return contains data about the
  *                  reserved and validated backup buffer.
  */
-int vmw_resource_check_buffer(struct vmw_resource *res,
-			      bool interruptible,
-			      struct ttm_validate_buffer *val_buf)
+static int
+vmw_resource_check_buffer(struct vmw_resource *res,
+			  struct ww_acquire_ctx *ticket,
+			  bool interruptible,
+			  struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 	bool backup_dirty = false;
@@ -1007,7 +1009,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(&val_list);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -1025,7 +1027,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(&val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
@@ -1069,7 +1071,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
  *.
  * @val_buf:        Backup buffer information.
  */
-void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+static void
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+				 struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 
@@ -1078,7 +1082,7 @@ void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
-	ttm_eu_backoff_reservation(&val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
 
@@ -1092,12 +1096,13 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
+	struct ww_acquire_ctx ticket;
 	int ret;
 
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, true, &val_buf);
+	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1112,7 +1117,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 	res->backup_dirty = true;
 	res->res_dirty = false;
out_no_unbind:
-	vmw_resource_backoff_reservation(&val_buf);
+	vmw_resource_backoff_reservation(&ticket, &val_buf);
 
 	return ret;
 }
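
The vmwgfx hunks above follow the ww_mutex conversion: the outermost caller owns a stack-allocated ww_acquire_ctx and threads it through both reserve and backoff, so every buffer touched in one validation sequence is covered by a single acquire context. A toy, self-contained C sketch of that calling convention; the types and helpers below are illustrative stand-ins, not the real TTM API:

#include <stdio.h>

/* Stand-ins for ww_acquire_ctx and the ttm_eu_* helpers: only the
 * calling convention is modeled, not the real locking. */
struct ticket { int held; };

static int reserve_buffers(struct ticket *t, int nbufs)
{
    t->held = nbufs;   /* "acquire" every buffer under one ticket */
    return 0;
}

static void backoff_reservation(struct ticket *t)
{
    t->held = 0;       /* release everything the ticket covers */
}

int main(void)
{
    struct ticket t;   /* stack-allocated by the outermost caller */

    if (reserve_buffers(&t, 2))
        return 1;
    printf("reserved %d buffers under one ticket\n", t.held);
    backoff_reservation(&t);
    return 0;
}

Keeping the ticket in the top-level caller is what lets the wound/wait machinery back off and retry a whole acquisition sequence instead of deadlocking.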
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index a1607d6e135b..790ddf114e58 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -73,7 +73,7 @@ struct host1x_syncpt_ops {
 	void (*restore_wait_base)(struct host1x_syncpt *syncpt);
 	void (*load_wait_base)(struct host1x_syncpt *syncpt);
 	u32 (*load)(struct host1x_syncpt *syncpt);
-	void (*cpu_incr)(struct host1x_syncpt *syncpt);
+	int (*cpu_incr)(struct host1x_syncpt *syncpt);
 	int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
 };
 
@@ -157,10 +157,10 @@ static inline u32 host1x_hw_syncpt_load(struct host1x *host,
 	return host->syncpt_op->load(sp);
 }
 
-static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
-					     struct host1x_syncpt *sp)
+static inline int host1x_hw_syncpt_cpu_incr(struct host1x *host,
+					    struct host1x_syncpt *sp)
 {
-	host->syncpt_op->cpu_incr(sp);
+	return host->syncpt_op->cpu_incr(sp);
 }
 
 static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
index 8c04943f82e3..5360e5a57ecc 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -79,6 +79,9 @@ static int tegra_plane_disable(struct drm_plane *plane)
 	struct tegra_plane *p = to_tegra_plane(plane);
 	unsigned long value;
 
+	if (!plane->crtc)
+		return 0;
+
 	value = WINDOW_A_SELECT << p->index;
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
 
@@ -140,6 +143,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
 static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 			     struct drm_framebuffer *fb)
 {
+	unsigned int format = tegra_dc_format(fb->pixel_format);
 	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
 	unsigned long value;
 
@@ -150,6 +154,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 
 	tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
 	tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+	tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
 
 	value = GENERAL_UPDATE | WIN_A_UPDATE;
 	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index 2b561c9118c6..e184b00faacd 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -148,6 +148,7 @@ int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
 			dev_err(host1x->dev,
 				"DRM setup failed for %s: %d\n",
 				dev_name(client->dev), err);
+			mutex_unlock(&host1x->clients_lock);
 			return err;
 		}
 	}
@@ -175,6 +176,7 @@ int host1x_drm_exit(struct host1x_drm *host1x)
 			dev_err(host1x->dev,
 				"DRM cleanup failed for %s: %d\n",
 				dev_name(client->dev), err);
+			mutex_unlock(&host1x->clients_lock);
 			return err;
 		}
 	}
@@ -257,6 +259,13 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	if (err < 0)
 		return err;
 
+	/*
+	 * We don't use the drm_irq_install() helpers provided by the DRM
+	 * core, so we need to set this manually in order to allow the
+	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+	 */
+	drm->irq_enabled = 1;
+
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
 		return err;
@@ -378,8 +387,7 @@ static int tegra_syncpt_incr(struct drm_device *drm, void *data,
 	if (!sp)
 		return -EINVAL;
 
-	host1x_syncpt_incr(sp);
-	return 0;
+	return host1x_syncpt_incr(sp);
 }
 
 static int tegra_syncpt_wait(struct drm_device *drm, void *data,
@@ -605,7 +613,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
 #endif
 
 struct drm_driver tegra_drm_driver = {
-	.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM,
 	.load = tegra_drm_load,
 	.unload = tegra_drm_unload,
 	.open = tegra_drm_open,
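
The two host1x_drm_init/exit hunks fix a classic lock-balance bug: an early error return inside a region guarded by clients_lock left the mutex held forever. A minimal user-space illustration of the rule that every exit path must release the lock; pthreads stand in for the kernel mutex and the failing client is simulated:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[3] = { 0, -1, 0 };   /* -1 simulates a failing client */

/* Every exit from the locked region drops the lock, including the
 * early-error return the hunks above were missing. */
static int init_clients(void)
{
    pthread_mutex_lock(&lock);
    for (int i = 0; i < 3; i++) {
        if (items[i] < 0) {
            fprintf(stderr, "setup failed for client %d\n", i);
            pthread_mutex_unlock(&lock);   /* the fix */
            return -1;
        }
    }
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    return init_clients() ? 1 : 0;
}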
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
index 6a45ae090ee7..27ffcf15a4b4 100644
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ b/drivers/gpu/host1x/drm/gr2d.c
@@ -84,7 +84,7 @@ static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
 
 	gem = drm_gem_object_lookup(drm, file, handle);
 	if (!gem)
-		return 0;
+		return NULL;
 
 	mutex_lock(&drm->struct_mutex);
 	drm_gem_object_unreference(gem);
@@ -135,8 +135,10 @@ static int gr2d_submit(struct host1x_drm_context *context,
 			goto fail;
 
 		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
-		if (!bo)
+		if (!bo) {
+			err = -ENOENT;
 			goto fail;
+		}
 
 		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
 		num_cmdbufs--;
@@ -158,8 +160,10 @@ static int gr2d_submit(struct host1x_drm_context *context,
 		reloc->cmdbuf = cmdbuf;
 		reloc->target = target;
 
-		if (!reloc->target || !reloc->cmdbuf)
+		if (!reloc->target || !reloc->cmdbuf) {
+			err = -ENOENT;
 			goto fail;
+		}
 	}
 
 	err = copy_from_user(job->waitchk, waitchks,
@@ -281,7 +285,7 @@ static int gr2d_probe(struct platform_device *pdev)
 	if (!gr2d->channel)
 		return -ENOMEM;
 
-	*syncpts = host1x_syncpt_request(dev, 0);
+	*syncpts = host1x_syncpt_request(dev, false);
 	if (!(*syncpts)) {
 		host1x_channel_free(gr2d->channel);
 		return -ENOMEM;
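
The gr2d_submit changes make failed lookups report -ENOENT instead of jumping to the cleanup label with whatever value a previous call left in err (typically 0, so the failure was silently reported as success). A compact sketch of the goto-cleanup idiom with an explicit error assignment before the jump; the names here are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* A lookup that can fail, returning NULL like host1x_bo_lookup() above. */
static void *lookup(int handle)
{
    return handle == 42 ? malloc(16) : NULL;
}

static int submit(int handle)
{
    int err;
    void *bo;

    bo = lookup(handle);
    if (!bo) {
        err = -ENOENT;   /* without this, a stale err (often 0) leaks out */
        goto fail;
    }

    free(bo);
    return 0;

fail:
    return err;
}

int main(void)
{
    printf("submit(7) = %d\n", submit(7));   /* -ENOENT, not 0 */
    return 0;
}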
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 590b69d91dab..2ee4ad55c4db 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -44,7 +44,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
 	u32 i;
 
 	for (i = 0; i < syncpt_incrs; i++)
-		host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
+		host1x_syncpt_incr(cdma->timeout.syncpt);
 
 	/* after CPU incr, ensure shadow is up to date */
 	host1x_syncpt_load(cdma->timeout.syncpt);
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 61174990102a..0cf6095d3367 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -77,21 +77,19 @@ static u32 syncpt_load(struct host1x_syncpt *sp)
  * Write a cpu syncpoint increment to the hardware, without touching
  * the cache.
  */
-static void syncpt_cpu_incr(struct host1x_syncpt *sp)
+static int syncpt_cpu_incr(struct host1x_syncpt *sp)
 {
 	struct host1x *host = sp->host;
 	u32 reg_offset = sp->id / 32;
 
 	if (!host1x_syncpt_client_managed(sp) &&
-	    host1x_syncpt_idle(sp)) {
-		dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
-			sp->id);
-		host1x_debug_dump(sp->host);
-		return;
-	}
+	    host1x_syncpt_idle(sp))
+		return -EINVAL;
 	host1x_sync_writel(host, BIT_MASK(sp->id),
 			   HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
 	wmb();
+
+	return 0;
 }
 
 /* remove a wait pointed to by patch_addr */
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index f665d679031c..cc807667d8f1 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -228,17 +228,15 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 	void *cmdbuf_page_addr = NULL;
 
 	/* pin & patch the relocs for one gather */
-	while (i < job->num_relocs) {
+	for (i = 0; i < job->num_relocs; i++) {
 		struct host1x_reloc *reloc = &job->relocarray[i];
 		u32 reloc_addr = (job->reloc_addr_phys[i] +
 				  reloc->target_offset) >> reloc->shift;
 		u32 *target;
 
 		/* skip all other gathers */
-		if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
-			i++;
+		if (cmdbuf != reloc->cmdbuf)
 			continue;
-		}
 
 		if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
 			if (cmdbuf_page_addr)
@@ -257,9 +255,6 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 
 		target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
 		*target = reloc_addr;
-
-		/* mark this gather as handled */
-		reloc->cmdbuf = 0;
 	}
 
 	if (cmdbuf_page_addr)
@@ -268,15 +263,15 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 	return 0;
 }
 
-static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
-		       unsigned int offset)
+static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
+			unsigned int offset)
 {
 	offset *= sizeof(u32);
 
 	if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
-		return -EINVAL;
+		return false;
 
-	return 0;
+	return true;
 }
 
 struct host1x_firewall {
@@ -307,10 +302,10 @@ static int check_mask(struct host1x_firewall *fw)
 
 	if (mask & 1) {
 		if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-			bool bad_reloc = check_reloc(fw->reloc,
-						     fw->cmdbuf_id,
-						     fw->offset);
-			if (!fw->num_relocs || bad_reloc)
+			if (!fw->num_relocs)
+				return -EINVAL;
+			if (!check_reloc(fw->reloc, fw->cmdbuf_id,
+					 fw->offset))
 				return -EINVAL;
 			fw->reloc++;
 			fw->num_relocs--;
@@ -330,14 +325,14 @@ static int check_incr(struct host1x_firewall *fw)
 	u32 count = fw->count;
 	u32 reg = fw->reg;
 
-	while (fw) {
+	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
 		if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-			bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
-						     fw->offset);
-			if (!fw->num_relocs || bad_reloc)
+			if (!fw->num_relocs)
+				return -EINVAL;
+			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
 				return -EINVAL;
 			fw->reloc++;
 			fw->num_relocs--;
@@ -361,9 +356,9 @@ static int check_nonincr(struct host1x_firewall *fw)
 		return -EINVAL;
 
 	if (is_addr_reg) {
-		bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
-					     fw->offset);
-		if (!fw->num_relocs || bad_reloc)
+		if (!fw->num_relocs)
+			return -EINVAL;
+		if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
 			return -EINVAL;
 		fw->reloc++;
 		fw->num_relocs--;
@@ -376,69 +371,58 @@ static int check_nonincr(struct host1x_firewall *fw)
 	return 0;
 }
 
-static int validate(struct host1x_job *job, struct device *dev,
-		    struct host1x_job_gather *g)
+static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
 {
-	u32 *cmdbuf_base;
+	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
+		(g->offset / sizeof(u32));
 	int err = 0;
-	struct host1x_firewall fw;
 
-	fw.job = job;
-	fw.dev = dev;
-	fw.reloc = job->relocarray;
-	fw.num_relocs = job->num_relocs;
-	fw.cmdbuf_id = g->bo;
-
-	fw.offset = 0;
-	fw.class = 0;
-
-	if (!job->is_addr_reg)
+	if (!fw->job->is_addr_reg)
 		return 0;
 
-	cmdbuf_base = host1x_bo_mmap(g->bo);
-	if (!cmdbuf_base)
-		return -ENOMEM;
+	fw->words = g->words;
+	fw->cmdbuf_id = g->bo;
+	fw->offset = 0;
 
-	fw.words = g->words;
-	while (fw.words && !err) {
-		u32 word = cmdbuf_base[fw.offset];
+	while (fw->words && !err) {
+		u32 word = cmdbuf_base[fw->offset];
 		u32 opcode = (word & 0xf0000000) >> 28;
 
-		fw.mask = 0;
-		fw.reg = 0;
-		fw.count = 0;
-		fw.words--;
-		fw.offset++;
+		fw->mask = 0;
+		fw->reg = 0;
+		fw->count = 0;
+		fw->words--;
+		fw->offset++;
 
 		switch (opcode) {
 		case 0:
-			fw.class = word >> 6 & 0x3ff;
-			fw.mask = word & 0x3f;
-			fw.reg = word >> 16 & 0xfff;
-			err = check_mask(&fw);
+			fw->class = word >> 6 & 0x3ff;
+			fw->mask = word & 0x3f;
+			fw->reg = word >> 16 & 0xfff;
+			err = check_mask(fw);
 			if (err)
 				goto out;
 			break;
 		case 1:
-			fw.reg = word >> 16 & 0xfff;
-			fw.count = word & 0xffff;
-			err = check_incr(&fw);
+			fw->reg = word >> 16 & 0xfff;
+			fw->count = word & 0xffff;
+			err = check_incr(fw);
 			if (err)
 				goto out;
 			break;
 
 		case 2:
-			fw.reg = word >> 16 & 0xfff;
-			fw.count = word & 0xffff;
-			err = check_nonincr(&fw);
+			fw->reg = word >> 16 & 0xfff;
+			fw->count = word & 0xffff;
+			err = check_nonincr(fw);
 			if (err)
 				goto out;
 			break;
 
 		case 3:
-			fw.mask = word & 0xffff;
-			fw.reg = word >> 16 & 0xfff;
-			err = check_mask(&fw);
+			fw->mask = word & 0xffff;
+			fw->reg = word >> 16 & 0xfff;
+			err = check_mask(fw);
 			if (err)
 				goto out;
 			break;
@@ -453,21 +437,26 @@ static int validate(struct host1x_job *job, struct device *dev,
 	}
 
 	/* No relocs should remain at this point */
-	if (fw.num_relocs)
+	if (fw->num_relocs)
 		err = -EINVAL;
 
out:
-	host1x_bo_munmap(g->bo, cmdbuf_base);
-
 	return err;
 }
 
 static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 {
+	struct host1x_firewall fw;
 	size_t size = 0;
 	size_t offset = 0;
 	int i;
 
+	fw.job = job;
+	fw.dev = dev;
+	fw.reloc = job->relocarray;
+	fw.num_relocs = job->num_relocs;
+	fw.class = 0;
+
 	for (i = 0; i < job->num_gathers; i++) {
 		struct host1x_job_gather *g = &job->gathers[i];
 		size += g->words * sizeof(u32);
@@ -488,14 +477,19 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 		struct host1x_job_gather *g = &job->gathers[i];
 		void *gather;
 
+		/* Copy the gather */
 		gather = host1x_bo_mmap(g->bo);
 		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
 		       g->words * sizeof(u32));
 		host1x_bo_munmap(g->bo, gather);
 
+		/* Store the location in the buffer */
 		g->base = job->gather_copy;
 		g->offset = offset;
-		g->bo = NULL;
+
+		/* Validate the job */
+		if (validate(&fw, g))
+			return -EINVAL;
 
 		offset += g->words * sizeof(u32);
 	}
@@ -540,20 +534,11 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
 			if (job->gathers[j].bo == g->bo)
 				job->gathers[j].handled = true;
 
-		err = 0;
-
-		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
-			err = validate(job, dev, g);
-
+		err = do_relocs(job, g->bo);
 		if (err)
-			dev_err(dev, "Job invalid (err=%d)\n", err);
-
-		if (!err)
-			err = do_relocs(job, g->bo);
-
-		if (!err)
-			err = do_waitchks(job, host, g->bo);
+			break;
 
+		err = do_waitchks(job, host, g->bo);
 		if (err)
 			break;
 	}
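
The reworked validate() above walks each copied gather word by word: the top nibble of a command word selects the opcode, and the remaining fields are sliced out with the masks visible in the hunk. A self-contained decoder using those same bit layouts; the opcode names are inferred from the check_* helper names, not taken from hardware documentation:

#include <stdint.h>
#include <stdio.h>

/* Decode host1x command words the way validate() walks a gather. */
static void decode(uint32_t word)
{
    uint32_t opcode = (word & 0xf0000000) >> 28;

    switch (opcode) {
    case 0:  /* set class */
        printf("setclass class=%u mask=0x%x reg=0x%x\n",
               (word >> 6) & 0x3ff, word & 0x3f, (word >> 16) & 0xfff);
        break;
    case 1:  /* incrementing write burst */
    case 2:  /* non-incrementing write burst */
        printf("%s reg=0x%x count=%u\n",
               opcode == 1 ? "incr" : "nonincr",
               (word >> 16) & 0xfff, word & 0xffff);
        break;
    case 3:  /* masked write */
        printf("mask reg=0x%x mask=0x%x\n",
               (word >> 16) & 0xfff, word & 0xffff);
        break;
    default:
        printf("unhandled opcode %u\n", opcode);
    }
}

int main(void)
{
    decode(0x10010003);   /* incr: reg 0x1, count 3 */
    decode(0x20050002);   /* nonincr: reg 0x5, count 2 */
    return 0;
}

Validating the already-copied gather (instead of mmap'ing the original BO, as the old code did) also closes the window where userspace could rewrite the buffer after it was checked.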
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 4b493453e805..409745b949db 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -32,7 +32,7 @@
 
 static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
 						  struct device *dev,
-						  int client_managed)
+						  bool client_managed)
 {
 	int i;
 	struct host1x_syncpt *sp = host->syncpt;
@@ -40,7 +40,8 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
 
 	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
 		;
-	if (sp->dev)
+
+	if (i >= host->info->nb_pts)
 		return NULL;
 
 	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
@@ -128,22 +129,11 @@ u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
 }
 
 /*
- * Write a cpu syncpoint increment to the hardware, without touching
- * the cache. Caller is responsible for host being powered.
- */
-void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
-{
-	host1x_hw_syncpt_cpu_incr(sp->host, sp);
-}
-
-/*
  * Increment syncpoint value from cpu, updating cache
  */
-void host1x_syncpt_incr(struct host1x_syncpt *sp)
+int host1x_syncpt_incr(struct host1x_syncpt *sp)
 {
-	if (host1x_syncpt_client_managed(sp))
-		host1x_syncpt_incr_max(sp, 1);
-	host1x_syncpt_cpu_incr(sp);
+	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
 }
 
 /*
@@ -331,7 +321,7 @@ int host1x_syncpt_init(struct host1x *host)
 	host1x_syncpt_restore(host);
 
 	/* Allocate sync point to use for clearing waits for expired fences */
-	host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
+	host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
 	if (!host->nop_sp)
 		return -ENOMEM;
 
@@ -339,7 +329,7 @@ int host1x_syncpt_init(struct host1x *host)
 }
 
 struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    int client_managed)
+					    bool client_managed)
 {
 	struct host1x *host = dev_get_drvdata(dev->parent);
 	return _host1x_syncpt_alloc(host, dev, client_managed);
@@ -353,7 +343,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
 	kfree(sp->name);
 	sp->dev = NULL;
 	sp->name = NULL;
-	sp->client_managed = 0;
+	sp->client_managed = false;
 }
 
 void host1x_syncpt_deinit(struct host1x *host)
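
The _host1x_syncpt_alloc fix is a bounds check: after the linear scan, the old code tested a field of the element the cursor stopped on, which is one past the end of the table when every slot is taken. A small model of the corrected scan; the table contents are invented for the demo:

#include <stdio.h>

#define NB_PTS 4

static const char *names[NB_PTS] = { "a", "b", "c", "d" };   /* all taken */

/* After a linear scan for a free slot, test the loop index against the
 * bound, never a field of the (possibly past-the-end) element. */
static int alloc_slot(void)
{
    int i;

    for (i = 0; i < NB_PTS && names[i]; i++)
        ;
    if (i >= NB_PTS)   /* the fix: old code dereferenced the element here */
        return -1;
    return i;
}

int main(void)
{
    printf("free slot: %d\n", alloc_slot());   /* -1: table is full */
    return 0;
}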
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index c99806130f2e..267c0b9d3647 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -36,7 +36,7 @@ struct host1x_syncpt {
 	atomic_t max_val;
 	u32 base_val;
 	const char *name;
-	int client_managed;
+	bool client_managed;
 	struct host1x *host;
 	struct device *dev;
 
@@ -94,7 +94,7 @@ static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
 }
 
 /* Return true if sync point is client managed. */
-static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
+static inline bool host1x_syncpt_client_managed(struct host1x_syncpt *sp)
 {
 	return sp->client_managed;
 }
@@ -115,9 +115,6 @@ static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
 /* Return pointer to struct denoting sync point id. */
 struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
 
-/* Request incrementing a sync point. */
-void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
-
 /* Load current value from hardware to the shadow register. */
 u32 host1x_syncpt_load(struct host1x_syncpt *sp);
 
@@ -133,8 +130,8 @@ void host1x_syncpt_restore(struct host1x *host);
 /* Read current wait base value into shadow register and return it. */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
 
-/* Increment sync point and its max. */
-void host1x_syncpt_incr(struct host1x_syncpt *sp);
+/* Request incrementing a sync point. */
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
 
 /* Indicate future operations by incrementing the sync point max. */
 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
@@ -157,7 +154,7 @@ u32 host1x_syncpt_id(struct host1x_syncpt *sp);
 
 /* Allocate a sync point for a device. */
 struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    int client_managed);
+					    bool client_managed);
 
 /* Free a sync point. */
 void host1x_syncpt_free(struct host1x_syncpt *sp);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d6cbfe9df218..fa061d46527f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -137,7 +137,7 @@ static const struct xpad_device {
 	{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
 	{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 62a2c0e4cc99..7ac9c9818d55 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA
 
 config KEYBOARD_OPENCORES
 	tristate "OpenCores Keyboard Controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you want to use the OpenCores Keyboard Controller
 	  http://www.opencores.org/project,keyboardcontroller
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index aebfe3ecb945..1bda828f4b55 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2
 
 config SERIO_ALTERA_PS2
 	tristate "Altera UP PS/2 controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you have Altera University Program PS/2 ports.
 
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 518282da6d85..384fbcd0cee0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 	case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
 	case 0x160802: /* Cintiq 13HD Pro Pen */
 	case 0x180802: /* DTH2242 Pen */
+	case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
 		wacom->tool[idx] = BTN_TOOL_PEN;
 		break;
 
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 	case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
 	case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
 	case 0x18080a: /* DTH2242 Eraser */
+	case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 		wacom->tool[idx] = BTN_TOOL_RUBBER;
 		break;
 
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 8e60437ac85b..ae89d2609ab0 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
 	return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }
 
+static int cyttsp_handshake(struct cyttsp *ts)
+{
+	if (ts->pdata->use_hndshk)
+		return ttsp_send_command(ts,
+				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+
+	return 0;
+}
+
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
 	memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
 	memcpy(bl_cmd, bl_command, sizeof(bl_command));
 	if (ts->pdata->bl_keys)
 		memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-			ts->pdata->bl_keys, sizeof(bl_command));
+			ts->pdata->bl_keys, CY_NUM_BL_KEYS);
 
 	error = ttsp_write_block_data(ts, CY_REG_BASE,
 				      sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
 
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
 		return -EIO;
 
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
 		goto out;
 
 	/* provide flow control handshake */
-	if (ts->pdata->use_hndshk) {
-		error = ttsp_send_command(ts,
-				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-		if (error)
-			goto out;
-	}
+	error = cyttsp_handshake(ts);
+	if (error)
+		goto out;
 
 	if (unlikely(ts->state == CY_IDLE_STATE))
 		goto out;
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 1aa3c6967e70..f1ebde369f86 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
 	u8 hst_mode;
-	u8 mfg_cmd;
 	u8 mfg_stat;
+	u8 mfg_cmd;
 	u8 cid[3];
 	u8 tt_undef1;
 	u8 uid[8];
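
The header change swaps mfg_stat and mfg_cmd, plausibly so the struct again matches the byte order the controller returns when the sysinfo block is read in one transfer; that register-map reading is an inference from how the struct is filled, not from a datasheet. When a struct mirrors a wire or register layout, offsetof() makes the assumption checkable:

#include <stdio.h>
#include <stddef.h>

/* Illustrative miniature of the fixed layout: mfg_stat precedes mfg_cmd. */
struct sysinfo_data {
    unsigned char hst_mode;
    unsigned char mfg_stat;   /* byte 1 on the wire, per the fix */
    unsigned char mfg_cmd;    /* byte 2 on the wire */
};

int main(void)
{
    printf("mfg_stat at %zu, mfg_cmd at %zu\n",
           offsetof(struct sysinfo_data, mfg_stat),
           offsetof(struct sysinfo_data, mfg_cmd));
    return 0;
}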
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index c735c5a008a2..6427600b5bbe 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
 		int ret;
 
 		sg_free_table(sgt);
-		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+		ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f5d84d6f8222..48b396fced0a 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
 	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
 		return NULL;
 
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
 		dev_err(&pdev->dev,
 			"failed to allocate memory for platform data\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5000586cb98d..71cc3e6ef47c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
-	if (ret != 0) {
+	if (ret < 0) {
 		dev_err(dev, "Failed to enable device: %d\n", ret);
 		goto out_tx;
 	}
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index ff5c63350932..b2730b1af5b4 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -364,17 +364,12 @@ static void ipu_crtc_commit(struct drm_crtc *crtc)
 	ipu_fb_enable(ipu_crtc);
 }
 
-static void ipu_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
 static struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.dpms = ipu_crtc_dpms,
 	.mode_fixup = ipu_crtc_mode_fixup,
 	.mode_set = ipu_crtc_mode_set,
 	.prepare = ipu_crtc_prepare,
 	.commit = ipu_crtc_commit,
-	.load_lut = ipu_crtc_load_lut,
 };
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 5855d17d19ac..9d8feac67637 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -42,6 +42,7 @@
 #include <linux/kd.h>
 #include <linux/slab.h>
 #include <linux/vt_kern.h>
+#include <linux/sched.h>
 #include <linux/selection.h>
 #include <linux/spinlock.h>
 #include <linux/ioport.h>
@@ -1124,11 +1125,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
 
 	if (arg) {
 		if (set)
-			for (i = 0; i < cmapsz; i++)
+			for (i = 0; i < cmapsz; i++) {
 				vga_writeb(arg[i], charmap + i);
+				cond_resched();
+			}
 		else
-			for (i = 0; i < cmapsz; i++)
+			for (i = 0; i < cmapsz; i++) {
 				arg[i] = vga_readb(charmap + i);
+				cond_resched();
+			}
 
 		/*
 		 * In 512-character mode, the character map is not contiguous if
@@ -1139,11 +1144,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
 			charmap += 2 * cmapsz;
 			arg += cmapsz;
 			if (set)
-				for (i = 0; i < cmapsz; i++)
+				for (i = 0; i < cmapsz; i++) {
 					vga_writeb(arg[i], charmap + i);
+					cond_resched();
+				}
 			else
-				for (i = 0; i < cmapsz; i++)
+				for (i = 0; i < cmapsz; i++) {
 					arg[i] = vga_readb(charmap + i);
+					cond_resched();
+				}
 		}
 	}
 
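
The vgacon hunks insert cond_resched() into the font copy loops so a long byte-at-a-time transfer over slow VGA memory no longer monopolizes the CPU. A rough user-space analog, with sched_yield() standing in for cond_resched() and the buffer sizes invented for the demo:

#include <sched.h>
#include <stdio.h>

int main(void)
{
    static unsigned char src[1 << 16], dst[1 << 16];

    /* Offer the scheduler a chance to run every few thousand
     * iterations instead of hogging the CPU for the whole copy. */
    for (unsigned i = 0; i < sizeof(src); i++) {
        dst[i] = src[i];
        if ((i & 0xfff) == 0)
            sched_yield();   /* kernel code uses cond_resched() */
    }
    printf("copied %zu bytes\n", sizeof(dst));
    return 0;
}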
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 56009bc02b02..2894e0300a33 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -23,7 +23,7 @@
  * Every display_timing can be specified with either just the typical value or
  * a range consisting of min/typ/max. This function helps handling this
  **/
-static int parse_timing_property(struct device_node *np, const char *name,
+static int parse_timing_property(const struct device_node *np, const char *name,
 		struct timing_entry *result)
 {
 	struct property *prop;
@@ -56,7 +56,8 @@ static int parse_timing_property(const struct device_node *np, const char *name,
  * of_get_display_timing - parse display_timing entry from device_node
  * @np: device_node with the properties
  **/
-static struct display_timing *of_get_display_timing(struct device_node *np)
+static struct display_timing *of_get_display_timing(const struct device_node
+						    *np)
 {
 	struct display_timing *dt;
 	u32 val = 0;
@@ -97,6 +98,8 @@ static struct display_timing *of_get_display_timing(const struct device_node *np)
 		dt->flags |= DISPLAY_FLAGS_INTERLACED;
 	if (of_property_read_bool(np, "doublescan"))
 		dt->flags |= DISPLAY_FLAGS_DOUBLESCAN;
+	if (of_property_read_bool(np, "doubleclk"))
+		dt->flags |= DISPLAY_FLAGS_DOUBLECLK;
 
 	if (ret) {
 		pr_err("%s: error reading timing properties\n",
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index e328a61b64ba..296279bc71d2 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -24,9 +24,6 @@
 #ifdef CONFIG_X86
 #include <video/vga.h>
 #endif
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 #include "edid.h"
 
 static struct cb_id uvesafb_cn_id = {
@@ -1540,67 +1537,30 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
 
 static void uvesafb_init_mtrr(struct fb_info *info)
 {
-#ifdef CONFIG_MTRR
+	struct uvesafb_par *par = info->par;
+
 	if (mtrr && !(info->fix.smem_start & (PAGE_SIZE - 1))) {
 		int temp_size = info->fix.smem_len;
-		unsigned int type = 0;
 
-		switch (mtrr) {
-		case 1:
-			type = MTRR_TYPE_UNCACHABLE;
-			break;
-		case 2:
-			type = MTRR_TYPE_WRBACK;
-			break;
-		case 3:
-			type = MTRR_TYPE_WRCOMB;
-			break;
-		case 4:
-			type = MTRR_TYPE_WRTHROUGH;
-			break;
-		default:
-			type = 0;
-			break;
-		}
+		int rc;
 
-		if (type) {
-			int rc;
+		/* Find the largest power-of-two */
+		temp_size = roundup_pow_of_two(temp_size);
 
-			/* Find the largest power-of-two */
-			temp_size = roundup_pow_of_two(temp_size);
+		/* Try and find a power of two to add */
+		do {
+			rc = arch_phys_wc_add(info->fix.smem_start, temp_size);
+			temp_size >>= 1;
+		} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
 
-			/* Try and find a power of two to add */
-			do {
-				rc = mtrr_add(info->fix.smem_start,
-					      temp_size, type, 1);
-				temp_size >>= 1;
-			} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
-		}
+		if (rc >= 0)
+			par->mtrr_handle = rc;
 	}
-#endif /* CONFIG_MTRR */
 }
 
 static void uvesafb_ioremap(struct fb_info *info)
 {
-#ifdef CONFIG_X86
-	switch (mtrr) {
-	case 1: /* uncachable */
-		info->screen_base = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
-		break;
-	case 2: /* write-back */
-		info->screen_base = ioremap_cache(info->fix.smem_start, info->fix.smem_len);
-		break;
-	case 3: /* write-combining */
-		info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
-		break;
-	case 4: /* write-through */
-	default:
-		info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
-		break;
-	}
-#else
-	info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
-#endif /* CONFIG_X86 */
+	info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
 }
 
 static ssize_t uvesafb_show_vbe_ver(struct device *dev,
@@ -1851,6 +1811,7 @@ static int uvesafb_remove(struct platform_device *dev)
 	unregister_framebuffer(info);
 	release_region(0x3c0, 32);
 	iounmap(info->screen_base);
+	arch_phys_wc_del(par->mtrr_handle);
 	release_mem_region(info->fix.smem_start, info->fix.smem_len);
 	fb_destroy_modedb(info->monspecs.modedb);
 	fb_dealloc_cmap(&info->cmap);
@@ -1930,6 +1891,9 @@ static int uvesafb_setup(char *options)
 		}
 	}
 
+	if (mtrr != 3 && mtrr != 1)
+		pr_warn("uvesafb: mtrr should be set to 0 or 3; %d is unsupported", mtrr);
+
 	return 0;
 }
 #endif /* !MODULE */
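
uvesafb now delegates write-combining setup to arch_phys_wc_add() and keeps only the size-probing loop: round the aperture up to a power of two, then halve until the request is accepted. A self-contained model of that retry loop; the 4 MiB acceptance limit is invented for the demo, and -22 plays the role of -EINVAL:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
    unsigned long p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

/* Pretend MTRR-style allocator: accepts regions up to 4 MiB. */
static int wc_add(unsigned long size)
{
    return size <= (4UL << 20) ? 0 : -22;
}

int main(void)
{
    unsigned long size = roundup_pow_of_two(5UL << 20);   /* 5 MiB fb */
    int rc;

    /* Same shape as the kernel loop: try, then halve on -EINVAL. */
    do {
        rc = wc_add(size);
        printf("try %lu KiB -> %d\n", size >> 10, rc);
        size >>= 1;
    } while (size >= 4096 && rc == -22);

    return rc ? 1 : 0;
}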
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e570081f9f76..35f281033142 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2470,13 +2470,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 		.mode = mode
 	};
 	int err;
+	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
+		(mode & FALLOC_FL_PUNCH_HOLE);
 
 	if (fc->no_fallocate)
 		return -EOPNOTSUPP;
 
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
+	if (lock_inode) {
 		mutex_lock(&inode->i_mutex);
-		fuse_set_nowrite(inode);
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			fuse_set_nowrite(inode);
 	}
 
 	req = fuse_get_req_nopages(fc);
@@ -2511,8 +2514,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 	fuse_invalidate_attr(inode);
 
out:
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
-		fuse_release_nowrite(inode);
+	if (lock_inode) {
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			fuse_release_nowrite(inode);
 		mutex_unlock(&inode->i_mutex);
 	}
 
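
fuse_file_fallocate now computes lock_inode once and evaluates the same predicate on entry and on the out: path, so the lock and unlock branches can never disagree. A small sketch of the pattern; the two flag values are defined locally for the demo rather than taken from the uapi header:

#include <stdbool.h>
#include <stdio.h>

#define KEEP_SIZE  0x1
#define PUNCH_HOLE 0x2

/* Decide locking once, then use the same boolean on both paths. */
static void fallocate_like(int mode)
{
    bool lock_inode = !(mode & KEEP_SIZE) || (mode & PUNCH_HOLE);

    if (lock_inode)
        printf("mode %#x: lock, then unlock with the same predicate\n",
               mode);
    else
        printf("mode %#x: no lock taken\n", mode);
}

int main(void)
{
    fallocate_like(0);
    fallocate_like(KEEP_SIZE);
    fallocate_like(KEEP_SIZE | PUNCH_HOLE);
    return 0;
}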
diff --git a/fs/splice.c b/fs/splice.c
index 9eca476227d5..d37431dd60a1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
  * @in:		file to splice from
  * @ppos:	input file offset
  * @out:	file to splice to
+ * @opos:	output file offset
  * @len:	number of bytes to splice
  * @flags:	splice modifier flags
  *
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index f104af7cf437..d4f9fb4e53df 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc72b6a2..61069ed334e2 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H
 
 #define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)		(-1)
 #define __mutex_fastpath_unlock(count, fail_fn)	fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index c04e0db8a2d6..f169ec064785 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
-			return fail_fn(count);
+			return -1;
 	return 0;
 }
 
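
The reworked __mutex_fastpath_lock_retval drops the fail-function pointer: the fastpath simply answers 0 or -1 and the generic mutex code picks the slow path itself. A user-space rendering with C11 atomics, following the count convention in this header (1 unlocked, 0 locked, -1 locked with possible waiters):

#include <stdatomic.h>
#include <stdio.h>

static int fastpath_lock_retval(atomic_int *count)
{
    if (atomic_exchange(count, 0) != 1)
        if (atomic_exchange(count, -1) != 1)
            return -1;   /* caller falls back to the slow path */
    return 0;
}

int main(void)
{
    atomic_int count = 1;

    printf("first lock: %d\n", fastpath_lock_retval(&count));   /* 0 */
    printf("second lock: %d\n", fastpath_lock_retval(&count));  /* -1 */
    return 0;
}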
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 63d17ee9eb48..82670ac0d774 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -55,16 +55,13 @@
 #include <linux/mm.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
+#include <linux/io.h>
 #include <linux/slab.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>	/* For pte_wrprotect */
 #endif
-#include <asm/io.h>
 #include <asm/mman.h>
 #include <asm/uaccess.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
 #include <linux/types.h>
 #include <linux/agp_backend.h>
@@ -933,6 +930,7 @@ struct drm_driver {
 			struct dma_buf *dma_buf);
 	/* low-level interface used by drm_gem_prime_{import,export} */
 	int (*gem_prime_pin)(struct drm_gem_object *obj);
+	void (*gem_prime_unpin)(struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
 			struct drm_device *dev, size_t size,
@@ -1250,37 +1248,8 @@ static inline int drm_core_has_MTRR(struct drm_device *dev)
 {
 	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
 }
-
-#define DRM_MTRR_WC		MTRR_TYPE_WRCOMB
-
-static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
-			       unsigned int flags)
-{
-	return mtrr_add(offset, size, flags, 1);
-}
-
-static inline int drm_mtrr_del(int handle, unsigned long offset,
-			       unsigned long size, unsigned int flags)
-{
-	return mtrr_del(handle, offset, size);
-}
-
 #else
 #define drm_core_has_MTRR(dev) (0)
-
-#define DRM_MTRR_WC		0
-
-static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
-			       unsigned int flags)
-{
-	return 0;
-}
-
-static inline int drm_mtrr_del(int handle, unsigned long offset,
-			       unsigned long size, unsigned int flags)
-{
-	return 0;
-}
 #endif
 
 static inline void drm_device_set_unplugged(struct drm_device *dev)
@@ -1630,7 +1599,6 @@ extern void drm_sysfs_destroy(void);
 extern int drm_sysfs_device_add(struct drm_minor *minor);
 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 extern void drm_sysfs_device_remove(struct drm_minor *minor);
-extern char *drm_get_connector_status_name(enum drm_connector_status status);
 extern int drm_sysfs_connector_add(struct drm_connector *connector);
 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 
@@ -1648,6 +1616,8 @@ int drm_gem_private_object_init(struct drm_device *dev,
 void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 #include <drm/drm_global.h>
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index adb3f9b625f6..fa12a2fa4293 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -339,6 +339,9 @@ struct drm_crtc_funcs {
339 /* cursor controls */ 339 /* cursor controls */
340 int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, 340 int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
341 uint32_t handle, uint32_t width, uint32_t height); 341 uint32_t handle, uint32_t width, uint32_t height);
342 int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
343 uint32_t handle, uint32_t width, uint32_t height,
344 int32_t hot_x, int32_t hot_y);
342 int (*cursor_move)(struct drm_crtc *crtc, int x, int y); 345 int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
343 346
344 /* Set gamma on the CRTC */ 347 /* Set gamma on the CRTC */
@@ -409,6 +412,10 @@ struct drm_crtc {
409 /* framebuffer the connector is currently bound to */ 412 /* framebuffer the connector is currently bound to */
410 struct drm_framebuffer *fb; 413 struct drm_framebuffer *fb;
411 414
415 /* Temporary tracking of the old fb while a modeset is ongoing. Used
416 * by drm_mode_set_config_internal to implement correct refcounting. */
417 struct drm_framebuffer *old_fb;
418
412 bool enabled; 419 bool enabled;
413 420
414 /* Requested mode from modesetting. */ 421 /* Requested mode from modesetting. */
@@ -654,11 +661,7 @@ struct drm_plane_funcs {
654 * @format_count: number of formats supported 661 * @format_count: number of formats supported
655 * @crtc: currently bound CRTC 662 * @crtc: currently bound CRTC
656 * @fb: currently bound fb 663 * @fb: currently bound fb
657 * @gamma_size: size of gamma table
658 * @gamma_store: gamma correction table
659 * @enabled: enabled flag
660 * @funcs: helper functions 664 * @funcs: helper functions
661 * @helper_private: storage for driver layer
662 * @properties: property tracking for this plane 665 * @properties: property tracking for this plane
663 */ 666 */
664struct drm_plane { 667struct drm_plane {
@@ -674,14 +677,7 @@ struct drm_plane {
674 struct drm_crtc *crtc; 677 struct drm_crtc *crtc;
675 struct drm_framebuffer *fb; 678 struct drm_framebuffer *fb;
676 679
677 /* CRTC gamma size for reporting to userspace */
678 uint32_t gamma_size;
679 uint16_t *gamma_store;
680
681 bool enabled;
682
683 const struct drm_plane_funcs *funcs; 680 const struct drm_plane_funcs *funcs;
684 void *helper_private;
685 681
686 struct drm_object_properties properties; 682 struct drm_object_properties properties;
687}; 683};
@@ -894,15 +890,17 @@ extern int drm_plane_init(struct drm_device *dev,
894 const uint32_t *formats, uint32_t format_count, 890 const uint32_t *formats, uint32_t format_count,
895 bool priv); 891 bool priv);
896extern void drm_plane_cleanup(struct drm_plane *plane); 892extern void drm_plane_cleanup(struct drm_plane *plane);
893extern void drm_plane_force_disable(struct drm_plane *plane);
897 894
898extern void drm_encoder_cleanup(struct drm_encoder *encoder); 895extern void drm_encoder_cleanup(struct drm_encoder *encoder);
899 896
900extern char *drm_get_connector_name(struct drm_connector *connector); 897extern const char *drm_get_connector_name(const struct drm_connector *connector);
901extern char *drm_get_dpms_name(int val); 898extern const char *drm_get_connector_status_name(enum drm_connector_status status);
902extern char *drm_get_dvi_i_subconnector_name(int val); 899extern const char *drm_get_dpms_name(int val);
903extern char *drm_get_dvi_i_select_name(int val); 900extern const char *drm_get_dvi_i_subconnector_name(int val);
904extern char *drm_get_tv_subconnector_name(int val); 901extern const char *drm_get_dvi_i_select_name(int val);
905extern char *drm_get_tv_select_name(int val); 902extern const char *drm_get_tv_subconnector_name(int val);
903extern const char *drm_get_tv_select_name(int val);
906extern void drm_fb_release(struct drm_file *file_priv); 904extern void drm_fb_release(struct drm_file *file_priv);
907extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); 905extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
908extern bool drm_probe_ddc(struct i2c_adapter *adapter); 906extern bool drm_probe_ddc(struct i2c_adapter *adapter);
@@ -994,7 +992,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
994extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 992extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
995extern int drm_mode_create_dithering_property(struct drm_device *dev); 993extern int drm_mode_create_dithering_property(struct drm_device *dev);
996extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 994extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
997extern char *drm_get_encoder_name(struct drm_encoder *encoder); 995extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
998 996
999extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 997extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
1000 struct drm_encoder *encoder); 998 struct drm_encoder *encoder);
@@ -1022,6 +1020,8 @@ extern int drm_mode_setplane(struct drm_device *dev,
1022 void *data, struct drm_file *file_priv); 1020 void *data, struct drm_file *file_priv);
1023extern int drm_mode_cursor_ioctl(struct drm_device *dev, 1021extern int drm_mode_cursor_ioctl(struct drm_device *dev,
1024 void *data, struct drm_file *file_priv); 1022 void *data, struct drm_file *file_priv);
1023extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
1024 void *data, struct drm_file *file_priv);
1025extern int drm_mode_addfb(struct drm_device *dev, 1025extern int drm_mode_addfb(struct drm_device *dev,
1026 void *data, struct drm_file *file_priv); 1026 void *data, struct drm_file *file_priv);
1027extern int drm_mode_addfb2(struct drm_device *dev, 1027extern int drm_mode_addfb2(struct drm_device *dev,
@@ -1094,5 +1094,6 @@ extern int drm_format_num_planes(uint32_t format);
1094extern int drm_format_plane_cpp(uint32_t format, int plane); 1094extern int drm_format_plane_cpp(uint32_t format, int plane);
1095extern int drm_format_horz_chroma_subsampling(uint32_t format); 1095extern int drm_format_horz_chroma_subsampling(uint32_t format);
1096extern int drm_format_vert_chroma_subsampling(uint32_t format); 1096extern int drm_format_vert_chroma_subsampling(uint32_t format);
1097extern const char *drm_get_format_name(uint32_t format);
1097 1098
1098#endif /* __DRM_CRTC_H__ */ 1099#endif /* __DRM_CRTC_H__ */
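
The new cursor_set2 hook carries the cursor hotspot from userspace via the
drm_mode_cursor2_ioctl path wired up above. A driver without hardware hotspot
support can simply forward to its existing handler; a sketch, with
foo_crtc_cursor_set() and the foo_ names hypothetical:

static int foo_crtc_cursor_set2(struct drm_crtc *crtc,
                                struct drm_file *file_priv,
                                uint32_t handle, uint32_t width,
                                uint32_t height, int32_t hot_x, int32_t hot_y)
{
        /* hot_x/hot_y locate the pointer inside the cursor image;
         * ignoring them preserves the legacy cursor_set behaviour. */
        return foo_crtc_cursor_set(crtc, file_priv, handle, width, height);
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
        .cursor_set2 = foo_crtc_cursor_set2,
        .cursor_move = foo_crtc_cursor_move,
        /* ... remaining hooks unchanged ... */
};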
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 0ead502e17d2..f5e1168c7647 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -20,10 +20,13 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Dave Airlie 22 * Authors: Dave Airlie
23 * Christian König
23 */ 24 */
24#ifndef DRM_FIXED_H 25#ifndef DRM_FIXED_H
25#define DRM_FIXED_H 26#define DRM_FIXED_H
26 27
28#include <linux/math64.h>
29
27typedef union dfixed { 30typedef union dfixed {
28 u32 full; 31 u32 full;
29} fixed20_12; 32} fixed20_12;
@@ -65,4 +68,95 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
65 tmp /= 2; 68 tmp /= 2;
66 return lower_32_bits(tmp); 69 return lower_32_bits(tmp);
67} 70}
71
72#define DRM_FIXED_POINT 32
73#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
74#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
75#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
76
77static inline s64 drm_int2fixp(int a)
78{
79 return ((s64)a) << DRM_FIXED_POINT;
80}
81
82static inline int drm_fixp2int(int64_t a)
83{
84 return ((s64)a) >> DRM_FIXED_POINT;
85}
86
87static inline s64 drm_fixp_msbset(int64_t a)
88{
89 unsigned shift, sign = (a >> 63) & 1;
90
91 for (shift = 62; shift > 0; --shift)
92 if ((a >> shift) != sign)
93 return shift;
94
95 return 0;
96}
97
98static inline s64 drm_fixp_mul(s64 a, s64 b)
99{
100 unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
101 s64 result;
102
103 if (shift > 63) {
104 shift = shift - 63;
105 a >>= shift >> 1;
106 b >>= shift >> 1;
107 } else
108 shift = 0;
109
110 result = a * b;
111
112 if (shift > DRM_FIXED_POINT)
113 return result << (shift - DRM_FIXED_POINT);
114
115 if (shift < DRM_FIXED_POINT)
116 return result >> (DRM_FIXED_POINT - shift);
117
118 return result;
119}
120
121static inline s64 drm_fixp_div(s64 a, s64 b)
122{
123 unsigned shift = 63 - drm_fixp_msbset(a);
124 s64 result;
125
126 a <<= shift;
127
128 if (shift < DRM_FIXED_POINT)
129 b >>= (DRM_FIXED_POINT - shift);
130
131 result = div64_s64(a, b);
132
133 if (shift > DRM_FIXED_POINT)
134 return result >> (shift - DRM_FIXED_POINT);
135
136 return result;
137}
138
139static inline s64 drm_fixp_exp(s64 x)
140{
141 s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
142 s64 sum = DRM_FIXED_ONE, term, y = x;
143 u64 count = 1;
144
145 if (x < 0)
146 y = -1 * x;
147
148 term = y;
149
150 while (term >= tolerance) {
151 sum = sum + term;
152 count = count + 1;
153 term = drm_fixp_mul(term, div64_s64(y, count));
154 }
155
156 if (x < 0)
157 sum = drm_fixp_div(1, sum);
158
159 return sum;
160}
161
68#endif 162#endif
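
The new 32.32 helpers follow the usual fixed-point conventions; a minimal
sketch using only the functions defined above (values chosen for
illustration):

        /* 3 * 1.5 = 4.5 in 32.32 fixed point */
        s64 three = drm_int2fixp(3);
        s64 one_half = DRM_FIXED_ONE + (DRM_FIXED_ONE >> 1);        /* 1.5 */
        s64 product = drm_fixp_mul(three, one_half);                /* 4.5 */
        int whole = drm_fixp2int(product);                          /* 4, truncated */
        s64 ratio = drm_fixp_div(drm_int2fixp(9), drm_int2fixp(2)); /* also 4.5 */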
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 63397ced9254..6e17251e9b28 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,9 @@
4struct drm_gem_cma_object { 4struct drm_gem_cma_object {
5 struct drm_gem_object base; 5 struct drm_gem_object base;
6 dma_addr_t paddr; 6 dma_addr_t paddr;
7 struct sg_table *sgt;
8
9 /* For objects with DMA memory allocated by GEM CMA */
7 void *vaddr; 10 void *vaddr;
8}; 11};
9 12
@@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
45void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); 48void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
46#endif 49#endif
47 50
51struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
52 struct drm_gem_object *obj,
53 int flags);
54struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
55 struct dma_buf *dma_buf);
56
48#endif /* __DRM_GEM_CMA_HELPER_H__ */ 57#endif /* __DRM_GEM_CMA_HELPER_H__ */
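
The two new helpers are shaped to drop straight into a CMA-based driver's
struct drm_driver as its PRIME entry points; a sketch (foo_driver and the
elided fields are hypothetical):

static struct drm_driver foo_driver = {
        .driver_features    = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
        /* ... fops, gem_free_object, dumb hooks, etc. ... */
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_cma_dmabuf_export,
        .gem_prime_import   = drm_gem_cma_dmabuf_import,
};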
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 88591ef8fa24..4d06edb56d5f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -177,17 +177,6 @@ static inline struct drm_mm_node *drm_mm_get_block_range(
177 return drm_mm_get_block_range_generic(parent, size, alignment, 0, 177 return drm_mm_get_block_range_generic(parent, size, alignment, 0,
178 start, end, 0); 178 start, end, 0);
179} 179}
180static inline struct drm_mm_node *drm_mm_get_color_block_range(
181 struct drm_mm_node *parent,
182 unsigned long size,
183 unsigned alignment,
184 unsigned long color,
185 unsigned long start,
186 unsigned long end)
187{
188 return drm_mm_get_block_range_generic(parent, size, alignment, color,
189 start, end, 0);
190}
191static inline struct drm_mm_node *drm_mm_get_block_atomic_range( 180static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
192 struct drm_mm_node *parent, 181 struct drm_mm_node *parent,
193 unsigned long size, 182 unsigned long size,
@@ -255,29 +244,10 @@ static inline struct drm_mm_node *drm_mm_search_free_in_range(
255 return drm_mm_search_free_in_range_generic(mm, size, alignment, 0, 244 return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
256 start, end, best_match); 245 start, end, best_match);
257} 246}
258static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm, 247
259 unsigned long size, 248extern void drm_mm_init(struct drm_mm *mm,
260 unsigned alignment, 249 unsigned long start,
261 unsigned long color, 250 unsigned long size);
262 bool best_match)
263{
264 return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
265}
266static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
267 const struct drm_mm *mm,
268 unsigned long size,
269 unsigned alignment,
270 unsigned long color,
271 unsigned long start,
272 unsigned long end,
273 bool best_match)
274{
275 return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
276 start, end, best_match);
277}
278extern int drm_mm_init(struct drm_mm *mm,
279 unsigned long start,
280 unsigned long size);
281extern void drm_mm_takedown(struct drm_mm *mm); 251extern void drm_mm_takedown(struct drm_mm *mm);
282extern int drm_mm_clean(struct drm_mm *mm); 252extern int drm_mm_clean(struct drm_mm *mm);
283extern int drm_mm_pre_get(struct drm_mm *mm); 253extern int drm_mm_pre_get(struct drm_mm *mm);
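
With drm_mm_init() now returning void, the error check at every call site
goes away; the lifecycle reduces to (sketch):

        struct drm_mm mm;

        drm_mm_init(&mm, 0 /* start */, size); /* can no longer fail */
        /* ... insert and remove nodes ... */
        drm_mm_takedown(&mm);                  /* range must be empty here */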
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 675ddf4b441f..815fafc6b4ad 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -65,22 +65,6 @@ struct no_agp_kern {
65#define DRM_AGP_KERN struct no_agp_kern 65#define DRM_AGP_KERN struct no_agp_kern
66#endif 66#endif
67 67
68#if !(__OS_HAS_MTRR)
69static __inline__ int mtrr_add(unsigned long base, unsigned long size,
70 unsigned int type, char increment)
71{
72 return -ENODEV;
73}
74
75static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
76{
77 return -ENODEV;
78}
79
80#define MTRR_TYPE_WRCOMB 1
81
82#endif
83
84/** Other copying of data to kernel space */ 68/** Other copying of data to kernel space */
85#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ 69#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \
86 copy_from_user(arg1, arg2, arg3) 70 copy_from_user(arg1, arg2, arg3)
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index bb1bc485390b..34efaf64cc87 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -152,6 +152,14 @@
152 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 152 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
153 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 153 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
154 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 154 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
158 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
159 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
160 {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
161 {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
162 {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 163 {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 164 {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 165 {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -580,6 +588,22 @@
580 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 588 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
581 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 589 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
582 {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 590 {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
591 {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
592 {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
593 {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
594 {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
595 {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
596 {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
597 {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
598 {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
599 {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
600 {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
601 {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
602 {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
603 {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
604 {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
605 {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
606 {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
583 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 607 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
584 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 608 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
585 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 609 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
new file mode 100644
index 000000000000..d1286297567b
--- /dev/null
+++ b/include/drm/drm_rect.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright (C) 2011-2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef DRM_RECT_H
25#define DRM_RECT_H
26
27/**
28 * DOC: rect utils
29 *
30 * Utility functions to help manage rectangular areas for
31 * clipping, scaling, etc. calculations.
32 */
33
34/**
35 * struct drm_rect - two dimensional rectangle
36 * @x1: horizontal starting coordinate (inclusive)
37 * @x2: horizontal ending coordinate (exclusive)
38 * @y1: vertical starting coordinate (inclusive)
39 * @y2: vertical ending coordinate (exclusive)
40 */
41struct drm_rect {
42 int x1, y1, x2, y2;
43};
44
45/**
46 * drm_rect_adjust_size - adjust the size of the rectangle
47 * @r: rectangle to be adjusted
48 * @dw: horizontal adjustment
49 * @dh: vertical adjustment
50 *
51 * Change the size of rectangle @r by @dw in the horizontal direction,
52 * and by @dh in the vertical direction, while keeping the center
53 * of @r stationary.
54 *
55 * Positive @dw and @dh increase the size, negative values decrease it.
56 */
57static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
58{
59 r->x1 -= dw >> 1;
60 r->y1 -= dh >> 1;
61 r->x2 += (dw + 1) >> 1;
62 r->y2 += (dh + 1) >> 1;
63}
64
65/**
66 * drm_rect_translate - translate the rectangle
67 * @r: rectangle to be translated
68 * @dx: horizontal translation
69 * @dy: vertical translation
70 *
71 * Move rectangle @r by @dx in the horizontal direction,
72 * and by @dy in the vertical direction.
73 */
74static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
75{
76 r->x1 += dx;
77 r->y1 += dy;
78 r->x2 += dx;
79 r->y2 += dy;
80}
81
82/**
83 * drm_rect_downscale - downscale a rectangle
84 * @r: rectangle to be downscaled
85 * @horz: horizontal downscale factor
86 * @vert: vertical downscale factor
87 *
88 * Divide the coordinates of rectangle @r by @horz and @vert.
89 */
90static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
91{
92 r->x1 /= horz;
93 r->y1 /= vert;
94 r->x2 /= horz;
95 r->y2 /= vert;
96}
97
98/**
99 * drm_rect_width - determine the rectangle width
100 * @r: rectangle whose width is returned
101 *
102 * RETURNS:
103 * The width of the rectangle.
104 */
105static inline int drm_rect_width(const struct drm_rect *r)
106{
107 return r->x2 - r->x1;
108}
109
110/**
111 * drm_rect_height - determine the rectangle height
112 * @r: rectangle whose height is returned
113 *
114 * RETURNS:
115 * The height of the rectangle.
116 */
117static inline int drm_rect_height(const struct drm_rect *r)
118{
119 return r->y2 - r->y1;
120}
121
122/**
123 * drm_rect_visible - determine if the rectangle is visible
124 * @r: rectangle whose visibility is returned
125 *
126 * RETURNS:
127 * %true if the rectangle is visible, %false otherwise.
128 */
129static inline bool drm_rect_visible(const struct drm_rect *r)
130{
131 return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
132}
133
134/**
135 * drm_rect_equals - determine if two rectangles are equal
136 * @r1: first rectangle
137 * @r2: second rectangle
138 *
139 * RETURNS:
140 * %true if the rectangles are equal, %false otherwise.
141 */
142static inline bool drm_rect_equals(const struct drm_rect *r1,
143 const struct drm_rect *r2)
144{
145 return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
146 r1->y1 == r2->y1 && r1->y2 == r2->y2;
147}
148
149bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
150bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
151 const struct drm_rect *clip,
152 int hscale, int vscale);
153int drm_rect_calc_hscale(const struct drm_rect *src,
154 const struct drm_rect *dst,
155 int min_hscale, int max_hscale);
156int drm_rect_calc_vscale(const struct drm_rect *src,
157 const struct drm_rect *dst,
158 int min_vscale, int max_vscale);
159int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
160 struct drm_rect *dst,
161 int min_hscale, int max_hscale);
162int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
163 struct drm_rect *dst,
164 int min_vscale, int max_vscale);
165void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
166
167#endif
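
The inline helpers compose naturally with the out-of-line clipping functions
declared above; a sketch clipping a destination rectangle against CRTC
bounds (drm_rect_intersect returns whether the result is still visible):

        struct drm_rect dst = { .x1 = -16, .y1 = 0, .x2 = 1008, .y2 = 768 };
        const struct drm_rect bounds = { 0, 0, 1024, 768 };

        drm_rect_translate(&dst, 16, 0);        /* shift fully on screen */
        if (drm_rect_intersect(&dst, &bounds))  /* true if still visible */
                DRM_DEBUG("visible %dx%d\n",
                          drm_rect_width(&dst), drm_rect_height(&dst));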
diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
new file mode 100644
index 000000000000..cfdc884405b7
--- /dev/null
+++ b/include/drm/i915_powerwell.h
@@ -0,0 +1,36 @@
1/**************************************************************************
2 *
3 * Copyright 2013 Intel Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28
29#ifndef _I915_POWERWELL_H_
30#define _I915_POWERWELL_H_
31
32/* For use by hda_i915 driver */
33extern void i915_request_power_well(void);
34extern void i915_release_power_well(void);
35
36#endif /* _I915_POWERWELL_H_ */
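
As the comment says, the pair exists so the HDA audio driver can keep the
display-side power well up while it touches shared registers; usage is a
simple bracket (sketch):

        i915_request_power_well();      /* take a reference on the well */
        /* ... program HDMI/DP audio registers ... */
        i915_release_power_well();      /* drop it again */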
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 3cb5d848fb66..8a6aa56ece52 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -39,6 +39,7 @@
39#include <linux/mm.h> 39#include <linux/mm.h>
40#include <linux/rbtree.h> 40#include <linux/rbtree.h>
41#include <linux/bitmap.h> 41#include <linux/bitmap.h>
42#include <linux/reservation.h>
42 43
43struct ttm_bo_device; 44struct ttm_bo_device;
44 45
@@ -153,7 +154,6 @@ struct ttm_tt;
153 * Lru lists may keep one refcount, the delayed delete list, and kref != 0 154 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
154 * keeps one refcount. When this refcount reaches zero, 155 * keeps one refcount. When this refcount reaches zero,
155 * the object is destroyed. 156 * the object is destroyed.
156 * @event_queue: Queue for processes waiting on buffer object status change.
157 * @mem: structure describing current placement. 157 * @mem: structure describing current placement.
158 * @persistent_swap_storage: Usually the swap storage is deleted for buffers 158 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
159 * pinned in physical memory. If this behaviour is not desired, this member 159 * pinned in physical memory. If this behaviour is not desired, this member
@@ -164,12 +164,6 @@ struct ttm_tt;
164 * @lru: List head for the lru list. 164 * @lru: List head for the lru list.
165 * @ddestroy: List head for the delayed destroy list. 165 * @ddestroy: List head for the delayed destroy list.
166 * @swap: List head for swap LRU list. 166 * @swap: List head for swap LRU list.
167 * @val_seq: Sequence of the validation holding the @reserved lock.
168 * Used to avoid starvation when many processes compete to validate the
169 * buffer. This member is protected by the bo_device::lru_lock.
170 * @seq_valid: The value of @val_seq is valid. This value is protected by
171 * the bo_device::lru_lock.
172 * @reserved: Deadlock-free lock used for synchronization state transitions.
173 * @sync_obj: Pointer to a synchronization object. 167 * @sync_obj: Pointer to a synchronization object.
174 * @priv_flags: Flags describing buffer object internal state. 168 * @priv_flags: Flags describing buffer object internal state.
175 * @vm_rb: Rb node for the vm rb tree. 169 * @vm_rb: Rb node for the vm rb tree.
@@ -209,10 +203,9 @@ struct ttm_buffer_object {
209 203
210 struct kref kref; 204 struct kref kref;
211 struct kref list_kref; 205 struct kref list_kref;
212 wait_queue_head_t event_queue;
213 206
214 /** 207 /**
215 * Members protected by the bo::reserved lock. 208 * Members protected by the bo::resv::reserved lock.
216 */ 209 */
217 210
218 struct ttm_mem_reg mem; 211 struct ttm_mem_reg mem;
@@ -234,15 +227,6 @@ struct ttm_buffer_object {
234 struct list_head ddestroy; 227 struct list_head ddestroy;
235 struct list_head swap; 228 struct list_head swap;
236 struct list_head io_reserve_lru; 229 struct list_head io_reserve_lru;
237 uint32_t val_seq;
238 bool seq_valid;
239
240 /**
241 * Members protected by the bdev::lru_lock
242 * only when written to.
243 */
244
245 atomic_t reserved;
246 230
247 /** 231 /**
248 * Members protected by struct buffer_object_device::fence_lock 232 * Members protected by struct buffer_object_device::fence_lock
@@ -272,6 +256,9 @@ struct ttm_buffer_object {
272 uint32_t cur_placement; 256 uint32_t cur_placement;
273 257
274 struct sg_table *sg; 258 struct sg_table *sg;
259
260 struct reservation_object *resv;
261 struct reservation_object ttm_resv;
275}; 262};
276 263
277/** 264/**
@@ -725,18 +712,4 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
725 712
726extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); 713extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
727 714
728/**
729 * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
730 *
731 * @bo: The buffer object to check.
732 *
733 * This function returns an indication if a bo is reserved or not, and should
734 * only be used to print an error when it is not from incorrect api usage, since
735 * there's no guarantee that it is the caller that is holding the reservation.
736 */
737static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
738{
739 return atomic_read(&bo->reserved);
740}
741
742#endif 715#endif
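
With the atomic @reserved field and ttm_bo_is_reserved() gone, the
reservation state lives in bo->resv->lock, a ww_mutex inside the new
struct reservation_object. A debug helper with the old semantics would now
be a one-liner over that lock; a sketch, assuming the ww_mutex_is_locked()
helper from the w/w mutex series:

static inline bool foo_bo_is_reserved(struct ttm_buffer_object *bo)
{
        /* Same caveat as the removed helper: this cannot tell whether
         * the current task is the holder, so use it only for warnings. */
        return ww_mutex_is_locked(&bo->resv->lock);
}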
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 9c8dca79808e..984fc2d571a1 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -33,11 +33,13 @@
33#include <ttm/ttm_bo_api.h> 33#include <ttm/ttm_bo_api.h>
34#include <ttm/ttm_memory.h> 34#include <ttm/ttm_memory.h>
35#include <ttm/ttm_module.h> 35#include <ttm/ttm_module.h>
36#include <ttm/ttm_placement.h>
36#include <drm/drm_mm.h> 37#include <drm/drm_mm.h>
37#include <drm/drm_global.h> 38#include <drm/drm_global.h>
38#include <linux/workqueue.h> 39#include <linux/workqueue.h>
39#include <linux/fs.h> 40#include <linux/fs.h>
40#include <linux/spinlock.h> 41#include <linux/spinlock.h>
42#include <linux/reservation.h>
41 43
42struct ttm_backend_func { 44struct ttm_backend_func {
43 /** 45 /**
@@ -771,6 +773,55 @@ extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
771 bool interruptible); 773 bool interruptible);
772extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); 774extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
773 775
776extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
777extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
778
779/**
780 * ttm_bo_reserve_nolru:
781 *
782 * @bo: A pointer to a struct ttm_buffer_object.
783 * @interruptible: Sleep interruptible if waiting.
784 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
785 * @use_ticket: If @bo is already reserved, only sleep waiting for
786 * it to become unreserved if @ticket->stamp is older.
787 *
788 * Will not remove reserved buffers from the lru lists.
789 * Otherwise identical to ttm_bo_reserve.
790 *
791 * Returns:
792 * -EDEADLK: The reservation may cause a deadlock.
793 * Release all buffer reservations, wait for @bo to become unreserved and
794 * try again. (only if use_ticket == 1).
795 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
796 * a signal. Release all buffer reservations and return to user-space.
797 * -EBUSY: The function needed to sleep, but @no_wait was true
798 * -EALREADY: Bo already reserved using @ticket. This error code will only
799 * be returned if @use_ticket is set to true.
800 */
801static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
802 bool interruptible,
803 bool no_wait, bool use_ticket,
804 struct ww_acquire_ctx *ticket)
805{
806 int ret = 0;
807
808 if (no_wait) {
809 bool success;
810 if (WARN_ON(ticket))
811 return -EBUSY;
812
813 success = ww_mutex_trylock(&bo->resv->lock);
814 return success ? 0 : -EBUSY;
815 }
816
817 if (interruptible)
818 ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
819 else
820 ret = ww_mutex_lock(&bo->resv->lock, ticket);
821 if (ret == -EINTR)
822 return -ERESTARTSYS;
823 return ret;
824}
774 825
775/** 826/**
776 * ttm_bo_reserve: 827 * ttm_bo_reserve:
@@ -778,8 +829,8 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
778 * @bo: A pointer to a struct ttm_buffer_object. 829 * @bo: A pointer to a struct ttm_buffer_object.
779 * @interruptible: Sleep interruptible if waiting. 830 * @interruptible: Sleep interruptible if waiting.
780 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. 831 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
781 * @use_sequence: If @bo is already reserved, only sleep waiting for 832 * @use_ticket: If @bo is already reserved, only sleep waiting for
782 * it to become unreserved if @sequence < (@bo)->sequence. 833 * it to become unreserved if @ticket->stamp is older.
783 * 834 *
784 * Locks a buffer object for validation. (Or prevents other processes from 835 * Locks a buffer object for validation. (Or prevents other processes from
785 * locking it for validation) and removes it from lru lists, while taking 836 * locking it for validation) and removes it from lru lists, while taking
@@ -793,7 +844,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
793 * Processes attempting to reserve multiple buffers other than for eviction, 844 * Processes attempting to reserve multiple buffers other than for eviction,
794 * (typically execbuf), should first obtain a unique 32-bit 845 * (typically execbuf), should first obtain a unique 32-bit
795 * validation sequence number, 846 * validation sequence number,
796 * and call this function with @use_sequence == 1 and @sequence == the unique 847 * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
797 * sequence number. If upon call of this function, the buffer object is already 848 * sequence number. If upon call of this function, the buffer object is already
798 * reserved, the validation sequence is checked against the validation 849 * reserved, the validation sequence is checked against the validation
799 * sequence of the process currently reserving the buffer, 850 * sequence of the process currently reserving the buffer,
@@ -808,36 +859,31 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
808 * will eventually succeed, preventing both deadlocks and starvation. 859 * will eventually succeed, preventing both deadlocks and starvation.
809 * 860 *
810 * Returns: 861 * Returns:
811 * -EAGAIN: The reservation may cause a deadlock. 862 * -EDEADLK: The reservation may cause a deadlock.
812 * Release all buffer reservations, wait for @bo to become unreserved and 863 * Release all buffer reservations, wait for @bo to become unreserved and
813 * try again. (only if use_sequence == 1). 864 * try again. (only if use_ticket == 1).
814 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by 865 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
815 * a signal. Release all buffer reservations and return to user-space. 866 * a signal. Release all buffer reservations and return to user-space.
816 * -EBUSY: The function needed to sleep, but @no_wait was true 867 * -EBUSY: The function needed to sleep, but @no_wait was true
817 * -EDEADLK: Bo already reserved using @sequence. This error code will only 868 * -EALREADY: Bo already reserved using @ticket. This error code will only
818 * be returned if @use_sequence is set to true. 869 * be returned if @use_ticket is set to true.
819 */ 870 */
820extern int ttm_bo_reserve(struct ttm_buffer_object *bo, 871static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
821 bool interruptible, 872 bool interruptible,
822 bool no_wait, bool use_sequence, uint32_t sequence); 873 bool no_wait, bool use_ticket,
874 struct ww_acquire_ctx *ticket)
875{
876 int ret;
823 877
824/** 878 WARN_ON(!atomic_read(&bo->kref.refcount));
825 * ttm_bo_reserve_slowpath_nolru:
826 * @bo: A pointer to a struct ttm_buffer_object.
827 * @interruptible: Sleep interruptible if waiting.
828 * @sequence: Set (@bo)->sequence to this value after lock
829 *
830 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
831 * from all our other reservations. Because there are no other reservations
832 * held by us, this function cannot deadlock any more.
833 *
834 * Will not remove reserved buffers from the lru lists.
835 * Otherwise identical to ttm_bo_reserve_slowpath.
836 */
837extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
838 bool interruptible,
839 uint32_t sequence);
840 879
880 ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
881 ticket);
882 if (likely(ret == 0))
883 ttm_bo_del_sub_from_lru(bo);
884
885 return ret;
886}
841 887
842/** 888/**
843 * ttm_bo_reserve_slowpath: 889 * ttm_bo_reserve_slowpath:
@@ -849,54 +895,57 @@ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
849 * from all our other reservations. Because there are no other reservations 895 * from all our other reservations. Because there are no other reservations
850 * held by us, this function cannot deadlock any more. 896 * held by us, this function cannot deadlock any more.
851 */ 897 */
852extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, 898static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
853 bool interruptible, uint32_t sequence); 899 bool interruptible,
900 struct ww_acquire_ctx *ticket)
901{
902 int ret = 0;
854 903
855/** 904 WARN_ON(!atomic_read(&bo->kref.refcount));
856 * ttm_bo_reserve_nolru: 905
857 * 906 if (interruptible)
858 * @bo: A pointer to a struct ttm_buffer_object. 907 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
859 * @interruptible: Sleep interruptible if waiting. 908 ticket);
860 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. 909 else
861 * @use_sequence: If @bo is already reserved, Only sleep waiting for 910 ww_mutex_lock_slow(&bo->resv->lock, ticket);
862 * it to become unreserved if @sequence < (@bo)->sequence. 911
863 * 912 if (likely(ret == 0))
864 * Will not remove reserved buffers from the lru lists. 913 ttm_bo_del_sub_from_lru(bo);
865 * Otherwise identical to ttm_bo_reserve. 914 else if (ret == -EINTR)
866 * 915 ret = -ERESTARTSYS;
867 * Returns: 916
868 * -EAGAIN: The reservation may cause a deadlock. 917 return ret;
869 * Release all buffer reservations, wait for @bo to become unreserved and 918}
870 * try again. (only if use_sequence == 1).
871 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
872 * a signal. Release all buffer reservations and return to user-space.
873 * -EBUSY: The function needed to sleep, but @no_wait was true
874 * -EDEADLK: Bo already reserved using @sequence. This error code will only
875 * be returned if @use_sequence is set to true.
876 */
877extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
878 bool interruptible,
879 bool no_wait, bool use_sequence,
880 uint32_t sequence);
881 919
882/** 920/**
883 * ttm_bo_unreserve 921 * ttm_bo_unreserve_ticket
884 *
885 * @bo: A pointer to a struct ttm_buffer_object. 922 * @bo: A pointer to a struct ttm_buffer_object.
923 * @ticket: ww_acquire_ctx used for reserving
886 * 924 *
887 * Unreserve a previous reservation of @bo. 925 * Unreserve a previous reservation of @bo made with @ticket.
888 */ 926 */
889extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); 927static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
928 struct ww_acquire_ctx *t)
929{
930 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
931 spin_lock(&bo->glob->lru_lock);
932 ttm_bo_add_to_lru(bo);
933 spin_unlock(&bo->glob->lru_lock);
934 }
935 ww_mutex_unlock(&bo->resv->lock);
936}
890 937
891/** 938/**
892 * ttm_bo_unreserve_locked 939 * ttm_bo_unreserve
893 * 940 *
894 * @bo: A pointer to a struct ttm_buffer_object. 941 * @bo: A pointer to a struct ttm_buffer_object.
895 * 942 *
896 * Unreserve a previous reservation of @bo. 943 * Unreserve a previous reservation of @bo.
897 * Needs to be called with struct ttm_bo_global::lru_lock held.
898 */ 944 */
899extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); 945static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
946{
947 ttm_bo_unreserve_ticket(bo, NULL);
948}
900 949
901/* 950/*
902 * ttm_bo_util.c 951 * ttm_bo_util.c
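
Taken together, a single-buffer reserve/unreserve cycle with the new ticket
API looks like this (sketch; error handling trimmed, reservation_ww_class
is the class added in drivers/base/reservation.c by this merge):

        struct ww_acquire_ctx ticket;
        int ret;

        ww_acquire_init(&ticket, &reservation_ww_class);
        ret = ttm_bo_reserve(bo, true /* interruptible */,
                             false /* no_wait */, true /* use_ticket */,
                             &ticket);
        if (ret == 0) {
                /* ... validate or fence the buffer ... */
                ttm_bo_unreserve_ticket(bo, &ticket);
        }
        ww_acquire_fini(&ticket);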
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 547e19f06e57..ec8a1d306510 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -57,17 +57,20 @@ struct ttm_validate_buffer {
57/** 57/**
58 * function ttm_eu_backoff_reservation 58 * function ttm_eu_backoff_reservation
59 * 59 *
60 * @ticket: ww_acquire_ctx from reserve call
60 * @list: thread private list of ttm_validate_buffer structs. 61 * @list: thread private list of ttm_validate_buffer structs.
61 * 62 *
62 * Undoes all buffer validation reservations for bos pointed to by 63 * Undoes all buffer validation reservations for bos pointed to by
63 * the list entries. 64 * the list entries.
64 */ 65 */
65 66
66extern void ttm_eu_backoff_reservation(struct list_head *list); 67extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
68 struct list_head *list);
67 69
68/** 70/**
69 * function ttm_eu_reserve_buffers 71 * function ttm_eu_reserve_buffers
70 * 72 *
73 * @ticket: [out] ww_acquire_ctx returned by call.
71 * @list: thread private list of ttm_validate_buffer structs. 74 * @list: thread private list of ttm_validate_buffer structs.
72 * 75 *
73 * Tries to reserve bos pointed to by the list entries for validation. 76 * Tries to reserve bos pointed to by the list entries for validation.
@@ -90,11 +93,13 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
90 * has failed. 93 * has failed.
91 */ 94 */
92 95
93extern int ttm_eu_reserve_buffers(struct list_head *list); 96extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
97 struct list_head *list);
94 98
95/** 99/**
96 * function ttm_eu_fence_buffer_objects. 100 * function ttm_eu_fence_buffer_objects.
97 * 101 *
102 * @ticket: ww_acquire_ctx from reserve call
98 * @list: thread private list of ttm_validate_buffer structs. 103 * @list: thread private list of ttm_validate_buffer structs.
99 * @sync_obj: The new sync object for the buffers. 104 * @sync_obj: The new sync object for the buffers.
100 * 105 *
@@ -104,6 +109,7 @@ extern int ttm_eu_reserve_buffers(struct list_head *list);
104 * 109 *
105 */ 110 */
106 111
107extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj); 112extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
113 struct list_head *list, void *sync_obj);
108 114
109#endif 115#endif
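
The ticket threads through the whole execbuf sequence; as implemented in
ttm_execbuf_util.c in this merge, ttm_eu_reserve_buffers initializes it and
both terminal calls release it, so the caller never touches
ww_acquire_init/fini directly. A sketch, with foo_validate_and_submit()
standing in for hypothetical driver work:

        struct ww_acquire_ctx ticket;
        int ret;

        ret = ttm_eu_reserve_buffers(&ticket, &val_list);
        if (ret)
                return ret;

        ret = foo_validate_and_submit(&val_list);
        if (ret) {
                ttm_eu_backoff_reservation(&ticket, &val_list);
                return ret;
        }
        ttm_eu_fence_buffer_objects(&ticket, &val_list, sync_obj);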
diff --git a/include/linux/io.h b/include/linux/io.h
index 069e4075f872..f4f42faec686 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -76,4 +76,29 @@ void devm_ioremap_release(struct device *dev, void *res);
76#define arch_has_dev_port() (1) 76#define arch_has_dev_port() (1)
77#endif 77#endif
78 78
79/*
80 * Some systems (x86 without PAT) have a somewhat reliable way to mark a
81 * physical address range such that uncached mappings will actually
82 * end up write-combining. This facility should be used in conjunction
83 * with pgprot_writecombine, ioremap-wc, or set_memory_wc, since it has
84 * no effect if the per-page mechanisms are functional.
85 * (On x86 without PAT, these functions manipulate MTRRs.)
86 *
87 * arch_phys_wc_del(0) or arch_phys_wc_del(any error code) is guaranteed
88 * to have no effect.
89 */
90#ifndef arch_phys_wc_add
91static inline int __must_check arch_phys_wc_add(unsigned long base,
92 unsigned long size)
93{
94 return 0; /* It worked (i.e. did nothing). */
95}
96
97static inline void arch_phys_wc_del(int handle)
98{
99}
100
101#define arch_phys_wc_add arch_phys_wc_add
102#endif
103
79#endif /* _LINUX_IO_H */ 104#endif /* _LINUX_IO_H */
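
The stub's contract makes call sites unconditional: on x86 without PAT
arch_phys_wc_add() installs an MTRR, everywhere else it "succeeds" with
handle 0 and arch_phys_wc_del() ignores 0 and error handles. A sketch for a
framebuffer BAR (fb_base/fb_size hypothetical):

        int wc = arch_phys_wc_add(fb_base, fb_size);
        void __iomem *fb = ioremap_wc(fb_base, fb_size);

        /* ... use the write-combined mapping ... */

        iounmap(fb);
        arch_phys_wc_del(wc);   /* safe for 0 or error handles */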
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 731d77d6e155..4ac8b1977b73 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/lockdep.h> 5#include <linux/lockdep.h>
6#include <linux/debug_locks.h>
6 7
7/* 8/*
8 * Mutexes - debugging helpers: 9 * Mutexes - debugging helpers:
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 433da8a1a426..3793ed7feeeb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,6 +10,7 @@
10#ifndef __LINUX_MUTEX_H 10#ifndef __LINUX_MUTEX_H
11#define __LINUX_MUTEX_H 11#define __LINUX_MUTEX_H
12 12
13#include <asm/current.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
15#include <linux/linkage.h> 16#include <linux/linkage.h>
@@ -77,6 +78,40 @@ struct mutex_waiter {
77#endif 78#endif
78}; 79};
79 80
81struct ww_class {
82 atomic_long_t stamp;
83 struct lock_class_key acquire_key;
84 struct lock_class_key mutex_key;
85 const char *acquire_name;
86 const char *mutex_name;
87};
88
89struct ww_acquire_ctx {
90 struct task_struct *task;
91 unsigned long stamp;
92 unsigned acquired;
93#ifdef CONFIG_DEBUG_MUTEXES
94 unsigned done_acquire;
95 struct ww_class *ww_class;
96 struct ww_mutex *contending_lock;
97#endif
98#ifdef CONFIG_DEBUG_LOCK_ALLOC
99 struct lockdep_map dep_map;
100#endif
101#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
102 unsigned deadlock_inject_interval;
103 unsigned deadlock_inject_countdown;
104#endif
105};
106
107struct ww_mutex {
108 struct mutex base;
109 struct ww_acquire_ctx *ctx;
110#ifdef CONFIG_DEBUG_MUTEXES
111 struct ww_class *ww_class;
112#endif
113};
114
80#ifdef CONFIG_DEBUG_MUTEXES 115#ifdef CONFIG_DEBUG_MUTEXES
81# include <linux/mutex-debug.h> 116# include <linux/mutex-debug.h>
82#else 117#else
@@ -101,8 +136,11 @@ static inline void mutex_destroy(struct mutex *lock) {}
101#ifdef CONFIG_DEBUG_LOCK_ALLOC 136#ifdef CONFIG_DEBUG_LOCK_ALLOC
102# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ 137# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
103 , .dep_map = { .name = #lockname } 138 , .dep_map = { .name = #lockname }
139# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
140 , .ww_class = &ww_class
104#else 141#else
105# define __DEP_MAP_MUTEX_INITIALIZER(lockname) 142# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
143# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
106#endif 144#endif
107 145
108#define __MUTEX_INITIALIZER(lockname) \ 146#define __MUTEX_INITIALIZER(lockname) \
@@ -112,13 +150,49 @@ static inline void mutex_destroy(struct mutex *lock) {}
112 __DEBUG_MUTEX_INITIALIZER(lockname) \ 150 __DEBUG_MUTEX_INITIALIZER(lockname) \
113 __DEP_MAP_MUTEX_INITIALIZER(lockname) } 151 __DEP_MAP_MUTEX_INITIALIZER(lockname) }
114 152
153#define __WW_CLASS_INITIALIZER(ww_class) \
154 { .stamp = ATOMIC_LONG_INIT(0) \
155 , .acquire_name = #ww_class "_acquire" \
156 , .mutex_name = #ww_class "_mutex" }
157
158#define __WW_MUTEX_INITIALIZER(lockname, class) \
159 { .base = __MUTEX_INITIALIZER(lockname.base) \
160 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
161
115#define DEFINE_MUTEX(mutexname) \ 162#define DEFINE_MUTEX(mutexname) \
116 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) 163 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
117 164
165#define DEFINE_WW_CLASS(classname) \
166 struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
167
168#define DEFINE_WW_MUTEX(mutexname, ww_class) \
169 struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
170
171
118extern void __mutex_init(struct mutex *lock, const char *name, 172extern void __mutex_init(struct mutex *lock, const char *name,
119 struct lock_class_key *key); 173 struct lock_class_key *key);
120 174
121/** 175/**
176 * ww_mutex_init - initialize the w/w mutex
177 * @lock: the mutex to be initialized
178 * @ww_class: the w/w class the mutex should belong to
179 *
180 * Initialize the w/w mutex to unlocked state and associate it with the given
181 * class.
182 *
183 * It is not allowed to initialize an already locked mutex.
184 */
185static inline void ww_mutex_init(struct ww_mutex *lock,
186 struct ww_class *ww_class)
187{
188 __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
189 lock->ctx = NULL;
190#ifdef CONFIG_DEBUG_MUTEXES
191 lock->ww_class = ww_class;
192#endif
193}
194
195/**
122 * mutex_is_locked - is the mutex locked 196 * mutex_is_locked - is the mutex locked
123 * @lock: the mutex to be queried 197 * @lock: the mutex to be queried
124 * 198 *
@@ -136,6 +210,7 @@ static inline int mutex_is_locked(struct mutex *lock)
136#ifdef CONFIG_DEBUG_LOCK_ALLOC 210#ifdef CONFIG_DEBUG_LOCK_ALLOC
137extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 211extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
138extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); 212extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
213
139extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, 214extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
140 unsigned int subclass); 215 unsigned int subclass);
141extern int __must_check mutex_lock_killable_nested(struct mutex *lock, 216extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -147,7 +222,7 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
147 222
148#define mutex_lock_nest_lock(lock, nest_lock) \ 223#define mutex_lock_nest_lock(lock, nest_lock) \
149do { \ 224do { \
150 typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ 225 typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
151 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 226 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
152} while (0) 227} while (0)
153 228
@@ -170,6 +245,292 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
170 */ 245 */
171extern int mutex_trylock(struct mutex *lock); 246extern int mutex_trylock(struct mutex *lock);
172extern void mutex_unlock(struct mutex *lock); 247extern void mutex_unlock(struct mutex *lock);
248
249/**
250 * ww_acquire_init - initialize a w/w acquire context
251 * @ctx: w/w acquire context to initialize
252 * @ww_class: w/w class of the context
253 *
254 * Initializes a context to acquire multiple mutexes of the given w/w class.
255 *
256 * Context-based w/w mutex acquiring can be done in any order whatsoever within
257 * a given lock class. Deadlocks will be detected and handled with the
258 * wait/wound logic.
259 *
260 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
261 * result in undetected deadlocks and is therefore forbidden. Mixing different contexts
262 * for the same w/w class when acquiring mutexes can also result in undetected
263 * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
264 * enabling CONFIG_PROVE_LOCKING.
265 *
266 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
267 * to the usual locking rules between different lock classes.
268 *
269 * An acquire context must be released with ww_acquire_fini by the same task
270 * before the memory is freed. It is recommended to allocate the context itself
271 * on the stack.
272 */
273static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
274 struct ww_class *ww_class)
275{
276 ctx->task = current;
277 ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
278 ctx->acquired = 0;
279#ifdef CONFIG_DEBUG_MUTEXES
280 ctx->ww_class = ww_class;
281 ctx->done_acquire = 0;
282 ctx->contending_lock = NULL;
283#endif
284#ifdef CONFIG_DEBUG_LOCK_ALLOC
285 debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
286 lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
287 &ww_class->acquire_key, 0);
288 mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
289#endif
290#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
291 ctx->deadlock_inject_interval = 1;
292 ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
293#endif
294}
295
296/**
297 * ww_acquire_done - marks the end of the acquire phase
298 * @ctx: the acquire context
299 *
300 * Marks the end of the acquire phase, any further w/w mutex lock calls using
301 * this context are forbidden.
302 *
303 * Calling this function is optional; it is just useful to document w/w mutex
304 * code and clearly separate the acquire phase from actually using the locked
305 * data structures.
306 */
307static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
308{
309#ifdef CONFIG_DEBUG_MUTEXES
310 lockdep_assert_held(ctx);
311
312 DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
313 ctx->done_acquire = 1;
314#endif
315}
316
317/**
318 * ww_acquire_fini - releases a w/w acquire context
319 * @ctx: the acquire context to free
320 *
321 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
322 * mutexes have been released with ww_mutex_unlock.
323 */
324static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
325{
326#ifdef CONFIG_DEBUG_MUTEXES
327 mutex_release(&ctx->dep_map, 0, _THIS_IP_);
328
329 DEBUG_LOCKS_WARN_ON(ctx->acquired);
330 if (!config_enabled(CONFIG_PROVE_LOCKING))
331 /*
332 * lockdep will normally catch use of the context
333 * after this point, but make it fail without lockdep too
334 */
335 ctx->done_acquire = 1;
336
337 if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
338 /* ensure ww_acquire_fini will still fail if called twice */
339 ctx->acquired = ~0U;
340#endif
341}
342
343extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
344 struct ww_acquire_ctx *ctx);
345extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
346 struct ww_acquire_ctx *ctx);
347
348/**
349 * ww_mutex_lock - acquire the w/w mutex
350 * @lock: the mutex to be acquired
351 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
352 *
353 * Lock the w/w mutex exclusively for this task.
354 *
355 * Deadlocks within a given w/w class of locks are detected and handled with the
356 * wait/wound algorithm. If the lock isn't immediately available this function
357 * will either sleep until it is (wait case), or select the current context
358 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
359 * same lock with the same context twice is also detected and signalled by
360 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
361 *
362 * In the wound case the caller must release all currently held w/w mutexes for
363 * the given context and then wait for this contending lock to be available by
364 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
365 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
366 * scanning through lru lists trying to free resources).
367 *
368 * The mutex must later on be released by the same task that
369 * acquired it. The task may not exit without first unlocking the mutex. Also,
370 * kernel memory where the mutex resides must not be freed with the mutex still
371 * locked. The mutex must first be initialized (or statically defined) before it
372 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
373 * of the same w/w lock class as was used to initialize the acquire context.
374 *
375 * A mutex acquired with this function must be released with ww_mutex_unlock.
376 */
377static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
378{
379 if (ctx)
380 return __ww_mutex_lock(lock, ctx);
381 else {
382 mutex_lock(&lock->base);
383 return 0;
384 }
385}
386
387/**
388 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
389 * @lock: the mutex to be acquired
390 * @ctx: w/w acquire context
391 *
392 * Lock the w/w mutex exclusively for this task.
393 *
394 * Deadlocks within a given w/w class of locks are detected and handled with the
395 * wait/wound algorithm. If the lock isn't immediately available this function
396 * will either sleep until it is (wait case), or select the current context
397 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
398 * same lock with the same context twice is also detected and signalled by
399 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
400 * signal arrives while waiting for the lock then this function returns -EINTR.
401 *
402 * In the wound case the caller must release all currently held w/w mutexes for
403 * the given context and then wait for this contending lock to be available by
404 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
405 * not acquire this lock and proceed with trying to acquire further w/w mutexes
406 * (e.g. when scanning through lru lists trying to free resources).
407 *
408 * The mutex must later on be released by the same task that
409 * acquired it. The task may not exit without first unlocking the mutex. Also,
410 * kernel memory where the mutex resides must not be freed with the mutex still
411 * locked. The mutex must first be initialized (or statically defined) before it
412 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
413 * of the same w/w lock class as was used to initialize the acquire context.
414 *
415 * A mutex acquired with this function must be released with ww_mutex_unlock.
416 */
417static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
418 struct ww_acquire_ctx *ctx)
419{
420 if (ctx)
421 return __ww_mutex_lock_interruptible(lock, ctx);
422 else
423 return mutex_lock_interruptible(&lock->base);
424}
425
426/**
427 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
428 * @lock: the mutex to be acquired
429 * @ctx: w/w acquire context
430 *
431 * Acquires a w/w mutex with the given context after a wound case. This function
432 * will sleep until the lock becomes available.
433 *
434 * The caller must have released all w/w mutexes already acquired with the
435 * context and then call this function on the contended lock.
436 *
437 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
438 * needs with ww_mutex_lock. Note that the -EALREADY return code from
439 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
440 *
441 * It is forbidden to call this function with any other w/w mutexes associated
442 * with the context held. It is forbidden to call this on anything else than the
443 * contending mutex.
444 *
445 * Note that the slowpath lock acquiring can also be done by calling
446 * ww_mutex_lock directly. This function here is simply to help w/w mutex
447 * locking code readability by clearly denoting the slowpath.
448 */
449static inline void
450ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
451{
452 int ret;
453#ifdef CONFIG_DEBUG_MUTEXES
454 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
455#endif
456 ret = ww_mutex_lock(lock, ctx);
457 (void)ret;
458}
459
460/**
461 * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex,
462 * interruptible
463 * @lock: the mutex to be acquired
464 * @ctx: w/w acquire context
465 *
466 * Acquires a w/w mutex with the given context after a wound case. This function
467 * will sleep until the lock becomes available and returns 0 when the lock has
468 * been acquired. If a signal arrives while waiting for the lock then this
469 * function returns -EINTR.
470 *
471 * The caller must have released all w/w mutexes already acquired with the
472 * context and then call this function on the contended lock.
473 *
474 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
475 * needs with ww_mutex_lock. Note that the -EALREADY return code from
476 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
477 *
478 * It is forbidden to call this function with any other w/w mutexes associated
479 * with the given context held. It is forbidden to call this on anything else
480 * than the contending mutex.
481 *
482 * Note that the slowpath lock acquiring can also be done by calling
483 * ww_mutex_lock_interruptible directly. This function here is simply to help
484 * w/w mutex locking code readability by clearly denoting the slowpath.
485 */
486static inline int __must_check
487ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
488 struct ww_acquire_ctx *ctx)
489{
490#ifdef CONFIG_DEBUG_MUTEXES
491 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
492#endif
493 return ww_mutex_lock_interruptible(lock, ctx);
494}
495
496extern void ww_mutex_unlock(struct ww_mutex *lock);
497
498/**
499 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
500 * @lock: mutex to lock
501 *
502 * Trylocks a mutex without acquire context, so no deadlock detection is
503 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
504 */
505static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
506{
507 return mutex_trylock(&lock->base);
508}
509
510/**
511 * ww_mutex_destroy - mark a w/w mutex unusable
512 * @lock: the mutex to be destroyed
513 *
514 * This function marks the mutex uninitialized, and any subsequent
515 * use of the mutex is forbidden. The mutex must not be locked when
516 * this function is called.
517 */
518static inline void ww_mutex_destroy(struct ww_mutex *lock)
519{
520 mutex_destroy(&lock->base);
521}
522
523/**
524 * ww_mutex_is_locked - is the w/w mutex locked
525 * @lock: the mutex to be queried
526 *
527 * Returns 1 if the mutex is locked, 0 if unlocked.
528 */
529static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
530{
531 return mutex_is_locked(&lock->base);
532}
533
534extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
535
536#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
new file mode 100644
index 000000000000..80587fdbba3e
--- /dev/null
+++ b/include/linux/platform_data/rcar-du.h
@@ -0,0 +1,54 @@
1/*
2 * rcar_du.h -- R-Car Display Unit DRM driver
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_H__
15#define __RCAR_DU_H__
16
17#include <drm/drm_mode.h>
18
19enum rcar_du_encoder_type {
20 RCAR_DU_ENCODER_UNUSED = 0,
21 RCAR_DU_ENCODER_VGA,
22 RCAR_DU_ENCODER_LVDS,
23};
24
25struct rcar_du_panel_data {
26 unsigned int width_mm; /* Panel width in mm */
27 unsigned int height_mm; /* Panel height in mm */
28 struct drm_mode_modeinfo mode;
29};
30
31struct rcar_du_encoder_lvds_data {
32 struct rcar_du_panel_data panel;
33};
34
35struct rcar_du_encoder_vga_data {
36 /* TODO: Add DDC information for EDID retrieval */
37};
38
39struct rcar_du_encoder_data {
40 enum rcar_du_encoder_type encoder;
41 unsigned int output;
42
43 union {
44 struct rcar_du_encoder_lvds_data lvds;
45 struct rcar_du_encoder_vga_data vga;
46 } u;
47};
48
49struct rcar_du_platform_data {
50 struct rcar_du_encoder_data *encoders;
51 unsigned int num_encoders;
52};
53
54#endif /* __RCAR_DU_H__ */
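
A hypothetical board-file snippet showing how this platform data could be wired up (the panel dimensions and array names are illustrative, not from this patch):

static struct rcar_du_encoder_data du_encoders[] = {
	{
		.encoder = RCAR_DU_ENCODER_LVDS,
		.output = 0,
		.u.lvds.panel = {
			.width_mm = 210,
			.height_mm = 158,
			/* .mode = panel timings in drm_mode_modeinfo form */
		},
	},
};

static struct rcar_du_platform_data du_pdata = {
	.encoders = du_encoders,
	.num_encoders = ARRAY_SIZE(du_encoders),
};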
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
new file mode 100644
index 000000000000..e9ee806a9d72
--- /dev/null
+++ b/include/linux/reservation.h
@@ -0,0 +1,62 @@
1/*
2 * Header file for reservations for dma-buf and ttm
3 *
4 * Copyright(C) 2011 Linaro Limited. All rights reserved.
5 * Copyright (C) 2012-2013 Canonical Ltd
6 * Copyright (C) 2012 Texas Instruments
7 *
8 * Authors:
9 * Rob Clark <rob.clark@linaro.org>
10 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
11 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
12 *
13 * Based on bo.c which bears the following copyright notice,
14 * but is dual licensed:
15 *
16 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
17 * All Rights Reserved.
18 *
19 * Permission is hereby granted, free of charge, to any person obtaining a
20 * copy of this software and associated documentation files (the
21 * "Software"), to deal in the Software without restriction, including
22 * without limitation the rights to use, copy, modify, merge, publish,
23 * distribute, sub license, and/or sell copies of the Software, and to
24 * permit persons to whom the Software is furnished to do so, subject to
25 * the following conditions:
26 *
27 * The above copyright notice and this permission notice (including the
28 * next paragraph) shall be included in all copies or substantial portions
29 * of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37 * USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39#ifndef _LINUX_RESERVATION_H
40#define _LINUX_RESERVATION_H
41
42#include <linux/mutex.h>
43
44extern struct ww_class reservation_ww_class;
45
46struct reservation_object {
47 struct ww_mutex lock;
48};
49
50static inline void
51reservation_object_init(struct reservation_object *obj)
52{
53 ww_mutex_init(&obj->lock, &reservation_ww_class);
54}
55
56static inline void
57reservation_object_fini(struct reservation_object *obj)
58{
59 ww_mutex_destroy(&obj->lock);
60}
61
62#endif /* _LINUX_RESERVATION_H */
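
A sketch of how a driver might wrap a buffer with the new reservation object; my_buffer and its locking helper are assumptions, not part of this patch:

struct my_buffer {
	struct reservation_object resv;
	/* ... buffer state protected by resv.lock ... */
};

static int my_buffer_lock(struct my_buffer *buf, struct ww_acquire_ctx *ctx)
{
	/*
	 * All reservation objects share reservation_ww_class, so several
	 * buffers can be locked together with wait/wound deadlock handling.
	 */
	return ww_mutex_lock(&buf->resv.lock, ctx);
}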
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 5a57be68bab7..238a166b9fe6 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -732,6 +732,7 @@ struct drm_prime_handle {
732#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
733#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
734#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
735#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
736
737/**
738 * Device specific ioctls should only be in their respective headers
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 090e5331ab7e..53db7cea373b 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -388,6 +388,19 @@ struct drm_mode_cursor {
388 __u32 handle;
389};
390
391struct drm_mode_cursor2 {
392 __u32 flags;
393 __u32 crtc_id;
394 __s32 x;
395 __s32 y;
396 __u32 width;
397 __u32 height;
398 /* driver specific handle */
399 __u32 handle;
400 __s32 hot_x;
401 __s32 hot_y;
402};
403
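
A hypothetical userspace call showing the point of the new structure, passing a cursor hotspot along with the buffer (fd, crtc_id and bo_handle are assumed to exist in the caller):

struct drm_mode_cursor2 arg = {
	.flags   = DRM_MODE_CURSOR_BO,
	.crtc_id = crtc_id,
	.width   = 64,
	.height  = 64,
	.handle  = bo_handle,
	.hot_x   = 4,	/* hotspot offset inside the 64x64 image */
	.hot_y   = 2,
};

drmIoctl(fd, DRM_IOCTL_MODE_CURSOR2, &arg);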
404struct drm_mode_crtc_lut {
405 __u32 crtc_id;
406 __u32 gamma_size;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 07d59419fe6b..923ed7fe5775 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -305,7 +305,7 @@ typedef struct drm_i915_irq_wait {
305#define I915_PARAM_HAS_WAIT_TIMEOUT 19
306#define I915_PARAM_HAS_SEMAPHORES 20
307#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
308#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
308#define I915_PARAM_HAS_VEBOX 22
309#define I915_PARAM_HAS_SECURE_BATCHES 23
310#define I915_PARAM_HAS_PINNED_BATCHES 24
311#define I915_PARAM_HAS_EXEC_NO_RELOC 25
@@ -660,6 +660,7 @@ struct drm_i915_gem_execbuffer2 {
660#define I915_EXEC_RENDER (1<<0)
661#define I915_EXEC_BSD (2<<0)
662#define I915_EXEC_BLT (3<<0)
663#define I915_EXEC_VEBOX (4<<0)
664
665/* Used for switching the constants addressing mode on gen4+ RENDER ring.
666 * Gen6+ only supports relative addressing to dynamic state (default) and
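
A hypothetical userspace fragment selecting the new ring (buffer setup omitted; fd is assumed):

struct drm_i915_gem_execbuffer2 eb = {};

eb.flags = I915_EXEC_VEBOX;	/* submit to the video enhancement ring */
/* ... buffers_ptr/buffer_count/batch filled in as usual ... */
drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);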
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 6e132a2f7420..73bde4eaf16c 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -17,6 +17,8 @@
17#ifndef _UAPI_TEGRA_DRM_H_
18#define _UAPI_TEGRA_DRM_H_
19
20#include <drm/drm.h>
21
22struct drm_tegra_gem_create {
23 __u64 size;
24 __u32 flags;
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
index 5d0259b08e01..28d9d0d566ca 100644
--- a/include/video/display_timing.h
+++ b/include/video/display_timing.h
@@ -27,6 +27,7 @@ enum display_flags {
27 DISPLAY_FLAGS_PIXDATA_NEGEDGE = BIT(7),
28 DISPLAY_FLAGS_INTERLACED = BIT(8),
29 DISPLAY_FLAGS_DOUBLESCAN = BIT(9),
30 DISPLAY_FLAGS_DOUBLECLK = BIT(10),
31};
32
33/*
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 1a91850cb961..30f53625415c 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -134,6 +134,7 @@ struct uvesafb_par {
134
135 int mode_idx;
136 struct vbe_crtc_ib crtc;
137 int mtrr_handle;
138};
139
140#endif /* _UVESAFB_H */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..e581ada5faf4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -254,16 +254,165 @@ void __sched mutex_unlock(struct mutex *lock)
254
255EXPORT_SYMBOL(mutex_unlock);
256
257/**
258 * ww_mutex_unlock - release the w/w mutex
259 * @lock: the mutex to be released
260 *
261 * Unlock a mutex that has been locked by this task previously with any of the
262 * ww_mutex_lock* functions (with or without an acquire context). It is
263 * forbidden to release the locks after releasing the acquire context.
264 *
265 * This function must not be used in interrupt context. Unlocking
266 * of an unlocked mutex is not allowed.
267 */
268void __sched ww_mutex_unlock(struct ww_mutex *lock)
269{
270 /*
271 * The unlocking fastpath is the 0->1 transition from 'locked'
272 * into 'unlocked' state:
273 */
274 if (lock->ctx) {
275#ifdef CONFIG_DEBUG_MUTEXES
276 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
277#endif
278 if (lock->ctx->acquired > 0)
279 lock->ctx->acquired--;
280 lock->ctx = NULL;
281 }
282
283#ifndef CONFIG_DEBUG_MUTEXES
284 /*
285 * When debugging is enabled we must not clear the owner early:
286 * the slow path will always be taken, and it clears the owner field
287 * after verifying that it was indeed current.
288 */
289 mutex_clear_owner(&lock->base);
290#endif
291 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
292}
293EXPORT_SYMBOL(ww_mutex_unlock);
294
295static inline int __sched
296__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
297{
298 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
299 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
300
301 if (!hold_ctx)
302 return 0;
303
304 if (unlikely(ctx == hold_ctx))
305 return -EALREADY;
306
307 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
308 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
309#ifdef CONFIG_DEBUG_MUTEXES
310 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
311 ctx->contending_lock = ww;
312#endif
313 return -EDEADLK;
314 }
315
316 return 0;
317}
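
The stamp test above is wrap-safe unsigned arithmetic: since stamps come from a monotonically increasing counter, ctx->stamp - hold_ctx->stamp <= LONG_MAX holds exactly when ctx's stamp is equal to or newer than hold_ctx's, even across counter wrap-around. For example, hold_ctx->stamp == ULONG_MAX and ctx->stamp == 1 gives a difference of 2, so the younger ctx correctly backs off with -EDEADLK while the older holder keeps the lock.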
318
319static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
320 struct ww_acquire_ctx *ww_ctx)
321{
322#ifdef CONFIG_DEBUG_MUTEXES
323 /*
324 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
325 * but released with a normal mutex_unlock in this call.
326 *
327 * This should never happen, always use ww_mutex_unlock.
328 */
329 DEBUG_LOCKS_WARN_ON(ww->ctx);
330
331 /*
332 * Not quite done after calling ww_acquire_done() ?
333 */
334 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
335
336 if (ww_ctx->contending_lock) {
337 /*
338 * After -EDEADLK you tried to
339 * acquire a different ww_mutex? Bad!
340 */
341 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
342
343 /*
344 * You called ww_mutex_lock after receiving -EDEADLK,
345 * but 'forgot' to unlock everything else first?
346 */
347 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
348 ww_ctx->contending_lock = NULL;
349 }
350
351 /*
352 * Naughty, using a different class will lead to undefined behavior!
353 */
354 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
355#endif
356 ww_ctx->acquired++;
357}
358
359/*
360 * After acquiring the lock with the fastpath, or when we lost out in the
361 * contested slowpath, set ctx and wake up any waiters so they can recheck.
362 *
363 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
364 * as the fastpath and opportunistic spinning are disabled in that case.
365 */
366static __always_inline void
367ww_mutex_set_context_fastpath(struct ww_mutex *lock,
368 struct ww_acquire_ctx *ctx)
369{
370 unsigned long flags;
371 struct mutex_waiter *cur;
372
373 ww_mutex_lock_acquired(lock, ctx);
374
375 lock->ctx = ctx;
376
377 /*
378 * The lock->ctx update should be visible on all cores before
379 * the atomic read is done, otherwise contended waiters might be
380 * missed. The contended waiters will either see ww_ctx == NULL
381 * and keep spinning, or they will acquire wait_lock, add themselves
382 * to the waiter list and sleep.
383 */
384 smp_mb(); /* ^^^ */
385
386 /*
387 * Check if lock is contended, if not there is nobody to wake up
388 */
389 if (likely(atomic_read(&lock->base.count) == 0))
390 return;
391
392 /*
393 * Uh oh, we raced in fastpath, wake up everyone in this case,
394 * so they can see the new lock->ctx.
395 */
396 spin_lock_mutex(&lock->base.wait_lock, flags);
397 list_for_each_entry(cur, &lock->base.wait_list, list) {
398 debug_mutex_wake_waiter(&lock->base, cur);
399 wake_up_process(cur->task);
400 }
401 spin_unlock_mutex(&lock->base.wait_lock, flags);
402}
403
404/*
405 * Lock a mutex (possibly interruptible), slowpath:
406 */
260static inline int __sched
407static __always_inline int __sched
408__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
262 struct lockdep_map *nest_lock, unsigned long ip)
409 struct lockdep_map *nest_lock, unsigned long ip,
410 struct ww_acquire_ctx *ww_ctx)
411{
412 struct task_struct *task = current;
413 struct mutex_waiter waiter;
414 unsigned long flags;
415 int ret;
416
417 preempt_disable();
418 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
@@ -298,6 +447,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
447 struct task_struct *owner;
448 struct mspin_node node;
449
450 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
451 struct ww_mutex *ww;
452
453 ww = container_of(lock, struct ww_mutex, base);
454 /*
455 * If ww->ctx is set, its contents are undefined; only
456 * acquiring wait_lock guarantees that they are valid
457 * when read.
458 *
459 * As such, optimistic spinning cannot be done when
460 * deadlock detection needs to be performed.
461 */
462 if (ACCESS_ONCE(ww->ctx))
463 break;
464 }
465
466 /*
467 * If there's an owner, wait for it to either
468 * release the lock or go to sleep.
@@ -312,6 +477,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
477 if ((atomic_read(&lock->count) == 1) &&
478 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
479 lock_acquired(&lock->dep_map, ip);
480 if (!__builtin_constant_p(ww_ctx == NULL)) {
481 struct ww_mutex *ww;
482 ww = container_of(lock, struct ww_mutex, base);
483
484 ww_mutex_set_context_fastpath(ww, ww_ctx);
485 }
486
487 mutex_set_owner(lock);
488 mspin_unlock(MLOCK(lock), &node);
489 preempt_enable();
@@ -371,15 +543,16 @@ slowpath:
543 * TASK_UNINTERRUPTIBLE case.)
544 */
545 if (unlikely(signal_pending_state(state, task))) {
374 mutex_remove_waiter(lock, &waiter,
375 task_thread_info(task));
376 mutex_release(&lock->dep_map, 1, ip);
377 spin_unlock_mutex(&lock->wait_lock, flags);
378
379 debug_mutex_free_waiter(&waiter);
380 preempt_enable();
381 return -EINTR;
546 ret = -EINTR;
547 goto err;
548 }
549
550 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
551 ret = __mutex_lock_check_stamp(lock, ww_ctx);
552 if (ret)
553 goto err;
554 }
555
556 __set_task_state(task, state);
557
558 /* didn't get the lock, go to sleep: */
@@ -394,6 +567,30 @@ done:
567 mutex_remove_waiter(lock, &waiter, current_thread_info());
568 mutex_set_owner(lock);
569
570 if (!__builtin_constant_p(ww_ctx == NULL)) {
571 struct ww_mutex *ww = container_of(lock,
572 struct ww_mutex,
573 base);
574 struct mutex_waiter *cur;
575
576 /*
577 * This branch gets optimized out for the common case,
578 * and is only important for ww_mutex_lock.
579 */
580
581 ww_mutex_lock_acquired(ww, ww_ctx);
582 ww->ctx = ww_ctx;
583
584 /*
585 * Give any possible sleeping processes the chance to wake up,
586 * so they can recheck if they have to back off.
587 */
588 list_for_each_entry(cur, &lock->wait_list, list) {
589 debug_mutex_wake_waiter(lock, cur);
590 wake_up_process(cur->task);
591 }
592 }
593
594 /* set it to 0 if there are no waiters left: */
595 if (likely(list_empty(&lock->wait_list)))
596 atomic_set(&lock->count, 0);
@@ -404,6 +601,14 @@ done:
601 preempt_enable();
602
603 return 0;
604
605err:
606 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
607 spin_unlock_mutex(&lock->wait_lock, flags);
608 debug_mutex_free_waiter(&waiter);
609 mutex_release(&lock->dep_map, 1, ip);
610 preempt_enable();
611 return ret;
612}
613
614#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -411,7 +616,8 @@ void __sched
616mutex_lock_nested(struct mutex *lock, unsigned int subclass)
617{
618 might_sleep();
414 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
619 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
620 subclass, NULL, _RET_IP_, NULL);
621}
622
623EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -420,7 +626,8 @@ void __sched
626_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
627{
628 might_sleep();
423 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
629 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
630 0, nest, _RET_IP_, NULL);
631}
632
633EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -429,7 +636,8 @@ int __sched
636mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
637{
638 might_sleep();
432 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
639 return __mutex_lock_common(lock, TASK_KILLABLE,
640 subclass, NULL, _RET_IP_, NULL);
641}
642EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
643
@@ -438,10 +646,68 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
646{
647 might_sleep();
648 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
441 subclass, NULL, _RET_IP_);
649 subclass, NULL, _RET_IP_, NULL);
650}
651
652EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
653
654static inline int
655ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
656{
657#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
658 unsigned tmp;
659
660 if (ctx->deadlock_inject_countdown-- == 0) {
661 tmp = ctx->deadlock_inject_interval;
662 if (tmp > UINT_MAX/4)
663 tmp = UINT_MAX;
664 else
665 tmp = tmp*2 + tmp + tmp/2;
666
667 ctx->deadlock_inject_interval = tmp;
668 ctx->deadlock_inject_countdown = tmp;
669 ctx->contending_lock = lock;
670
671 ww_mutex_unlock(lock);
672
673 return -EDEADLK;
674 }
675#endif
676
677 return 0;
678}
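
For reference, tmp*2 + tmp + tmp/2 above grows the injection interval by a factor of 3.5 (rounded down): starting from, say, 4, the injected -EDEADLK cases land after roughly 4, 14, 49, 171, ... acquisitions, so slowpath coverage stays high early on while the overhead fades over time.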
679
680int __sched
681__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
682{
683 int ret;
684
685 might_sleep();
686 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
687 0, &ctx->dep_map, _RET_IP_, ctx);
688 if (!ret && ctx->acquired > 0)
689 return ww_mutex_deadlock_injection(lock, ctx);
690
691 return ret;
692}
693EXPORT_SYMBOL_GPL(__ww_mutex_lock);
694
695int __sched
696__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
697{
698 int ret;
699
700 might_sleep();
701 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
702 0, &ctx->dep_map, _RET_IP_, ctx);
703
704 if (!ret && ctx->acquired > 0)
705 return ww_mutex_deadlock_injection(lock, ctx);
706
707 return ret;
708}
709EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
710
711#endif
712
713/*
@@ -494,10 +760,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
760 * mutex_lock_interruptible() and mutex_trylock().
761 */
762static noinline int __sched
497__mutex_lock_killable_slowpath(atomic_t *lock_count);
763__mutex_lock_killable_slowpath(struct mutex *lock);
764
765static noinline int __sched
500__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
766__mutex_lock_interruptible_slowpath(struct mutex *lock);
767
768/**
769 * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +781,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
781 int ret;
782
783 might_sleep();
518 ret = __mutex_fastpath_lock_retval
519 (&lock->count, __mutex_lock_interruptible_slowpath);
520 if (!ret)
521 mutex_set_owner(lock);
522
523 return ret;
784 ret = __mutex_fastpath_lock_retval(&lock->count);
785 if (likely(!ret)) {
786 mutex_set_owner(lock);
787 return 0;
788 } else
789 return __mutex_lock_interruptible_slowpath(lock);
790}
791
792EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +796,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
796 int ret;
797
798 might_sleep();
533 ret = __mutex_fastpath_lock_retval
534 (&lock->count, __mutex_lock_killable_slowpath);
535 if (!ret)
536 mutex_set_owner(lock);
537
538 return ret;
799 ret = __mutex_fastpath_lock_retval(&lock->count);
800 if (likely(!ret)) {
801 mutex_set_owner(lock);
802 return 0;
803 } else
804 return __mutex_lock_killable_slowpath(lock);
805}
806EXPORT_SYMBOL(mutex_lock_killable);
807
@@ -544,24 +810,39 @@ __mutex_lock_slowpath(atomic_t *lock_count)
810{
811 struct mutex *lock = container_of(lock_count, struct mutex, count);
812
547 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
813 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
814 NULL, _RET_IP_, NULL);
815}
816
550static noinline int __sched
551__mutex_lock_killable_slowpath(atomic_t *lock_count)
552{
553 struct mutex *lock = container_of(lock_count, struct mutex, count);
554
555 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
556}
817static noinline int __sched
818__mutex_lock_killable_slowpath(struct mutex *lock)
819{
820 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
821 NULL, _RET_IP_, NULL);
822}
823
558static noinline int __sched
559__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
560{
561 struct mutex *lock = container_of(lock_count, struct mutex, count);
562
563 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
564}
824static noinline int __sched
825__mutex_lock_interruptible_slowpath(struct mutex *lock)
826{
827 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
828 NULL, _RET_IP_, NULL);
829}
830
831static noinline int __sched
832__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
833{
834 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
835 NULL, _RET_IP_, ctx);
836}
837
838static noinline int __sched
839__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
840 struct ww_acquire_ctx *ctx)
841{
842 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
843 NULL, _RET_IP_, ctx);
844}
845
846#endif
847
848/*
@@ -617,6 +898,45 @@ int __sched mutex_trylock(struct mutex *lock)
898}
899EXPORT_SYMBOL(mutex_trylock);
900
901#ifndef CONFIG_DEBUG_LOCK_ALLOC
902int __sched
903__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
904{
905 int ret;
906
907 might_sleep();
908
909 ret = __mutex_fastpath_lock_retval(&lock->base.count);
910
911 if (likely(!ret)) {
912 ww_mutex_set_context_fastpath(lock, ctx);
913 mutex_set_owner(&lock->base);
914 } else
915 ret = __ww_mutex_lock_slowpath(lock, ctx);
916 return ret;
917}
918EXPORT_SYMBOL(__ww_mutex_lock);
919
920int __sched
921__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
922{
923 int ret;
924
925 might_sleep();
926
927 ret = __mutex_fastpath_lock_retval(&lock->base.count);
928
929 if (likely(!ret)) {
930 ww_mutex_set_context_fastpath(lock, ctx);
931 mutex_set_owner(&lock->base);
932 } else
933 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
934 return ret;
935}
936EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
937
938#endif
939
940/**
941 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
942 * @cnt: the atomic which we are to dec
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 566cf2bc08ea..7154f799541a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -547,6 +547,19 @@ config DEBUG_MUTEXES
547 This feature allows mutex semantics violations to be detected and
548 reported.
549
550config DEBUG_WW_MUTEX_SLOWPATH
551 bool "Wait/wound mutex debugging: Slowpath testing"
552 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
553 select DEBUG_LOCK_ALLOC
554 select DEBUG_SPINLOCK
555 select DEBUG_MUTEXES
556 help
557 This feature enables slowpath testing for w/w mutex users by
558 injecting additional -EDEADLK wound/backoff cases. Together with
559 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
560 will test all possible w/w mutex interface abuse with the
561 exception of simply not acquiring all the required locks.
562
563config DEBUG_LOCK_ALLOC
564 bool "Lock debugging: detect incorrect freeing of live locks"
565 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index f2fa60c59343..96c4c633d95e 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
30 * a locking bug is detected.
31 */
32int debug_locks_silent;
33EXPORT_SYMBOL_GPL(debug_locks_silent);
34
35/*
36 * Generic 'turn off all lock debugging' function:
@@ -44,3 +45,4 @@ int debug_locks_off(void)
45 }
46 return 0;
47}
48EXPORT_SYMBOL_GPL(debug_locks_off);
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index c3eb261a7df3..aad024dde3c4 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -26,6 +26,8 @@
26 */
27static unsigned int debug_locks_verbose;
28
29static DEFINE_WW_CLASS(ww_lockdep);
30
31static int __init setup_debug_locks_verbose(char *str)
32{
33 get_option(&str, &debug_locks_verbose);
@@ -42,6 +44,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
44#define LOCKTYPE_RWLOCK 0x2
45#define LOCKTYPE_MUTEX 0x4
46#define LOCKTYPE_RWSEM 0x8
47#define LOCKTYPE_WW 0x10
48
49static struct ww_acquire_ctx t, t2;
50static struct ww_mutex o, o2, o3;
51
52/*
53 * Normal standalone locks, for the circular and irq-context
@@ -193,6 +199,20 @@ static void init_shared_classes(void)
199#define RSU(x) up_read(&rwsem_##x)
200#define RWSI(x) init_rwsem(&rwsem_##x)
201
202#ifndef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
203#define WWAI(x) ww_acquire_init(x, &ww_lockdep)
204#else
205#define WWAI(x) do { ww_acquire_init(x, &ww_lockdep); (x)->deadlock_inject_countdown = ~0U; } while (0)
206#endif
207#define WWAD(x) ww_acquire_done(x)
208#define WWAF(x) ww_acquire_fini(x)
209
210#define WWL(x, c) ww_mutex_lock(x, c)
211#define WWT(x) ww_mutex_trylock(x)
212#define WWL1(x) ww_mutex_lock(x, NULL)
213#define WWU(x) ww_mutex_unlock(x)
214
215
216#define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
217
218/*
@@ -894,11 +914,13 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
914# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
915# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
916# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
917# define I_WW(x) lockdep_reset_lock(&x.dep_map)
918#else
919# define I_SPINLOCK(x)
920# define I_RWLOCK(x)
921# define I_MUTEX(x)
922# define I_RWSEM(x)
923# define I_WW(x)
924#endif
925
926#define I1(x) \
@@ -920,11 +942,20 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
942static void reset_locks(void)
943{
944 local_irq_disable();
945 lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
946 lockdep_free_key_range(&ww_lockdep.mutex_key, 1);
947
948 I1(A); I1(B); I1(C); I1(D);
949 I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
950 I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
951 lockdep_reset();
952 I2(A); I2(B); I2(C); I2(D);
953 init_shared_classes();
954
955 ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
956 memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
957 memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
958 memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
959 local_irq_enable();
960}
961
@@ -938,7 +969,6 @@ static int unexpected_testcase_failures;
969static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
970{
971 unsigned long saved_preempt_count = preempt_count();
941 int expected_failure = 0;
972
973 WARN_ON(irqs_disabled());
974
@@ -947,25 +977,17 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
977 * Filter out expected failures:
978 */
979#ifndef CONFIG_PROVE_LOCKING
950 if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
951 expected_failure = 1;
952 if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
953 expected_failure = 1;
954 if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
955 expected_failure = 1;
956 if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
957 expected_failure = 1;
980 if (expected == FAILURE && debug_locks) {
981 expected_testcase_failures++;
982 printk("failed|");
983 }
984 else
985#endif
986 if (debug_locks != expected) {
960 if (expected_failure) {
961 expected_testcase_failures++;
962 printk("failed|");
963 } else {
964 unexpected_testcase_failures++;
965
966 printk("FAILED|");
967 dump_stack();
968 }
987 unexpected_testcase_failures++;
988 printk("FAILED|");
989
990 dump_stack();
991 } else {
992 testcase_successes++;
993 printk(" ok |");
@@ -1108,6 +1130,666 @@ static inline void print_testname(const char *testname)
1130 DO_TESTCASE_6IRW(desc, name, 312); \
1131 DO_TESTCASE_6IRW(desc, name, 321);
1132
1133static void ww_test_fail_acquire(void)
1134{
1135 int ret;
1136
1137 WWAI(&t);
1138 t.stamp++;
1139
1140 ret = WWL(&o, &t);
1141
1142 if (WARN_ON(!o.ctx) ||
1143 WARN_ON(ret))
1144 return;
1145
1146 /* No lockdep test, pure API */
1147 ret = WWL(&o, &t);
1148 WARN_ON(ret != -EALREADY);
1149
1150 ret = WWT(&o);
1151 WARN_ON(ret);
1152
1153 t2 = t;
1154 t2.stamp++;
1155 ret = WWL(&o, &t2);
1156 WARN_ON(ret != -EDEADLK);
1157 WWU(&o);
1158
1159 if (WWT(&o))
1160 WWU(&o);
1161#ifdef CONFIG_DEBUG_LOCK_ALLOC
1162 else
1163 DEBUG_LOCKS_WARN_ON(1);
1164#endif
1165}
1166
1167static void ww_test_normal(void)
1168{
1169 int ret;
1170
1171 WWAI(&t);
1172
1173 /*
1174 * None of the ww_mutex codepaths should be taken in the 'normal'
1175 * mutex calls. The easiest way to verify this is by using the
1176 * normal mutex calls, and making sure o.ctx is unmodified.
1177 */
1178
1179 /* mutex_lock (and indirectly, mutex_lock_nested) */
1180 o.ctx = (void *)~0UL;
1181 mutex_lock(&o.base);
1182 mutex_unlock(&o.base);
1183 WARN_ON(o.ctx != (void *)~0UL);
1184
1185 /* mutex_lock_interruptible (and *_nested) */
1186 o.ctx = (void *)~0UL;
1187 ret = mutex_lock_interruptible(&o.base);
1188 if (!ret)
1189 mutex_unlock(&o.base);
1190 else
1191 WARN_ON(1);
1192 WARN_ON(o.ctx != (void *)~0UL);
1193
1194 /* mutex_lock_killable (and *_nested) */
1195 o.ctx = (void *)~0UL;
1196 ret = mutex_lock_killable(&o.base);
1197 if (!ret)
1198 mutex_unlock(&o.base);
1199 else
1200 WARN_ON(1);
1201 WARN_ON(o.ctx != (void *)~0UL);
1202
1203 /* trylock, succeeding */
1204 o.ctx = (void *)~0UL;
1205 ret = mutex_trylock(&o.base);
1206 WARN_ON(!ret);
1207 if (ret)
1208 mutex_unlock(&o.base);
1209 else
1210 WARN_ON(1);
1211 WARN_ON(o.ctx != (void *)~0UL);
1212
1213 /* trylock, failing */
1214 o.ctx = (void *)~0UL;
1215 mutex_lock(&o.base);
1216 ret = mutex_trylock(&o.base);
1217 WARN_ON(ret);
1218 mutex_unlock(&o.base);
1219 WARN_ON(o.ctx != (void *)~0UL);
1220
1221 /* nest_lock */
1222 o.ctx = (void *)~0UL;
1223 mutex_lock_nest_lock(&o.base, &t);
1224 mutex_unlock(&o.base);
1225 WARN_ON(o.ctx != (void *)~0UL);
1226}
1227
1228static void ww_test_two_contexts(void)
1229{
1230 WWAI(&t);
1231 WWAI(&t2);
1232}
1233
1234static void ww_test_diff_class(void)
1235{
1236 WWAI(&t);
1237#ifdef CONFIG_DEBUG_MUTEXES
1238 t.ww_class = NULL;
1239#endif
1240 WWL(&o, &t);
1241}
1242
1243static void ww_test_context_done_twice(void)
1244{
1245 WWAI(&t);
1246 WWAD(&t);
1247 WWAD(&t);
1248 WWAF(&t);
1249}
1250
1251static void ww_test_context_unlock_twice(void)
1252{
1253 WWAI(&t);
1254 WWAD(&t);
1255 WWAF(&t);
1256 WWAF(&t);
1257}
1258
1259static void ww_test_context_fini_early(void)
1260{
1261 WWAI(&t);
1262 WWL(&o, &t);
1263 WWAD(&t);
1264 WWAF(&t);
1265}
1266
1267static void ww_test_context_lock_after_done(void)
1268{
1269 WWAI(&t);
1270 WWAD(&t);
1271 WWL(&o, &t);
1272}
1273
1274static void ww_test_object_unlock_twice(void)
1275{
1276 WWL1(&o);
1277 WWU(&o);
1278 WWU(&o);
1279}
1280
1281static void ww_test_object_lock_unbalanced(void)
1282{
1283 WWAI(&t);
1284 WWL(&o, &t);
1285 t.acquired = 0;
1286 WWU(&o);
1287 WWAF(&t);
1288}
1289
1290static void ww_test_object_lock_stale_context(void)
1291{
1292 WWAI(&t);
1293 o.ctx = &t2;
1294 WWL(&o, &t);
1295}
1296
1297static void ww_test_edeadlk_normal(void)
1298{
1299 int ret;
1300
1301 mutex_lock(&o2.base);
1302 o2.ctx = &t2;
1303 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1304
1305 WWAI(&t);
1306 t2 = t;
1307 t2.stamp--;
1308
1309 ret = WWL(&o, &t);
1310 WARN_ON(ret);
1311
1312 ret = WWL(&o2, &t);
1313 WARN_ON(ret != -EDEADLK);
1314
1315 o2.ctx = NULL;
1316 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1317 mutex_unlock(&o2.base);
1318 WWU(&o);
1319
1320 WWL(&o2, &t);
1321}
1322
1323static void ww_test_edeadlk_normal_slow(void)
1324{
1325 int ret;
1326
1327 mutex_lock(&o2.base);
1328 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1329 o2.ctx = &t2;
1330
1331 WWAI(&t);
1332 t2 = t;
1333 t2.stamp--;
1334
1335 ret = WWL(&o, &t);
1336 WARN_ON(ret);
1337
1338 ret = WWL(&o2, &t);
1339 WARN_ON(ret != -EDEADLK);
1340
1341 o2.ctx = NULL;
1342 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1343 mutex_unlock(&o2.base);
1344 WWU(&o);
1345
1346 ww_mutex_lock_slow(&o2, &t);
1347}
1348
1349static void ww_test_edeadlk_no_unlock(void)
1350{
1351 int ret;
1352
1353 mutex_lock(&o2.base);
1354 o2.ctx = &t2;
1355 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1356
1357 WWAI(&t);
1358 t2 = t;
1359 t2.stamp--;
1360
1361 ret = WWL(&o, &t);
1362 WARN_ON(ret);
1363
1364 ret = WWL(&o2, &t);
1365 WARN_ON(ret != -EDEADLK);
1366
1367 o2.ctx = NULL;
1368 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1369 mutex_unlock(&o2.base);
1370
1371 WWL(&o2, &t);
1372}
1373
1374static void ww_test_edeadlk_no_unlock_slow(void)
1375{
1376 int ret;
1377
1378 mutex_lock(&o2.base);
1379 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1380 o2.ctx = &t2;
1381
1382 WWAI(&t);
1383 t2 = t;
1384 t2.stamp--;
1385
1386 ret = WWL(&o, &t);
1387 WARN_ON(ret);
1388
1389 ret = WWL(&o2, &t);
1390 WARN_ON(ret != -EDEADLK);
1391
1392 o2.ctx = NULL;
1393 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1394 mutex_unlock(&o2.base);
1395
1396 ww_mutex_lock_slow(&o2, &t);
1397}
1398
1399static void ww_test_edeadlk_acquire_more(void)
1400{
1401 int ret;
1402
1403 mutex_lock(&o2.base);
1404 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1405 o2.ctx = &t2;
1406
1407 WWAI(&t);
1408 t2 = t;
1409 t2.stamp--;
1410
1411 ret = WWL(&o, &t);
1412 WARN_ON(ret);
1413
1414 ret = WWL(&o2, &t);
1415 WARN_ON(ret != -EDEADLK);
1416
1417 ret = WWL(&o3, &t);
1418}
1419
1420static void ww_test_edeadlk_acquire_more_slow(void)
1421{
1422 int ret;
1423
1424 mutex_lock(&o2.base);
1425 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1426 o2.ctx = &t2;
1427
1428 WWAI(&t);
1429 t2 = t;
1430 t2.stamp--;
1431
1432 ret = WWL(&o, &t);
1433 WARN_ON(ret);
1434
1435 ret = WWL(&o2, &t);
1436 WARN_ON(ret != -EDEADLK);
1437
1438 ww_mutex_lock_slow(&o3, &t);
1439}
1440
1441static void ww_test_edeadlk_acquire_more_edeadlk(void)
1442{
1443 int ret;
1444
1445 mutex_lock(&o2.base);
1446 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1447 o2.ctx = &t2;
1448
1449 mutex_lock(&o3.base);
1450 mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
1451 o3.ctx = &t2;
1452
1453 WWAI(&t);
1454 t2 = t;
1455 t2.stamp--;
1456
1457 ret = WWL(&o, &t);
1458 WARN_ON(ret);
1459
1460 ret = WWL(&o2, &t);
1461 WARN_ON(ret != -EDEADLK);
1462
1463 ret = WWL(&o3, &t);
1464 WARN_ON(ret != -EDEADLK);
1465}
1466
1467static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
1468{
1469 int ret;
1470
1471 mutex_lock(&o2.base);
1472 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1473 o2.ctx = &t2;
1474
1475 mutex_lock(&o3.base);
1476 mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
1477 o3.ctx = &t2;
1478
1479 WWAI(&t);
1480 t2 = t;
1481 t2.stamp--;
1482
1483 ret = WWL(&o, &t);
1484 WARN_ON(ret);
1485
1486 ret = WWL(&o2, &t);
1487 WARN_ON(ret != -EDEADLK);
1488
1489 ww_mutex_lock_slow(&o3, &t);
1490}
1491
1492static void ww_test_edeadlk_acquire_wrong(void)
1493{
1494 int ret;
1495
1496 mutex_lock(&o2.base);
1497 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1498 o2.ctx = &t2;
1499
1500 WWAI(&t);
1501 t2 = t;
1502 t2.stamp--;
1503
1504 ret = WWL(&o, &t);
1505 WARN_ON(ret);
1506
1507 ret = WWL(&o2, &t);
1508 WARN_ON(ret != -EDEADLK);
1509 if (!ret)
1510 WWU(&o2);
1511
1512 WWU(&o);
1513
1514 ret = WWL(&o3, &t);
1515}
1516
1517static void ww_test_edeadlk_acquire_wrong_slow(void)
1518{
1519 int ret;
1520
1521 mutex_lock(&o2.base);
1522 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1523 o2.ctx = &t2;
1524
1525 WWAI(&t);
1526 t2 = t;
1527 t2.stamp--;
1528
1529 ret = WWL(&o, &t);
1530 WARN_ON(ret);
1531
1532 ret = WWL(&o2, &t);
1533 WARN_ON(ret != -EDEADLK);
1534 if (!ret)
1535 WWU(&o2);
1536
1537 WWU(&o);
1538
1539 ww_mutex_lock_slow(&o3, &t);
1540}
1541
1542static void ww_test_spin_nest_unlocked(void)
1543{
1544 raw_spin_lock_nest_lock(&lock_A, &o.base);
1545 U(A);
1546}
1547
1548static void ww_test_unneeded_slow(void)
1549{
1550 WWAI(&t);
1551
1552 ww_mutex_lock_slow(&o, &t);
1553}
1554
1555static void ww_test_context_block(void)
1556{
1557 int ret;
1558
1559 WWAI(&t);
1560
1561 ret = WWL(&o, &t);
1562 WARN_ON(ret);
1563 WWL1(&o2);
1564}
1565
1566static void ww_test_context_try(void)
1567{
1568 int ret;
1569
1570 WWAI(&t);
1571
1572 ret = WWL(&o, &t);
1573 WARN_ON(ret);
1574
1575 ret = WWT(&o2);
1576 WARN_ON(!ret);
1577 WWU(&o2);
1578 WWU(&o);
1579}
1580
1581static void ww_test_context_context(void)
1582{
1583 int ret;
1584
1585 WWAI(&t);
1586
1587 ret = WWL(&o, &t);
1588 WARN_ON(ret);
1589
1590 ret = WWL(&o2, &t);
1591 WARN_ON(ret);
1592
1593 WWU(&o2);
1594 WWU(&o);
1595}
1596
1597static void ww_test_try_block(void)
1598{
1599 bool ret;
1600
1601 ret = WWT(&o);
1602 WARN_ON(!ret);
1603
1604 WWL1(&o2);
1605 WWU(&o2);
1606 WWU(&o);
1607}
1608
1609static void ww_test_try_try(void)
1610{
1611 bool ret;
1612
1613 ret = WWT(&o);
1614 WARN_ON(!ret);
1615 ret = WWT(&o2);
1616 WARN_ON(!ret);
1617 WWU(&o2);
1618 WWU(&o);
1619}
1620
1621static void ww_test_try_context(void)
1622{
1623 int ret;
1624
1625 ret = WWT(&o);
1626 WARN_ON(!ret);
1627
1628 WWAI(&t);
1629
1630 ret = WWL(&o2, &t);
1631 WARN_ON(ret);
1632}
1633
1634static void ww_test_block_block(void)
1635{
1636 WWL1(&o);
1637 WWL1(&o2);
1638}
1639
1640static void ww_test_block_try(void)
1641{
1642 bool ret;
1643
1644 WWL1(&o);
1645 ret = WWT(&o2);
1646 WARN_ON(!ret);
1647}
1648
1649static void ww_test_block_context(void)
1650{
1651 int ret;
1652
1653 WWL1(&o);
1654 WWAI(&t);
1655
1656 ret = WWL(&o2, &t);
1657 WARN_ON(ret);
1658}
1659
1660static void ww_test_spin_block(void)
1661{
1662 L(A);
1663 U(A);
1664
1665 WWL1(&o);
1666 L(A);
1667 U(A);
1668 WWU(&o);
1669
1670 L(A);
1671 WWL1(&o);
1672 WWU(&o);
1673 U(A);
1674}
1675
1676static void ww_test_spin_try(void)
1677{
1678 bool ret;
1679
1680 L(A);
1681 U(A);
1682
1683 ret = WWT(&o);
1684 WARN_ON(!ret);
1685 L(A);
1686 U(A);
1687 WWU(&o);
1688
1689 L(A);
1690 ret = WWT(&o);
1691 WARN_ON(!ret);
1692 WWU(&o);
1693 U(A);
1694}
1695
1696static void ww_test_spin_context(void)
1697{
1698 int ret;
1699
1700 L(A);
1701 U(A);
1702
1703 WWAI(&t);
1704
1705 ret = WWL(&o, &t);
1706 WARN_ON(ret);
1707 L(A);
1708 U(A);
1709 WWU(&o);
1710
1711 L(A);
1712 ret = WWL(&o, &t);
1713 WARN_ON(ret);
1714 WWU(&o);
1715 U(A);
1716}
1717
1718static void ww_tests(void)
1719{
1720 printk(" --------------------------------------------------------------------------\n");
1721 printk(" | Wound/wait tests |\n");
1722 printk(" ---------------------\n");
1723
1724 print_testname("ww api failures");
1725 dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
1726 dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
1727 dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
1728 printk("\n");
1729
1730 print_testname("ww contexts mixing");
1731 dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
1732 dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
1733 printk("\n");
1734
1735 print_testname("finishing ww context");
1736 dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
1737 dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
1738 dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
1739 dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
1740 printk("\n");
1741
1742 print_testname("locking mismatches");
1743 dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
1744 dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
1745 dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
1746 printk("\n");
1747
1748 print_testname("EDEADLK handling");
1749 dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
1750 dotest(ww_test_edeadlk_normal_slow, SUCCESS, LOCKTYPE_WW);
1751 dotest(ww_test_edeadlk_no_unlock, FAILURE, LOCKTYPE_WW);
1752 dotest(ww_test_edeadlk_no_unlock_slow, FAILURE, LOCKTYPE_WW);
1753 dotest(ww_test_edeadlk_acquire_more, FAILURE, LOCKTYPE_WW);
1754 dotest(ww_test_edeadlk_acquire_more_slow, FAILURE, LOCKTYPE_WW);
1755 dotest(ww_test_edeadlk_acquire_more_edeadlk, FAILURE, LOCKTYPE_WW);
1756 dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
1757 dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
1758 dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
1759 printk("\n");
1760
1761 print_testname("spinlock nest unlocked");
1762 dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
1763 printk("\n");
1764
1765 printk(" -----------------------------------------------------\n");
1766 printk(" |block | try |context|\n");
1767 printk(" -----------------------------------------------------\n");
1768
1769 print_testname("context");
1770 dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
1771 dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
1772 dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
1773 printk("\n");
1774
1775 print_testname("try");
1776 dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
1777 dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
1778 dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
1779 printk("\n");
1780
1781 print_testname("block");
1782 dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
1783 dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
1784 dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
1785 printk("\n");
1786
1787 print_testname("spinlock");
1788 dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
1789 dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
1790 dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
1791 printk("\n");
1792}
1793
1794void locking_selftest(void)
1795{
@@ -1188,6 +1870,8 @@ void locking_selftest(void)
1870 DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
1871// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
1872
1873 ww_tests();
1874
1875 if (unexpected_testcase_failures) {
1876 printk("-----------------------------------------------------------------\n");
1877 debug_locks = 0;
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 80a7d44bcf81..c5a872ca7703 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -152,6 +152,16 @@ config SND_HDA_CODEC_HDMI
152 snd-hda-codec-hdmi.
153 This module is automatically loaded at probing.
154
155config SND_HDA_I915
156 bool "Build Display HD-audio controller/codec power well support for i915 cards"
157 depends on DRM_I915
158 help
159 Say Y here to include full HDMI and DisplayPort HD-audio controller/codec
160 power-well support for Intel Haswell graphics cards based on the i915 driver.
161
162 Note that this option must be enabled for Intel Haswell C+ stepping machines, otherwise
163 the GPU audio controller/codecs will not be initialized, or may be damaged when exiting from S3 mode.
164
165config SND_HDA_CODEC_CIRRUS
166 bool "Build Cirrus Logic codec support"
167 default y
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index 24a251497a1f..c091438286a3 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -1,4 +1,6 @@
1snd-hda-intel-objs := hda_intel.o
2# for haswell power well
3snd-hda-intel-$(CONFIG_SND_HDA_I915) += hda_i915.o
4
5snd-hda-codec-y := hda_codec.o hda_jack.o hda_auto_parser.o
6snd-hda-codec-$(CONFIG_SND_HDA_GENERIC) += hda_generic.o
diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c
new file mode 100644
index 000000000000..76c13d5b3ca0
--- /dev/null
+++ b/sound/pci/hda/hda_i915.c
@@ -0,0 +1,75 @@
1/*
2 * hda_i915.c - routines for Haswell HDA controller power well support
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <sound/core.h>
22#include <drm/i915_powerwell.h>
23#include "hda_i915.h"
24
25static void (*get_power)(void);
26static void (*put_power)(void);
27
28void hda_display_power(bool enable)
29{
30 if (!get_power || !put_power)
31 return;
32
33 snd_printdd("HDA display power %s\n",
34 enable ? "Enable" : "Disable");
35 if (enable)
36 get_power();
37 else
38 put_power();
39}
40
41int hda_i915_init(void)
42{
43 int err = 0;
44
45 get_power = symbol_request(i915_request_power_well);
46 if (!get_power) {
47 snd_printk(KERN_WARNING "hda-i915: failed to get i915_request_power_well symbol\n");
48 return -ENODEV;
49 }
50
51 put_power = symbol_request(i915_release_power_well);
52 if (!put_power) {
53 symbol_put(i915_request_power_well);
54 get_power = NULL;
55 return -ENODEV;
56 }
57
58 snd_printd("HDA driver got symbols successfully from i915 module\n");
59
60 return err;
61}
62
63int hda_i915_exit(void)
64{
65 if (get_power) {
66 symbol_put(i915_request_power_well);
67 get_power = NULL;
68 }
69 if (put_power) {
70 symbol_put(i915_release_power_well);
71 put_power = NULL;
72 }
73
74 return 0;
75}
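
An illustrative caller sequence for these hooks, roughly mirroring what the controller driver does on Haswell (error handling trimmed; chip and err are assumed context):

/* probe: resolve the i915 symbols, then power the well up */
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
	err = hda_i915_init();
	if (err < 0)
		return err;
	hda_display_power(true);	/* before touching the HDMI codec */
}

/* suspend/remove: power down and drop the symbol references */
hda_display_power(false);
hda_i915_exit();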
diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h
new file mode 100644
index 000000000000..5a63da2c53e5
--- /dev/null
+++ b/sound/pci/hda/hda_i915.h
@@ -0,0 +1,35 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#ifndef __SOUND_HDA_I915_H
+#define __SOUND_HDA_I915_H
+
+#ifdef CONFIG_SND_HDA_I915
+void hda_display_power(bool enable);
+int hda_i915_init(void);
+int hda_i915_exit(void);
+#else
+static inline void hda_display_power(bool enable) {}
+static inline int hda_i915_init(void)
+{
+	return -ENODEV;
+}
+static inline int hda_i915_exit(void)
+{
+	return 0;
+}
+#endif
+
+#endif
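
hda_i915.h uses the usual Kconfig stub-header idiom: callers compile unconditionally, and with CONFIG_SND_HDA_I915 disabled the helpers collapse to no-ops (with hda_i915_init() reporting -ENODEV). A minimal sketch of the idiom, with CONFIG_FOO and foo_init() as hypothetical placeholders:

#ifdef CONFIG_FOO
int foo_init(void);			/* real implementation in foo.c */
#else
static inline int foo_init(void)
{
	return -ENODEV;			/* feature compiled out */
}
#endif

The inline stubs cost nothing at run time and keep #ifdef noise out of the call sites in hda_intel.c.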
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index de18722c4873..35e9f8b010a7 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -62,6 +62,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/firmware.h>
 #include "hda_codec.h"
+#include "hda_i915.h"
 
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -541,6 +542,10 @@ struct azx {
 	/* for pending irqs */
 	struct work_struct irq_pending_work;
 
+#ifdef CONFIG_SND_HDA_I915
+	struct work_struct probe_work;
+#endif
+
 	/* reboot notifier (for mysterious hangup problem at power-down) */
 	struct notifier_block reboot_notifier;
 
@@ -594,6 +599,7 @@ enum {
 #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23)	/* BDLE in 4k boundary */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)	/* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME	(1 << 26)	/* runtime PM support */
+#define AZX_DCAPS_I915_POWERWELL (1 << 27)	/* HSW i915 power well support */
 
 /* quirks for Intel PCH */
 #define AZX_DCAPS_INTEL_PCH_NOPM \
@@ -2900,6 +2906,8 @@ static int azx_suspend(struct device *dev)
 	pci_disable_device(pci);
 	pci_save_state(pci);
 	pci_set_power_state(pci, PCI_D3hot);
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+		hda_display_power(false);
 	return 0;
 }
 
@@ -2912,6 +2920,8 @@ static int azx_resume(struct device *dev)
 	if (chip->disabled)
 		return 0;
 
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+		hda_display_power(true);
 	pci_set_power_state(pci, PCI_D0);
 	pci_restore_state(pci);
 	if (pci_enable_device(pci) < 0) {
@@ -2944,6 +2954,8 @@ static int azx_runtime_suspend(struct device *dev)
 
 	azx_stop_chip(chip);
 	azx_clear_irq_pending(chip);
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+		hda_display_power(false);
 	return 0;
 }
 
@@ -2952,6 +2964,8 @@ static int azx_runtime_resume(struct device *dev)
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct azx *chip = card->private_data;
 
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+		hda_display_power(true);
 	azx_init_pci(chip);
 	azx_init_chip(chip, 1);
 	return 0;
@@ -3006,7 +3020,6 @@ static void azx_notifier_unregister(struct azx *chip)
 	unregister_reboot_notifier(&chip->reboot_notifier);
 }
 
-static int azx_first_init(struct azx *chip);
 static int azx_probe_continue(struct azx *chip);
 
 #ifdef SUPPORT_VGA_SWITCHEROO
@@ -3033,8 +3046,7 @@ static void azx_vs_set_state(struct pci_dev *pci,
 			snd_printk(KERN_INFO SFX
 				   "%s: Start delayed initialization\n",
 				   pci_name(chip->pci));
-			if (azx_first_init(chip) < 0 ||
-			    azx_probe_continue(chip) < 0) {
+			if (azx_probe_continue(chip) < 0) {
 				snd_printk(KERN_ERR SFX
 					   "%s: initialization error\n",
 					   pci_name(chip->pci));
@@ -3120,8 +3132,13 @@ static int register_vga_switcheroo(struct azx *chip)
  */
 static int azx_free(struct azx *chip)
 {
+	struct pci_dev *pci = chip->pci;
 	int i;
 
+	if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
+			&& chip->running)
+		pm_runtime_get_noresume(&pci->dev);
+
 	azx_del_card_list(chip);
 
 	azx_notifier_unregister(chip);
@@ -3173,6 +3190,10 @@ static int azx_free(struct azx *chip)
 	if (chip->fw)
 		release_firmware(chip->fw);
 #endif
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+		hda_display_power(false);
+		hda_i915_exit();
+	}
 	kfree(chip);
 
 	return 0;
@@ -3398,6 +3419,13 @@ static void azx_check_snoop_available(struct azx *chip)
 	}
 }
 
+#ifdef CONFIG_SND_HDA_I915
+static void azx_probe_work(struct work_struct *work)
+{
+	azx_probe_continue(container_of(work, struct azx, probe_work));
+}
+#endif
+
 /*
  * constructor
  */
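
The probe_work added here backs the deferral described in the patch comments: continuing the probe may end up calling request_module() to pull in i915, which the synchronous azx_probe() path avoids, so the remainder of the probe is bounced to a work item. A minimal sketch of the pattern, with mydev as a hypothetical placeholder:

#include <linux/workqueue.h>

struct mydev {
	struct work_struct probe_work;
	/* ... */
};

static void mydev_probe_work(struct work_struct *work)
{
	/* recover the containing device from the work item */
	struct mydev *dev = container_of(work, struct mydev, probe_work);

	/* the part of probing that may call request_module() runs here */
	(void)dev;	/* placeholder for the real probe continuation */
}

static int mydev_probe(struct mydev *dev)
{
	INIT_WORK(&dev->probe_work, mydev_probe_work);
	schedule_work(&dev->probe_work);	/* finish probing asynchronously */
	return 0;
}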
@@ -3473,7 +3501,13 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
 		return err;
 	}
 
+#ifdef CONFIG_SND_HDA_I915
+	/* continue probing in a work context, as it may trigger request_module() */
+	INIT_WORK(&chip->probe_work, azx_probe_work);
+#endif
+
 	*rchip = chip;
+
 	return 0;
 }
 
@@ -3730,11 +3764,6 @@ static int azx_probe(struct pci_dev *pci,
 	}
 
 	probe_now = !chip->disabled;
-	if (probe_now) {
-		err = azx_first_init(chip);
-		if (err < 0)
-			goto out_free;
-	}
 
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
 	if (patch[dev] && *patch[dev]) {
@@ -3749,15 +3778,22 @@ static int azx_probe(struct pci_dev *pci,
 	}
 #endif /* CONFIG_SND_HDA_PATCH_LOADER */
 
+	/* continue probing in a work context to avoid a request_module() deadlock */
+	if (probe_now && (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)) {
+#ifdef CONFIG_SND_HDA_I915
+		probe_now = false;
+		schedule_work(&chip->probe_work);
+#else
+		snd_printk(KERN_ERR SFX "Haswell HDA requires CONFIG_SND_HDA_I915 to be built in\n");
+#endif
+	}
+
 	if (probe_now) {
 		err = azx_probe_continue(chip);
 		if (err < 0)
 			goto out_free;
 	}
 
-	if (pci_dev_run_wake(pci))
-		pm_runtime_put_noidle(&pci->dev);
-
 	dev++;
 	complete_all(&chip->probe_wait);
 	return 0;
@@ -3770,9 +3806,24 @@ out_free:
 
 static int azx_probe_continue(struct azx *chip)
 {
+	struct pci_dev *pci = chip->pci;
 	int dev = chip->dev_index;
 	int err;
 
+	/* Request the power well for the Haswell HDA controller and codec */
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+		err = hda_i915_init();
+		if (err < 0) {
+			snd_printk(KERN_ERR SFX "failed to request the power well from i915\n");
+			goto out_free;
+		}
+		hda_display_power(true);
+	}
+
+	err = azx_first_init(chip);
+	if (err < 0)
+		goto out_free;
+
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
 	chip->beep_mode = beep_mode[dev];
 #endif
@@ -3817,6 +3868,8 @@ static int azx_probe_continue(struct azx *chip)
 	power_down_all_codecs(chip);
 	azx_notifier_register(chip);
 	azx_add_card_list(chip);
+	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
+		pm_runtime_put_noidle(&pci->dev);
 
 	return 0;
 
@@ -3829,9 +3882,6 @@ static void azx_remove(struct pci_dev *pci)
 {
 	struct snd_card *card = pci_get_drvdata(pci);
 
-	if (pci_dev_run_wake(pci))
-		pm_runtime_get_noresume(&pci->dev);
-
 	if (card)
 		snd_card_free(card);
 	pci_set_drvdata(pci, NULL);
@@ -3864,11 +3914,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	/* Haswell */
 	{ PCI_DEVICE(0x8086, 0x0a0c),
-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
+	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
+	  AZX_DCAPS_I915_POWERWELL },
 	{ PCI_DEVICE(0x8086, 0x0c0c),
-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
+	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
+	  AZX_DCAPS_I915_POWERWELL },
 	{ PCI_DEVICE(0x8086, 0x0d0c),
-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
+	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
+	  AZX_DCAPS_I915_POWERWELL },
 	/* 5 Series/3400 */
 	{ PCI_DEVICE(0x8086, 0x3b56),
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },